/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH 30000
#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
        " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}
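/*
 * Illustrative usage of the module parameters defined above (a sketch, not
 * driver code; the values shown are arbitrary).  Parameters registered with
 * permission 0 can only be set at load time, e.g.:
 *
 *   modprobe mpt3sas max_queue_depth=1024 msix_disable=1
 *
 * mpt3sas_fwfault_debug, registered through module_param_call() with mode
 * 0644, should additionally be writable at runtime via sysfs:
 *
 *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * which invokes _scsih_set_fwfault_debug() and propagates the new value to
 * every IOC on mpt3sas_ioc_list.
 */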
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->shost_recovery || ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
                    ioc->name);

                /* It may be possible that EEH recovery can resolve some of
                 * the pci bus failure issues rather than removing the dead
                 * ioc function by considering the controller to be in a
                 * non-operational state.  So here priority is given to EEH
                 * recovery.  If it doesn't resolve the issue, the mpt3sas
                 * driver will consider this controller non-operational and
                 * remove the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                            flags);
                        goto rearm_timer;
                }

                /*
                 * Call _scsih_flush_pending_cmds callback so that we flush all
                 * pending commands back to OS.  This call is required to avoid
                 * deadlock at block layer.  Dead IOC will fail to do diag
                 * reset, and this call is safe since dead ioc will never
                 * return any command back from HW.
                 */
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /* Remove the dead host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        pr_err(MPT3SAS_FMT
                        "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                        ioc->name, __func__);
                else
                        pr_err(MPT3SAS_FMT
                        "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                        ioc->name, __func__);
                return; /* don't rearm timer */
        }

        ioc->non_operational_loop = 0;

        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
                    __func__, (rc == 0) ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
                        mpt3sas_base_fault_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
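/*
 * Worked example of the doorbell decoding used above (constants from the
 * MPI2 spec headers; shown here only for illustration):
 *
 *   doorbell = 0x40002622
 *   doorbell & MPI2_IOC_STATE_MASK (0xF0000000) = 0x40000000
 *                                               = MPI2_IOC_STATE_FAULT
 *   doorbell & MPI2_DOORBELL_DATA_MASK (0x0000FFFF) = 0x2622 (fault code)
 *
 * A masked value of 0xF0000000 (all state bits set) is not a defined IOC
 * state; it indicates the PCI read returned all ones, i.e. the adapter is no
 * longer responding, which is what triggers the dead-ioc path above.
 */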
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;

        if (ioc->fault_reset_work_q)
                return;

        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
                    ioc->name, __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
            ioc->name, fault_code);
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00 to the doorbell
 * register will halt controller firmware.  By stopping both the driver and
 * the firmware, the end user can obtain a ring buffer from the controller
 * UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
                mpt3sas_base_fault_info(ioc, doorbell);
        else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
                    ioc->name);
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config cant commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                ioc->name, desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
                    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
                    "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
                        (event_data->ReasonCode ==
                            MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
                        "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_info("enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_info("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        pr_warn(MPT3SAS_FMT
            "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
            ioc->name, log_info,
            originator_str, sas_loginfo.dw.code,
            sas_loginfo.dw.subcode);
}

/**
 * _base_display_reply_info - display ioc status and log info from a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32 bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;
        u16 ioc_status;
        u32 loginfo = 0;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
                pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
            (ioc->logging_level & MPT_DEBUG_REPLY)) {
                _base_sas_ioc_info(ioc, mpi_reply,
                    mpt3sas_base_get_msg_frame(ioc, smid));
        }

        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
                _base_sas_log_info(ioc, loginfo);
        }

        if (ioc_status || loginfo) {
                ioc_status &= MPI2_IOCSTATUS_MASK;
                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
        }
}
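/*
 * Worked example of the loginfo_type decoding in _base_sas_log_info() above
 * (illustrative only; it relies on the little-endian bitfield layout the
 * driver assumes):
 *
 *   log_info = 0x31170000
 *     bus_type   = bits 31:28 = 0x3     (SAS)
 *     originator = bits 27:24 = 0x1     ("PL")
 *     code       = bits 23:16 = 0x17
 *     subcode    = bits 15:0  = 0x0000
 *
 * This particular value is the per-nexus-loss loginfo that the routine
 * deliberately filters out to avoid log spam.
 */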
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32 bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
                return 1;

        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
        if (mpi_reply) {
                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

        complete(&ioc->base_cmds.done);
        return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32 bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
        Mpi2EventNotificationReply_t *mpi_reply;
        Mpi2EventAckRequest_t *ack_request;
        u16 smid;
        struct _event_ack_list *delayed_event_ack;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (!mpi_reply)
                return 1;
        if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
                return 1;

        _base_display_event_data(ioc, mpi_reply);

        if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
                goto out;
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
                delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
                    GFP_ATOMIC);
                if (!delayed_event_ack)
                        goto out;
                INIT_LIST_HEAD(&delayed_event_ack->list);
                delayed_event_ack->Event = mpi_reply->Event;
                delayed_event_ack->EventContext = mpi_reply->EventContext;
                list_add_tail(&delayed_event_ack->list,
                    &ioc->delayed_event_ack_list);
                dewtprintk(ioc, pr_info(MPT3SAS_FMT
                    "DELAYED: EVENT ACK: event (0x%04x)\n",
                    ioc->name, le16_to_cpu(mpi_reply->Event)));
                goto out;
        }

        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
        ack_request->Event = mpi_reply->Event;
        ack_request->EventContext = mpi_reply->EventContext;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
        ioc->put_smid_default(ioc, smid);

 out:

        /* scsih callback handler */
        mpt3sas_scsih_event_callback(ioc, msix_index, reply);

        /* ctl callback handler */
        mpt3sas_ctl_event_callback(ioc, msix_index, reply);

        return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        struct scsi_cmnd *cmd;

        if (WARN_ON(!smid) ||
            WARN_ON(smid >= ioc->hi_priority_smid))
                return NULL;

        cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (cmd)
                return scsi_cmd_priv(cmd);

        return NULL;
}
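/*
 * The SMID space is partitioned into regions, which is what
 * _base_get_cb_idx() below walks.  A rough sketch of the layout (exact
 * boundaries are set up from the IOC facts at init time):
 *
 *   [1 .. ctl_smid-1]                       per-command SCSI IO trackers
 *   [ctl_smid]                              internal ctl (ioctl) command
 *   [hi_priority_smid .. internal_smid-1]   high-priority requests
 *   [internal_smid .. hba_queue_depth]      other internal requests
 *
 * where ctl_smid = scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; SMIDs that
 * fall outside these regions resolve to the invalid callback index 0xFF.
 */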
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        int i;
        u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
        u8 cb_idx = 0xFF;

        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;

                if (smid < ctl_smid) {
                        st = _get_st_from_smid(ioc, smid);
                        if (st)
                                cb_idx = st->cb_idx;
                } else if (smid == ctl_smid)
                        cb_idx = ioc->ctl_cb_idx;
        } else if (smid < ioc->internal_smid) {
                i = smid - ioc->hi_priority_smid;
                cb_idx = ioc->hpr_lookup[i].cb_idx;
        } else if (smid <= ioc->hba_queue_depth) {
                i = smid - ioc->internal_smid;
                cb_idx = ioc->internal_lookup[i].cb_idx;
        }
        return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
        u32 him_register;

        ioc->mask_interrupts = 1;
        him_register = readl(&ioc->chip->HostInterruptMask);
        him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
        writel(him_register, &ioc->chip->HostInterruptMask);
        readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
        u32 him_register;

        him_register = readl(&ioc->chip->HostInterruptMask);
        him_register &= ~MPI2_HIM_RIM;
        writel(him_register, &ioc->chip->HostInterruptMask);
        ioc->mask_interrupts = 0;
}

union reply_descriptor {
        u64 word;
        struct {
                u32 low;
                u32 high;
        } u;
};
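/*
 * Each reply descriptor is consumed as one 64-bit word; the low/high halves
 * above exist so the interrupt handler can cheaply test for the "unused"
 * pattern.  For illustration:
 *
 *   rd.word = 0xFFFFFFFFFFFFFFFF  ->  rd.u.low == rd.u.high == UINT_MAX
 *
 * i.e. the firmware has not posted anything at this index yet, so the
 * handler below stops scanning the post queue.
 */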
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
        struct adapter_reply_queue *reply_q = bus_id;
        union reply_descriptor rd;
        u32 completed_cmds;
        u8 request_descript_type;
        u16 smid;
        u8 cb_idx;
        u32 reply;
        u8 msix_index = reply_q->msix_index;
        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
        Mpi2ReplyDescriptorsUnion_t *rpf;
        u8 rc;

        if (ioc->mask_interrupts)
                return IRQ_NONE;

        if (!atomic_add_unless(&reply_q->busy, 1, 1))
                return IRQ_NONE;

        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
        request_descript_type = rpf->Default.ReplyFlags
            & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
                atomic_dec(&reply_q->busy);
                return IRQ_NONE;
        }

        completed_cmds = 0;
        cb_idx = 0xFF;
        do {
                rd.word = le64_to_cpu(rpf->Words);
                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
                        goto out;
                reply = 0;
                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
                if (request_descript_type ==
                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
                    request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
                    request_descript_type ==
                    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
                        cb_idx = _base_get_cb_idx(ioc, smid);
                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                            (likely(mpt_callbacks[cb_idx] != NULL))) {
                                rc = mpt_callbacks[cb_idx](ioc, smid,
                                    msix_index, 0);
                                if (rc)
                                        mpt3sas_base_free_smid(ioc, smid);
                        }
                } else if (request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
                        reply = le32_to_cpu(
                            rpf->AddressReply.ReplyFrameAddress);
                        if (reply > ioc->reply_dma_max_address ||
                            reply < ioc->reply_dma_min_address)
                                reply = 0;
                        if (smid) {
                                cb_idx = _base_get_cb_idx(ioc, smid);
                                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                                    (likely(mpt_callbacks[cb_idx] != NULL))) {
                                        rc = mpt_callbacks[cb_idx](ioc, smid,
                                            msix_index, reply);
                                        if (reply)
                                                _base_display_reply_info(ioc,
                                                    smid, msix_index, reply);
                                        if (rc)
                                                mpt3sas_base_free_smid(ioc,
                                                    smid);
                                }
                        } else {
                                _base_async_event(ioc, msix_index, reply);
                        }

                        /* reply free queue handling */
                        if (reply) {
                                ioc->reply_free_host_index =
                                    (ioc->reply_free_host_index ==
                                    (ioc->reply_free_queue_depth - 1)) ?
                                    0 : ioc->reply_free_host_index + 1;
                                ioc->reply_free[ioc->reply_free_host_index] =
                                    cpu_to_le32(reply);
                                writel(ioc->reply_free_host_index,
                                    &ioc->chip->ReplyFreeHostIndex);
                        }
                }

                rpf->Words = cpu_to_le64(ULLONG_MAX);
                reply_q->reply_post_host_index =
                    (reply_q->reply_post_host_index ==
                    (ioc->reply_post_queue_depth - 1)) ? 0 :
                    reply_q->reply_post_host_index + 1;
                request_descript_type =
                    reply_q->reply_post_free[reply_q->reply_post_host_index].
                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                completed_cmds++;
                /* Update the reply post host index after continuously
                 * processing the threshold number of Reply Descriptors.
                 * So that FW can find enough entries to post the Reply
                 * Descriptors in the reply descriptor post queue.
                 */
                if (completed_cmds > ioc->hba_queue_depth/3) {
                        if (ioc->combined_reply_queue) {
                                writel(reply_q->reply_post_host_index |
                                    ((msix_index & 7) <<
                                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                                    ioc->replyPostRegisterIndex[msix_index/8]);
                        } else {
                                writel(reply_q->reply_post_host_index |
                                    (msix_index <<
                                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                                    &ioc->chip->ReplyPostHostIndex);
                        }
                        completed_cmds = 1;
                }
                if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                        goto out;
                if (!reply_q->reply_post_host_index)
                        rpf = reply_q->reply_post_free;
                else
                        rpf++;
        } while (1);

 out:

        if (!completed_cmds) {
                atomic_dec(&reply_q->busy);
                return IRQ_NONE;
        }

        if (ioc->is_warpdrive) {
                writel(reply_q->reply_post_host_index,
                    ioc->reply_post_host_index[msix_index]);
                atomic_dec(&reply_q->busy);
                return IRQ_HANDLED;
        }

        /* Update Reply Post Host Index.
         * For those HBAs which support the combined reply queue feature:
         * 1. Get the correct Supplemental Reply Post Host Index Register,
         *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
         *    Post Host Index Register address bank, replyPostRegisterIndex[].
         * 2. Then update this register with the new reply host index value
         *    in the ReplyPostIndex field, and the MSIxIndex field with the
         *    msix_index value reduced to a value between 0 and 7 using a
         *    modulo 8 operation, since each Supplemental Reply Post Host
         *    Index Register supports 8 MSI-X vectors.
         *
         * For other HBAs just update the Reply Post Host Index register with
         * the new reply host index value in the ReplyPostIndex field and the
         * msix_index value in the MSIxIndex field.
         */
        if (ioc->combined_reply_queue)
                writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    ioc->replyPostRegisterIndex[msix_index/8]);
        else
                writel(reply_q->reply_post_host_index | (msix_index <<
                    MPI2_RPHI_MSIX_INDEX_SHIFT),
                    &ioc->chip->ReplyPostHostIndex);
        atomic_dec(&reply_q->busy);
        return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
 * @ioc: per adapter object
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
        return (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Return nothing.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
        struct adapter_reply_queue *reply_q;

        /* If MSIX capability is turned off
         * then multi-queues are not enabled
         */
        if (!_base_is_controller_msix_enabled(ioc))
                return;

        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
                if (ioc->shost_recovery || ioc->remove_host ||
                    ioc->pci_error_recovery)
                        return;
                /* TMs are on msix_index == 0 */
                if (reply_q->msix_index == 0)
                        continue;
                synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
        }
}
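/*
 * Illustration of the ReplyPostHostIndex register writes performed in
 * _base_interrupt() above (MPI2_RPHI_MSIX_INDEX_SHIFT is 24 in the MPI2
 * headers; the values are made up for the example):
 *
 *   msix_index = 5, reply_post_host_index = 0x10
 *   value written = 0x10 | (5 << 24) = 0x05000010
 *
 * With the combined reply queue feature, each supplemental register serves
 * 8 vectors, so vector 5 writes (5 & 7) into the MSIxIndex field of
 * replyPostRegisterIndex[5 / 8].
 */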
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
        mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Returns cb_idx.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
        u8 cb_idx;

        for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
                if (mpt_callbacks[cb_idx] == NULL)
                        break;

        mpt_callbacks[cb_idx] = cb_func;
        return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
        u8 cb_idx;

        for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
                mpt3sas_base_release_callback_handler(cb_idx);
}


/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
        u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
            MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
            MPI2_SGE_FLAGS_SHIFT);
        ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
        Mpi2SGESimple32_t *sgel = paddr;

        flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
        sgel->FlagsLength = cpu_to_le32(flags_length);
        sgel->Address = cpu_to_le32(dma_addr);
}
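/*
 * The MPI2 SGE packs its flags and length into a single 32-bit FlagsLength
 * word: the flags occupy the top byte (MPI2_SGE_FLAGS_SHIFT is 24 in the
 * MPI2 headers) and the transfer length the low 24 bits.  An illustrative
 * encoding of a 512-byte simple element that ends the list:
 *
 *   flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_LAST_ELEMENT |
 *           MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST;
 *   FlagsLength = (flags << MPI2_SGE_FLAGS_SHIFT) | 512;
 */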
/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
        Mpi2SGESimple64_t *sgel = paddr;

        flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
            MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
        sgel->FlagsLength = cpu_to_le32(flags_length);
        sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Returns chain tracker (from ioc->free_chain_list)
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
        struct scsi_cmnd *scmd)
{
        struct chain_tracker *chain_req;
        struct scsiio_tracker *st = scsi_cmd_priv(scmd);
        unsigned long flags;

        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        if (list_empty(&ioc->free_chain_list)) {
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
                dfailprintk(ioc, pr_warn(MPT3SAS_FMT
                    "chain buffers not available\n", ioc->name));
                return NULL;
        }
        chain_req = list_entry(ioc->free_chain_list.next,
            struct chain_tracker, tracker_list);
        list_del_init(&chain_req->tracker_list);
        list_add_tail(&chain_req->tracker_list, &st->chain_list);
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
        return chain_req;
}


/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
        size_t data_in_sz)
{
        u32 sgl_flags;

        if (!data_out_sz && !data_in_sz) {
                _base_build_zero_len_sge(ioc, psge);
                return;
        }

        if (data_out_sz && data_in_sz) {
                /* WRITE sgel first */
                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
                ioc->base_add_sg_single(psge, sgl_flags |
                    data_out_sz, data_out_dma);

                /* incr sgel */
                psge += ioc->sge_size;

                /* READ sgel last */
                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
                    MPI2_SGE_FLAGS_END_OF_LIST);
                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
                ioc->base_add_sg_single(psge, sgl_flags |
                    data_in_sz, data_in_dma);
        } else if (data_out_sz) /* WRITE */ {
                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
                    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
                ioc->base_add_sg_single(psge, sgl_flags |
                    data_out_sz, data_out_dma);
        } else if (data_in_sz) /* READ */ {
                sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
                    MPI2_SGE_FLAGS_END_OF_LIST);
                sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
                ioc->base_add_sg_single(psge, sgl_flags |
                    data_in_sz, data_in_dma);
        }
}

/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP).  The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
 * used to describe a larger data buffer.  If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments.  The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does.
 * Note however, that this function is only used by the IOCTL call, so the
 * memory given will be guaranteed to be contiguous.  There is no need to
 * translate non-contiguous SGL into a PRP in this case.  All PRPs will
 * describe contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries.  The first (PRP1) either
 * contains a PRP list pointer or a PRP element, depending upon the command.
 * PRP2 contains the second PRP element if the memory being described fits
 * within 2 PRP entries, or a PRP list pointer if the PRP spans more than two
 * entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries.  Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field.  The
 * address always points at the beginning of a 4KB physical memory page, and
 * the offset describes where within that 4KB page the memory segment begins.
 * Only the first element in a PRP list may contain a non-zero offset,
 * implying that all memory segments following the first begin at the start
 * of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list.  If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the
 * region begins within the 4KB page.  The last memory segment may end before
 * the end of the 4KB segment, depending upon the overall size of the memory
 * being described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer
 * length is used to determine where the end of the data memory buffer is
 * located, and how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Returns nothing.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
        dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
        size_t data_in_sz)
{
        int prp_size = NVME_PRP_SIZE;
        __le64 *prp_entry, *prp1_entry, *prp2_entry;
        __le64 *prp_page;
        dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
        u32 offset, entry_len;
        u32 page_mask_result, page_mask;
        size_t length;

        /*
         * Not all commands require a data transfer.  If no data, just return
         * without constructing any PRP.
         */
        if (!data_in_sz && !data_out_sz)
                return;
        /*
         * Set pointers to PRP1 and PRP2, which are in the NVMe command.
         * PRP1 is located at a 24 byte offset from the start of the NVMe
         * command.  Then set the current PRP entry pointer to PRP1.
         */
        prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
            NVME_CMD_PRP1_OFFSET);
        prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
            NVME_CMD_PRP2_OFFSET);
        prp_entry = prp1_entry;
        /*
         * For the PRP entries, use the specially allocated buffer of
         * contiguous memory.
         */
        prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
        prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

        /*
         * Check if we are within 1 entry of a page boundary; we don't
         * want our first entry to be a PRP List entry.
         */
        page_mask = ioc->page_size - 1;
        page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
        if (!page_mask_result) {
                /* Bump up to next page boundary. */
                prp_page = (__le64 *)((u8 *)prp_page + prp_size);
                prp_page_dma = prp_page_dma + prp_size;
        }

        /*
         * Set PRP physical pointer, which initially points to the current PRP
         * DMA memory page.
         */
        prp_entry_dma = prp_page_dma;

        /* Get physical address and length of the data buffer. */
        if (data_in_sz) {
                dma_addr = data_in_dma;
                length = data_in_sz;
        } else {
                dma_addr = data_out_dma;
                length = data_out_sz;
        }

        /* Loop while the length is not zero. */
        while (length) {
                /*
                 * Check if we need to put a list pointer here if we are at
                 * page boundary - prp_size (8 bytes).
                 */
                page_mask_result = (prp_entry_dma + prp_size) & page_mask;
                if (!page_mask_result) {
                        /*
                         * This is the last entry in a PRP List, so we need to
                         * put a PRP list pointer here.  What this does is:
                         *   - bump the current memory pointer to the next
                         *     address, which will be the next full page.
                         *   - set the PRP Entry to point to that page.  This
                         *     is now the PRP List pointer.
                         *   - bump the PRP Entry pointer to the start of the
                         *     next page.  Since all of this PRP memory is
                         *     contiguous, no need to get a new page - it's
                         *     just the next address.
                         */
                        prp_entry_dma++;
                        *prp_entry = cpu_to_le64(prp_entry_dma);
                        prp_entry++;
                }

                /* Need to handle if entry will be part of a page. */
                offset = dma_addr & page_mask;
                entry_len = ioc->page_size - offset;

                if (prp_entry == prp1_entry) {
                        /*
                         * Must fill in the first PRP pointer (PRP1) before
                         * moving on.
                         */
                        *prp1_entry = cpu_to_le64(dma_addr);

                        /*
                         * Now point to the second PRP entry within the
                         * command (PRP2).
                         */
                        prp_entry = prp2_entry;
                } else if (prp_entry == prp2_entry) {
                        /*
                         * Should the PRP2 entry be a PRP List pointer or just
                         * a regular PRP pointer?  If there is more than one
                         * more page of data, must use a PRP List pointer.
                         */
                        if (length > ioc->page_size) {
                                /*
                                 * PRP2 will contain a PRP List pointer because
                                 * more PRP's are needed with this command. The
                                 * list will start at the beginning of the
                                 * contiguous buffer.
                                 */
                                *prp2_entry = cpu_to_le64(prp_entry_dma);

                                /*
                                 * The next PRP Entry will be the start of the
                                 * first PRP List.
                                 */
                                prp_entry = prp_page;
                        } else {
                                /*
                                 * After this, the PRP Entries are complete.
                                 * This command uses 2 PRP's and no PRP list.
                                 */
                                *prp2_entry = cpu_to_le64(dma_addr);
                        }
                } else {
                        /*
                         * Put entry in list and bump the addresses.
                         *
                         * After PRP1 and PRP2 are filled in, this will fill in
                         * all remaining PRP entries in a PRP List, one per
                         * each time through the loop.
                         */
                        *prp_entry = cpu_to_le64(dma_addr);
                        prp_entry++;
                        prp_entry_dma++;
                }

                /*
                 * Bump the phys address of the command's data buffer by the
                 * entry_len.
                 */
                dma_addr += entry_len;

                /* Decrement length accounting for last partial page. */
                if (entry_len > length)
                        length = 0;
                else
                        length -= entry_len;
        }
}
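/*
 * Worked example of the PRP construction above, assuming a 4KB IOC page
 * size and an 8KB buffer starting at DMA address 0x10000200 (illustrative
 * addresses only):
 *
 *   PRP1    = 0x10000200    first segment; a non-zero offset is allowed
 *                           only here, covering 4KB - 0x200 = 0xE00 bytes
 *   PRP2    = prp_page_dma  more than one page remains, so PRP2 becomes
 *                           the PRP list pointer
 *   list[0] = 0x10001000    next full 4KB page
 *   list[1] = 0x10002000    final 0x200 bytes
 *
 * Had the buffer fit in two pages, PRP2 would instead have held the second
 * page address directly and no list would be needed.
 */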
/**
 * base_make_prp_nvme -
 * Prepare PRPs (Physical Region Pages) - SGLs specific to NVMe drives only
 *
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
 * Returns nothing; the PRP-vs-IEEE-SGL decision is made beforehand by
 * base_is_prp_possible().
 */
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
        struct scsi_cmnd *scmd,
        Mpi25SCSIIORequest_t *mpi_request,
        u16 smid, int sge_count)
{
        int sge_len, num_prp_in_chain = 0;
        Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
        __le64 *curr_buff;
        dma_addr_t msg_dma, sge_addr, offset;
        u32 page_mask, page_mask_result;
        struct scatterlist *sg_scmd;
        u32 first_prp_len;
        int data_len = scsi_bufflen(scmd);
        u32 nvme_pg_size;

        nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
        /*
         * NVMe has a very convoluted PRP format.  One PRP is required
         * for each page or partial page.  The driver needs to split up OS
         * sg_list entries if they are longer than one page or cross a page
         * boundary.  The driver also has to insert a PRP list pointer entry
         * as the last entry in each physical page of the PRP list.
         *
         * NOTE: The first PRP "entry" is actually placed in the first
         * SGL entry in the main message as IEEE 64 format.  The 2nd
         * entry in the main message is the chain element, and the rest
         * of the PRP entries are built in the contiguous pcie buffer.
         */
        page_mask = nvme_pg_size - 1;

        /*
         * Native SGL is needed.
         * Put a chain element in main message frame that points to the first
         * chain buffer.
         *
         * NOTE:  The ChainOffset field must be 0 when using a chain pointer
         *        to a native SGL.
         */

        /* Set main message chain element pointer */
        main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
        /*
         * For NVMe the chain element needs to be the 2nd SG entry in the main
         * message.
         */
        main_chain_element = (Mpi25IeeeSgeChain64_t *)
                ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

        /*
         * For the PRP entries, use the specially allocated buffer of
         * contiguous memory.  Normal chain buffers can't be used
         * because each chain buffer would need to be the size of an OS
         * page (4k).
         */
        curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
        msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

        main_chain_element->Address = cpu_to_le64(msg_dma);
        main_chain_element->NextChainOffset = 0;
        main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
            MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

        /* Build first PRP; the SGE need not be page aligned. */
        ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
        sg_scmd = scsi_sglist(scmd);
        sge_addr = sg_dma_address(sg_scmd);
        sge_len = sg_dma_len(sg_scmd);

        offset = sge_addr & page_mask;
        first_prp_len = nvme_pg_size - offset;

        ptr_first_sgl->Address = cpu_to_le64(sge_addr);
        ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

        data_len -= first_prp_len;

        if (sge_len > first_prp_len) {
                sge_addr += first_prp_len;
                sge_len -= first_prp_len;
        } else if (data_len && (sge_len == first_prp_len)) {
                sg_scmd = sg_next(sg_scmd);
                sge_addr = sg_dma_address(sg_scmd);
                sge_len = sg_dma_len(sg_scmd);
        }

        for (;;) {
                offset = sge_addr & page_mask;

                /* Put PRP pointer due to page boundary */
                page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
                if (unlikely(!page_mask_result)) {
                        scmd_printk(KERN_NOTICE,
                            scmd, "page boundary curr_buff: 0x%p\n",
                            curr_buff);
                        msg_dma += 8;
                        *curr_buff = cpu_to_le64(msg_dma);
                        curr_buff++;
                        num_prp_in_chain++;
                }

                *curr_buff = cpu_to_le64(sge_addr);
                curr_buff++;
                msg_dma += 8;
                num_prp_in_chain++;

                sge_addr += nvme_pg_size;
                sge_len -= nvme_pg_size;
                data_len -= nvme_pg_size;

                if (data_len <= 0)
                        break;

                if (sge_len > 0)
                        continue;

                sg_scmd = sg_next(sg_scmd);
                sge_addr = sg_dma_address(sg_scmd);
                sge_len = sg_dma_len(sg_scmd);
        }

        main_chain_element->Length =
            cpu_to_le32(num_prp_in_chain * sizeof(u64));
        return;
}

static bool
base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
        struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
        u32 data_length = 0;
        struct scatterlist *sg_scmd;
        bool build_prp = true;

        data_length = scsi_bufflen(scmd);
        sg_scmd = scsi_sglist(scmd);

        /* If the data length is <= 16K and the number of SGEs is <= 2,
         * build an IEEE SGL instead.
         */
        if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
                build_prp = false;

        return build_prp;
}
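/*
 * Illustration of the PRP-vs-IEEE decision above (assuming the 4KB
 * NVME_PRP_PAGE_SIZE): an 8KB transfer in a single SGE stays on the IEEE
 * SGL path, since it is <= 16KB with <= 2 SGEs, while a 64KB transfer split
 * across 16 SGEs is built as a native NVMe PRP.
 */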
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Returns 0 if native SGL was built, 1 if no SGL was built
 */
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
	struct _pcie_device *pcie_device)
{
	struct scatterlist *sg_scmd;
	int sges_left;

	/* Get the SG list pointer and info. */
	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return 1;
	}

	/* Check if we need to build a native SG list. */
	if (base_is_prp_possible(ioc, pcie_device,
				scmd, sges_left) == 0) {
		/* A native SG list can't be built; fall back to IEEE SGLs. */
		goto out;
	}

	/*
	 * Build native NVMe PRP.
	 */
	base_make_prp_nvme(ioc, scmd, mpi_request,
			smid, sges_left);

	return 0;
out:
	scsi_dma_unmap(scmd);
	return 1;
}

/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
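 *
 * The main message holds ioc->max_sges_in_main_message simple elements;
 * when more are needed, a chain element follows them and ChainOffset is
 * expressed in 4-byte words.  Worked example (the numbers here are
 * illustrative, not taken from the MPI headers): with the SGL at byte
 * offset 96 of the request frame and three 12-byte SGEs ahead of the
 * chain element, ChainOffset = (96 + 3 * 12) / 4 = 33.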
1881 * 1882 * Returns 0 success, anything else error 1883 */ 1884 static int 1885 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, 1886 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) 1887 { 1888 Mpi2SCSIIORequest_t *mpi_request; 1889 dma_addr_t chain_dma; 1890 struct scatterlist *sg_scmd; 1891 void *sg_local, *chain; 1892 u32 chain_offset; 1893 u32 chain_length; 1894 u32 chain_flags; 1895 int sges_left; 1896 u32 sges_in_segment; 1897 u32 sgl_flags; 1898 u32 sgl_flags_last_element; 1899 u32 sgl_flags_end_buffer; 1900 struct chain_tracker *chain_req; 1901 1902 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1903 1904 /* init scatter gather flags */ 1905 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; 1906 if (scmd->sc_data_direction == DMA_TO_DEVICE) 1907 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 1908 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) 1909 << MPI2_SGE_FLAGS_SHIFT; 1910 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | 1911 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) 1912 << MPI2_SGE_FLAGS_SHIFT; 1913 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1914 1915 sg_scmd = scsi_sglist(scmd); 1916 sges_left = scsi_dma_map(scmd); 1917 if (sges_left < 0) { 1918 sdev_printk(KERN_ERR, scmd->device, 1919 "pci_map_sg failed: request for %d bytes!\n", 1920 scsi_bufflen(scmd)); 1921 return -ENOMEM; 1922 } 1923 1924 sg_local = &mpi_request->SGL; 1925 sges_in_segment = ioc->max_sges_in_main_message; 1926 if (sges_left <= sges_in_segment) 1927 goto fill_in_last_segment; 1928 1929 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + 1930 (sges_in_segment * ioc->sge_size))/4; 1931 1932 /* fill in main message segment when there is a chain following */ 1933 while (sges_in_segment) { 1934 if (sges_in_segment == 1) 1935 ioc->base_add_sg_single(sg_local, 1936 sgl_flags_last_element | sg_dma_len(sg_scmd), 1937 sg_dma_address(sg_scmd)); 1938 else 1939 ioc->base_add_sg_single(sg_local, sgl_flags | 1940 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 1941 sg_scmd = sg_next(sg_scmd); 1942 sg_local += ioc->sge_size; 1943 sges_left--; 1944 sges_in_segment--; 1945 } 1946 1947 /* initializing the chain flags and pointers */ 1948 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 1949 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 1950 if (!chain_req) 1951 return -1; 1952 chain = chain_req->chain_buffer; 1953 chain_dma = chain_req->chain_buffer_dma; 1954 do { 1955 sges_in_segment = (sges_left <= 1956 ioc->max_sges_in_chain_message) ? sges_left : 1957 ioc->max_sges_in_chain_message; 1958 chain_offset = (sges_left == sges_in_segment) ? 
1959 0 : (sges_in_segment * ioc->sge_size)/4; 1960 chain_length = sges_in_segment * ioc->sge_size; 1961 if (chain_offset) { 1962 chain_offset = chain_offset << 1963 MPI2_SGE_CHAIN_OFFSET_SHIFT; 1964 chain_length += ioc->sge_size; 1965 } 1966 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | 1967 chain_length, chain_dma); 1968 sg_local = chain; 1969 if (!chain_offset) 1970 goto fill_in_last_segment; 1971 1972 /* fill in chain segments */ 1973 while (sges_in_segment) { 1974 if (sges_in_segment == 1) 1975 ioc->base_add_sg_single(sg_local, 1976 sgl_flags_last_element | 1977 sg_dma_len(sg_scmd), 1978 sg_dma_address(sg_scmd)); 1979 else 1980 ioc->base_add_sg_single(sg_local, sgl_flags | 1981 sg_dma_len(sg_scmd), 1982 sg_dma_address(sg_scmd)); 1983 sg_scmd = sg_next(sg_scmd); 1984 sg_local += ioc->sge_size; 1985 sges_left--; 1986 sges_in_segment--; 1987 } 1988 1989 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 1990 if (!chain_req) 1991 return -1; 1992 chain = chain_req->chain_buffer; 1993 chain_dma = chain_req->chain_buffer_dma; 1994 } while (1); 1995 1996 1997 fill_in_last_segment: 1998 1999 /* fill the last segment */ 2000 while (sges_left) { 2001 if (sges_left == 1) 2002 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | 2003 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2004 else 2005 ioc->base_add_sg_single(sg_local, sgl_flags | 2006 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2007 sg_scmd = sg_next(sg_scmd); 2008 sg_local += ioc->sge_size; 2009 sges_left--; 2010 } 2011 2012 return 0; 2013 } 2014 2015 /** 2016 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format 2017 * @ioc: per adapter object 2018 * @scmd: scsi command 2019 * @smid: system request message index 2020 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be 2021 * constructed on need. 2022 * Context: none. 2023 * 2024 * The main routine that builds scatter gather table from a given 2025 * scsi request sent via the .queuecommand main handler. 2026 * 2027 * Returns 0 success, anything else error 2028 */ 2029 static int 2030 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 2031 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) 2032 { 2033 Mpi25SCSIIORequest_t *mpi_request; 2034 dma_addr_t chain_dma; 2035 struct scatterlist *sg_scmd; 2036 void *sg_local, *chain; 2037 u32 chain_offset; 2038 u32 chain_length; 2039 int sges_left; 2040 u32 sges_in_segment; 2041 u8 simple_sgl_flags; 2042 u8 simple_sgl_flags_last; 2043 u8 chain_sgl_flags; 2044 struct chain_tracker *chain_req; 2045 2046 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2047 2048 /* init scatter gather flags */ 2049 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2050 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2051 simple_sgl_flags_last = simple_sgl_flags | 2052 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 2053 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2054 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2055 2056 /* Check if we need to build a native SG list. */ 2057 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, 2058 smid, scmd, pcie_device) == 0)) { 2059 /* We built a native SG list, just return. 
*/ 2060 return 0; 2061 } 2062 2063 sg_scmd = scsi_sglist(scmd); 2064 sges_left = scsi_dma_map(scmd); 2065 if (sges_left < 0) { 2066 sdev_printk(KERN_ERR, scmd->device, 2067 "pci_map_sg failed: request for %d bytes!\n", 2068 scsi_bufflen(scmd)); 2069 return -ENOMEM; 2070 } 2071 2072 sg_local = &mpi_request->SGL; 2073 sges_in_segment = (ioc->request_sz - 2074 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 2075 if (sges_left <= sges_in_segment) 2076 goto fill_in_last_segment; 2077 2078 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 2079 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 2080 2081 /* fill in main message segment when there is a chain following */ 2082 while (sges_in_segment > 1) { 2083 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2084 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2085 sg_scmd = sg_next(sg_scmd); 2086 sg_local += ioc->sge_size_ieee; 2087 sges_left--; 2088 sges_in_segment--; 2089 } 2090 2091 /* initializing the pointers */ 2092 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2093 if (!chain_req) 2094 return -1; 2095 chain = chain_req->chain_buffer; 2096 chain_dma = chain_req->chain_buffer_dma; 2097 do { 2098 sges_in_segment = (sges_left <= 2099 ioc->max_sges_in_chain_message) ? sges_left : 2100 ioc->max_sges_in_chain_message; 2101 chain_offset = (sges_left == sges_in_segment) ? 2102 0 : sges_in_segment; 2103 chain_length = sges_in_segment * ioc->sge_size_ieee; 2104 if (chain_offset) 2105 chain_length += ioc->sge_size_ieee; 2106 _base_add_sg_single_ieee(sg_local, chain_sgl_flags, 2107 chain_offset, chain_length, chain_dma); 2108 2109 sg_local = chain; 2110 if (!chain_offset) 2111 goto fill_in_last_segment; 2112 2113 /* fill in chain segments */ 2114 while (sges_in_segment) { 2115 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2116 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2117 sg_scmd = sg_next(sg_scmd); 2118 sg_local += ioc->sge_size_ieee; 2119 sges_left--; 2120 sges_in_segment--; 2121 } 2122 2123 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2124 if (!chain_req) 2125 return -1; 2126 chain = chain_req->chain_buffer; 2127 chain_dma = chain_req->chain_buffer_dma; 2128 } while (1); 2129 2130 2131 fill_in_last_segment: 2132 2133 /* fill the last segment */ 2134 while (sges_left > 0) { 2135 if (sges_left == 1) 2136 _base_add_sg_single_ieee(sg_local, 2137 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 2138 sg_dma_address(sg_scmd)); 2139 else 2140 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2141 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2142 sg_scmd = sg_next(sg_scmd); 2143 sg_local += ioc->sge_size_ieee; 2144 sges_left--; 2145 } 2146 2147 return 0; 2148 } 2149 2150 /** 2151 * _base_build_sg_ieee - build generic sg for IEEE format 2152 * @ioc: per adapter object 2153 * @psge: virtual address for SGE 2154 * @data_out_dma: physical address for WRITES 2155 * @data_out_sz: data xfer size for WRITES 2156 * @data_in_dma: physical address for READS 2157 * @data_in_sz: data xfer size for READS 2158 * 2159 * Return nothing. 
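 *
 * For a bidirectional request (both sizes non-zero) two simple elements
 * are emitted: the WRITE element first, then the READ element flagged
 * END_OF_LIST, placed ioc->sge_size_ieee bytes after it.  A hypothetical
 * call with data_out_sz = 512 and data_in_sz = 128 therefore produces
 * exactly two IEEE simple SGEs.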
2160 */ 2161 static void 2162 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, 2163 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, 2164 size_t data_in_sz) 2165 { 2166 u8 sgl_flags; 2167 2168 if (!data_out_sz && !data_in_sz) { 2169 _base_build_zero_len_sge_ieee(ioc, psge); 2170 return; 2171 } 2172 2173 if (data_out_sz && data_in_sz) { 2174 /* WRITE sgel first */ 2175 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2176 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2177 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, 2178 data_out_dma); 2179 2180 /* incr sgel */ 2181 psge += ioc->sge_size_ieee; 2182 2183 /* READ sgel last */ 2184 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 2185 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, 2186 data_in_dma); 2187 } else if (data_out_sz) /* WRITE */ { 2188 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2189 MPI25_IEEE_SGE_FLAGS_END_OF_LIST | 2190 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2191 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, 2192 data_out_dma); 2193 } else if (data_in_sz) /* READ */ { 2194 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2195 MPI25_IEEE_SGE_FLAGS_END_OF_LIST | 2196 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2197 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, 2198 data_in_dma); 2199 } 2200 } 2201 2202 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) 2203 2204 /** 2205 * _base_config_dma_addressing - set dma addressing 2206 * @ioc: per adapter object 2207 * @pdev: PCI device struct 2208 * 2209 * Returns 0 for success, non-zero for failure. 2210 */ 2211 static int 2212 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) 2213 { 2214 struct sysinfo s; 2215 u64 consistent_dma_mask; 2216 2217 if (ioc->dma_mask) 2218 consistent_dma_mask = DMA_BIT_MASK(64); 2219 else 2220 consistent_dma_mask = DMA_BIT_MASK(32); 2221 2222 if (sizeof(dma_addr_t) > 4) { 2223 const uint64_t required_mask = 2224 dma_get_required_mask(&pdev->dev); 2225 if ((required_mask > DMA_BIT_MASK(32)) && 2226 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 2227 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { 2228 ioc->base_add_sg_single = &_base_add_sg_single_64; 2229 ioc->sge_size = sizeof(Mpi2SGESimple64_t); 2230 ioc->dma_mask = 64; 2231 goto out; 2232 } 2233 } 2234 2235 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 2236 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 2237 ioc->base_add_sg_single = &_base_add_sg_single_32; 2238 ioc->sge_size = sizeof(Mpi2SGESimple32_t); 2239 ioc->dma_mask = 32; 2240 } else 2241 return -ENODEV; 2242 2243 out: 2244 si_meminfo(&s); 2245 pr_info(MPT3SAS_FMT 2246 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", 2247 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram)); 2248 2249 return 0; 2250 } 2251 2252 static int 2253 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, 2254 struct pci_dev *pdev) 2255 { 2256 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 2257 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 2258 return -ENODEV; 2259 } 2260 return 0; 2261 } 2262 2263 /** 2264 * _base_check_enable_msix - checks MSIX capabable. 
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSIX, and set number
 * of available msix vectors
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* Check whether the controller is a SAS2008 B0; if so,
	 * use IO-APIC instead of MSI-X.
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
			ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */
	/* NUMA_IO not supported for older controllers */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"msix is supported, vector_count(%d)\n",
		ioc->name, ioc->msix_vector_count));
	return 0;
}

/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Frees each interrupt and removes its reply_queue from the list.
 */
static void
_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}

/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Requests the interrupt and inserts the reply_queue into the list.
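 *
 * The queue name encodes the vector for /proc/interrupts; for example
 * (hypothetical adapter id 0), "mpt3sas0-msix3" when MSI-X is enabled,
 * or just "mpt3sas0" in the IO-APIC case.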
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		    reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol.
 *
 * Without affinity hints, the online CPUs are split into groups of
 * nr_cpus / nr_msix (the first nr_cpus % nr_msix queues take one extra
 * CPU); e.g. 8 CPUs over 3 vectors map as 3/3/2.
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (smp_affinity_enable) {
		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
							reply_q->msix_index);
			if (!mask) {
				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
					ioc->name, reply_q->msix_index);
				continue;
			}

			for_each_cpu(cpu, mask)
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
		}
		return;
	}
	cpu = cpumask_first(cpu_online_mask);

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {

		unsigned int i, group = nr_cpus / nr_msix;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}

/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_disable_msix(ioc->pdev);
	ioc->msix_enable = 0;
}

/**
 * _base_enable_msix - enables msix, falling back to io_apic
 * @ioc: per adapter object
 *
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;
	unsigned int irq_flags = PCI_IRQ_MSIX;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto
try_ioapic; 2470 2471 if (_base_check_enable_msix(ioc) != 0) 2472 goto try_ioapic; 2473 2474 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 2475 ioc->msix_vector_count); 2476 2477 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" 2478 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, 2479 ioc->cpu_count, max_msix_vectors); 2480 2481 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 2482 local_max_msix_vectors = (reset_devices) ? 1 : 8; 2483 else 2484 local_max_msix_vectors = max_msix_vectors; 2485 2486 if (local_max_msix_vectors > 0) 2487 ioc->reply_queue_count = min_t(int, local_max_msix_vectors, 2488 ioc->reply_queue_count); 2489 else if (local_max_msix_vectors == 0) 2490 goto try_ioapic; 2491 2492 if (ioc->msix_vector_count < ioc->cpu_count) 2493 smp_affinity_enable = 0; 2494 2495 if (smp_affinity_enable) 2496 irq_flags |= PCI_IRQ_AFFINITY; 2497 2498 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count, 2499 irq_flags); 2500 if (r < 0) { 2501 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2502 "pci_alloc_irq_vectors failed (r=%d) !!!\n", 2503 ioc->name, r)); 2504 goto try_ioapic; 2505 } 2506 2507 ioc->msix_enable = 1; 2508 ioc->reply_queue_count = r; 2509 for (i = 0; i < ioc->reply_queue_count; i++) { 2510 r = _base_request_irq(ioc, i); 2511 if (r) { 2512 _base_free_irq(ioc); 2513 _base_disable_msix(ioc); 2514 goto try_ioapic; 2515 } 2516 } 2517 2518 return 0; 2519 2520 /* failback to io_apic interrupt routing */ 2521 try_ioapic: 2522 2523 ioc->reply_queue_count = 1; 2524 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); 2525 if (r < 0) { 2526 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2527 "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", 2528 ioc->name, r)); 2529 } else 2530 r = _base_request_irq(ioc, 0); 2531 2532 return r; 2533 } 2534 2535 /** 2536 * mpt3sas_base_unmap_resources - free controller resources 2537 * @ioc: per adapter object 2538 */ 2539 static void 2540 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 2541 { 2542 struct pci_dev *pdev = ioc->pdev; 2543 2544 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n", 2545 ioc->name, __func__)); 2546 2547 _base_free_irq(ioc); 2548 _base_disable_msix(ioc); 2549 2550 if (ioc->combined_reply_queue) { 2551 kfree(ioc->replyPostRegisterIndex); 2552 ioc->replyPostRegisterIndex = NULL; 2553 } 2554 2555 if (ioc->chip_phys) { 2556 iounmap(ioc->chip); 2557 ioc->chip_phys = 0; 2558 } 2559 2560 if (pci_is_enabled(pdev)) { 2561 pci_release_selected_regions(ioc->pdev, ioc->bars); 2562 pci_disable_pcie_error_reporting(pdev); 2563 pci_disable_device(pdev); 2564 } 2565 } 2566 2567 /** 2568 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 2569 * @ioc: per adapter object 2570 * 2571 * Returns 0 for success, non-zero for failure. 
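 *
 * A sketch of the sequence (as implemented below): enable the PCI device,
 * request its memory BARs, enable AER reporting, configure the DMA mask,
 * ioremap the chip registers, mask interrupts, read the IOC facts, then
 * enable MSI-X with a fallback to a single legacy vector.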
2572 */ 2573 int 2574 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) 2575 { 2576 struct pci_dev *pdev = ioc->pdev; 2577 u32 memap_sz; 2578 u32 pio_sz; 2579 int i, r = 0; 2580 u64 pio_chip = 0; 2581 u64 chip_phys = 0; 2582 struct adapter_reply_queue *reply_q; 2583 2584 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", 2585 ioc->name, __func__)); 2586 2587 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 2588 if (pci_enable_device_mem(pdev)) { 2589 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", 2590 ioc->name); 2591 ioc->bars = 0; 2592 return -ENODEV; 2593 } 2594 2595 2596 if (pci_request_selected_regions(pdev, ioc->bars, 2597 ioc->driver_name)) { 2598 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", 2599 ioc->name); 2600 ioc->bars = 0; 2601 r = -ENODEV; 2602 goto out_fail; 2603 } 2604 2605 /* AER (Advanced Error Reporting) hooks */ 2606 pci_enable_pcie_error_reporting(pdev); 2607 2608 pci_set_master(pdev); 2609 2610 2611 if (_base_config_dma_addressing(ioc, pdev) != 0) { 2612 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n", 2613 ioc->name, pci_name(pdev)); 2614 r = -ENODEV; 2615 goto out_fail; 2616 } 2617 2618 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && 2619 (!memap_sz || !pio_sz); i++) { 2620 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 2621 if (pio_sz) 2622 continue; 2623 pio_chip = (u64)pci_resource_start(pdev, i); 2624 pio_sz = pci_resource_len(pdev, i); 2625 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 2626 if (memap_sz) 2627 continue; 2628 ioc->chip_phys = pci_resource_start(pdev, i); 2629 chip_phys = (u64)ioc->chip_phys; 2630 memap_sz = pci_resource_len(pdev, i); 2631 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 2632 } 2633 } 2634 2635 if (ioc->chip == NULL) { 2636 pr_err(MPT3SAS_FMT "unable to map adapter memory! " 2637 " or resource not found\n", ioc->name); 2638 r = -EINVAL; 2639 goto out_fail; 2640 } 2641 2642 _base_mask_interrupts(ioc); 2643 2644 r = _base_get_ioc_facts(ioc); 2645 if (r) 2646 goto out_fail; 2647 2648 if (!ioc->rdpq_array_enable_assigned) { 2649 ioc->rdpq_array_enable = ioc->rdpq_array_capable; 2650 ioc->rdpq_array_enable_assigned = 1; 2651 } 2652 2653 r = _base_enable_msix(ioc); 2654 if (r) 2655 goto out_fail; 2656 2657 /* Use the Combined reply queue feature only for SAS3 C0 & higher 2658 * revision HBAs and also only when reply queue count is greater than 8 2659 */ 2660 if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) { 2661 /* Determine the Supplemental Reply Post Host Index Registers 2662 * Addresse. Supplemental Reply Post Host Index Registers 2663 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and 2664 * each register is at offset bytes of 2665 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 
2666 */ 2667 ioc->replyPostRegisterIndex = kcalloc( 2668 ioc->combined_reply_index_count, 2669 sizeof(resource_size_t *), GFP_KERNEL); 2670 if (!ioc->replyPostRegisterIndex) { 2671 dfailprintk(ioc, printk(MPT3SAS_FMT 2672 "allocation for reply Post Register Index failed!!!\n", 2673 ioc->name)); 2674 r = -ENOMEM; 2675 goto out_fail; 2676 } 2677 2678 for (i = 0; i < ioc->combined_reply_index_count; i++) { 2679 ioc->replyPostRegisterIndex[i] = (resource_size_t *) 2680 ((u8 *)&ioc->chip->Doorbell + 2681 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2682 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 2683 } 2684 } else 2685 ioc->combined_reply_queue = 0; 2686 2687 if (ioc->is_warpdrive) { 2688 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) 2689 &ioc->chip->ReplyPostHostIndex; 2690 2691 for (i = 1; i < ioc->cpu_msix_table_sz; i++) 2692 ioc->reply_post_host_index[i] = 2693 (resource_size_t __iomem *) 2694 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) 2695 * 4))); 2696 } 2697 2698 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) 2699 pr_info(MPT3SAS_FMT "%s: IRQ %d\n", 2700 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 2701 "IO-APIC enabled"), 2702 pci_irq_vector(ioc->pdev, reply_q->msix_index)); 2703 2704 pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 2705 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); 2706 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n", 2707 ioc->name, (unsigned long long)pio_chip, pio_sz); 2708 2709 /* Save PCI configuration state for recovery from PCI AER/EEH errors */ 2710 pci_save_state(pdev); 2711 return 0; 2712 2713 out_fail: 2714 mpt3sas_base_unmap_resources(ioc); 2715 return r; 2716 } 2717 2718 /** 2719 * mpt3sas_base_get_msg_frame - obtain request mf pointer 2720 * @ioc: per adapter object 2721 * @smid: system request message index(smid zero is invalid) 2722 * 2723 * Returns virt pointer to message frame. 2724 */ 2725 void * 2726 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2727 { 2728 return (void *)(ioc->request + (smid * ioc->request_sz)); 2729 } 2730 2731 /** 2732 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr 2733 * @ioc: per adapter object 2734 * @smid: system request message index 2735 * 2736 * Returns virt pointer to sense buffer. 2737 */ 2738 void * 2739 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2740 { 2741 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 2742 } 2743 2744 /** 2745 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr 2746 * @ioc: per adapter object 2747 * @smid: system request message index 2748 * 2749 * Returns phys pointer to the low 32bit address of the sense buffer. 2750 */ 2751 __le32 2752 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2753 { 2754 return cpu_to_le32(ioc->sense_dma + ((smid - 1) * 2755 SCSI_SENSE_BUFFERSIZE)); 2756 } 2757 2758 /** 2759 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr 2760 * @ioc: per adapter object 2761 * @smid: system request message index 2762 * 2763 * Returns virt pointer to a PCIe SGL. 2764 */ 2765 void * 2766 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2767 { 2768 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); 2769 } 2770 2771 /** 2772 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr 2773 * @ioc: per adapter object 2774 * @smid: system request message index 2775 * 2776 * Returns phys pointer to the address of the PCIe buffer. 
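 *
 * Note that smid is 1-based: smid N maps to pcie_sg_lookup[N - 1], as with
 * the sense buffer lookups above.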
2777 */ 2778 dma_addr_t 2779 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2780 { 2781 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; 2782 } 2783 2784 /** 2785 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address 2786 * @ioc: per adapter object 2787 * @phys_addr: lower 32 physical addr of the reply 2788 * 2789 * Converts 32bit lower physical addr into a virt address. 2790 */ 2791 void * 2792 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) 2793 { 2794 if (!phys_addr) 2795 return NULL; 2796 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); 2797 } 2798 2799 static inline u8 2800 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) 2801 { 2802 return ioc->cpu_msix_table[raw_smp_processor_id()]; 2803 } 2804 2805 /** 2806 * mpt3sas_base_get_smid - obtain a free smid from internal queue 2807 * @ioc: per adapter object 2808 * @cb_idx: callback index 2809 * 2810 * Returns smid (zero is invalid) 2811 */ 2812 u16 2813 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 2814 { 2815 unsigned long flags; 2816 struct request_tracker *request; 2817 u16 smid; 2818 2819 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2820 if (list_empty(&ioc->internal_free_list)) { 2821 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2822 pr_err(MPT3SAS_FMT "%s: smid not available\n", 2823 ioc->name, __func__); 2824 return 0; 2825 } 2826 2827 request = list_entry(ioc->internal_free_list.next, 2828 struct request_tracker, tracker_list); 2829 request->cb_idx = cb_idx; 2830 smid = request->smid; 2831 list_del(&request->tracker_list); 2832 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2833 return smid; 2834 } 2835 2836 /** 2837 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue 2838 * @ioc: per adapter object 2839 * @cb_idx: callback index 2840 * @scmd: pointer to scsi command object 2841 * 2842 * Returns smid (zero is invalid) 2843 */ 2844 u16 2845 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 2846 struct scsi_cmnd *scmd) 2847 { 2848 struct scsiio_tracker *request = scsi_cmd_priv(scmd); 2849 unsigned int tag = scmd->request->tag; 2850 u16 smid; 2851 2852 smid = tag + 1; 2853 request->cb_idx = cb_idx; 2854 request->msix_io = _base_get_msix_index(ioc); 2855 request->smid = smid; 2856 INIT_LIST_HEAD(&request->chain_list); 2857 return smid; 2858 } 2859 2860 /** 2861 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue 2862 * @ioc: per adapter object 2863 * @cb_idx: callback index 2864 * 2865 * Returns smid (zero is invalid) 2866 */ 2867 u16 2868 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 2869 { 2870 unsigned long flags; 2871 struct request_tracker *request; 2872 u16 smid; 2873 2874 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2875 if (list_empty(&ioc->hpr_free_list)) { 2876 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2877 return 0; 2878 } 2879 2880 request = list_entry(ioc->hpr_free_list.next, 2881 struct request_tracker, tracker_list); 2882 request->cb_idx = cb_idx; 2883 smid = request->smid; 2884 list_del(&request->tracker_list); 2885 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2886 return smid; 2887 } 2888 2889 static void 2890 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) 2891 { 2892 /* 2893 * See _wait_for_commands_to_complete() call with regards to this code. 
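	 *
	 * In short: while the host is in recovery, each freed smid rechecks
	 * the outstanding-command count and wakes the reset thread sleeping
	 * on ioc->reset_wq once it reaches zero.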
2894 */ 2895 if (ioc->shost_recovery && ioc->pending_io_count) { 2896 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy); 2897 if (ioc->pending_io_count == 0) 2898 wake_up(&ioc->reset_wq); 2899 } 2900 } 2901 2902 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, 2903 struct scsiio_tracker *st) 2904 { 2905 if (WARN_ON(st->smid == 0)) 2906 return; 2907 st->cb_idx = 0xFF; 2908 st->direct_io = 0; 2909 if (!list_empty(&st->chain_list)) { 2910 unsigned long flags; 2911 2912 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2913 list_splice_init(&st->chain_list, &ioc->free_chain_list); 2914 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2915 } 2916 } 2917 2918 /** 2919 * mpt3sas_base_free_smid - put smid back on free_list 2920 * @ioc: per adapter object 2921 * @smid: system request message index 2922 * 2923 * Return nothing. 2924 */ 2925 void 2926 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2927 { 2928 unsigned long flags; 2929 int i; 2930 2931 if (smid < ioc->hi_priority_smid) { 2932 struct scsiio_tracker *st; 2933 2934 st = _get_st_from_smid(ioc, smid); 2935 if (!st) { 2936 _base_recovery_check(ioc); 2937 return; 2938 } 2939 mpt3sas_base_clear_st(ioc, st); 2940 _base_recovery_check(ioc); 2941 return; 2942 } 2943 2944 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2945 if (smid < ioc->internal_smid) { 2946 /* hi-priority */ 2947 i = smid - ioc->hi_priority_smid; 2948 ioc->hpr_lookup[i].cb_idx = 0xFF; 2949 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); 2950 } else if (smid <= ioc->hba_queue_depth) { 2951 /* internal queue */ 2952 i = smid - ioc->internal_smid; 2953 ioc->internal_lookup[i].cb_idx = 0xFF; 2954 list_add(&ioc->internal_lookup[i].tracker_list, 2955 &ioc->internal_free_list); 2956 } 2957 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2958 } 2959 2960 /** 2961 * _base_writeq - 64 bit write to MMIO 2962 * @ioc: per adapter object 2963 * @b: data payload 2964 * @addr: address in MMIO space 2965 * @writeq_lock: spin lock 2966 * 2967 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes 2968 * care of 32 bit environment where its not quarenteed to send the entire word 2969 * in one transfer. 2970 */ 2971 #if defined(writeq) && defined(CONFIG_64BIT) 2972 static inline void 2973 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 2974 { 2975 writeq(cpu_to_le64(b), addr); 2976 } 2977 #else 2978 static inline void 2979 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 2980 { 2981 unsigned long flags; 2982 __u64 data_out = cpu_to_le64(b); 2983 2984 spin_lock_irqsave(writeq_lock, flags); 2985 writel((u32)(data_out), addr); 2986 writel((u32)(data_out >> 32), (addr + 4)); 2987 spin_unlock_irqrestore(writeq_lock, flags); 2988 } 2989 #endif 2990 2991 /** 2992 * _base_put_smid_scsi_io - send SCSI_IO request to firmware 2993 * @ioc: per adapter object 2994 * @smid: system request message index 2995 * @handle: device handle 2996 * 2997 * Return nothing. 
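 *
 * The full request descriptor is posted to RequestDescriptorPostLow as one
 * 64-bit MMIO write; on 32-bit kernels without writeq(), _base_writeq above
 * splits it into two 32-bit writes serialized by ioc->scsi_lookup_lock.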
2998 */ 2999 static void 3000 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 3001 { 3002 Mpi2RequestDescriptorUnion_t descriptor; 3003 u64 *request = (u64 *)&descriptor; 3004 3005 3006 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3007 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3008 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3009 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3010 descriptor.SCSIIO.LMID = 0; 3011 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3012 &ioc->scsi_lookup_lock); 3013 } 3014 3015 /** 3016 * _base_put_smid_fast_path - send fast path request to firmware 3017 * @ioc: per adapter object 3018 * @smid: system request message index 3019 * @handle: device handle 3020 * 3021 * Return nothing. 3022 */ 3023 static void 3024 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3025 u16 handle) 3026 { 3027 Mpi2RequestDescriptorUnion_t descriptor; 3028 u64 *request = (u64 *)&descriptor; 3029 3030 descriptor.SCSIIO.RequestFlags = 3031 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 3032 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3033 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3034 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3035 descriptor.SCSIIO.LMID = 0; 3036 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3037 &ioc->scsi_lookup_lock); 3038 } 3039 3040 /** 3041 * _base_put_smid_hi_priority - send Task Management request to firmware 3042 * @ioc: per adapter object 3043 * @smid: system request message index 3044 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. 3045 * Return nothing. 3046 */ 3047 static void 3048 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3049 u16 msix_task) 3050 { 3051 Mpi2RequestDescriptorUnion_t descriptor; 3052 u64 *request = (u64 *)&descriptor; 3053 3054 descriptor.HighPriority.RequestFlags = 3055 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3056 descriptor.HighPriority.MSIxIndex = msix_task; 3057 descriptor.HighPriority.SMID = cpu_to_le16(smid); 3058 descriptor.HighPriority.LMID = 0; 3059 descriptor.HighPriority.Reserved1 = 0; 3060 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3061 &ioc->scsi_lookup_lock); 3062 } 3063 3064 /** 3065 * _base_put_smid_nvme_encap - send NVMe encapsulated request to 3066 * firmware 3067 * @ioc: per adapter object 3068 * @smid: system request message index 3069 * 3070 * Return nothing. 3071 */ 3072 static void 3073 _base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3074 { 3075 Mpi2RequestDescriptorUnion_t descriptor; 3076 u64 *request = (u64 *)&descriptor; 3077 3078 descriptor.Default.RequestFlags = 3079 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; 3080 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3081 descriptor.Default.SMID = cpu_to_le16(smid); 3082 descriptor.Default.LMID = 0; 3083 descriptor.Default.DescriptorTypeDependent = 0; 3084 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3085 &ioc->scsi_lookup_lock); 3086 } 3087 3088 /** 3089 * _base_put_smid_default - Default, primarily used for config pages 3090 * @ioc: per adapter object 3091 * @smid: system request message index 3092 * 3093 * Return nothing. 
3094 */ 3095 static void 3096 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3097 { 3098 Mpi2RequestDescriptorUnion_t descriptor; 3099 u64 *request = (u64 *)&descriptor; 3100 3101 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3102 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3103 descriptor.Default.SMID = cpu_to_le16(smid); 3104 descriptor.Default.LMID = 0; 3105 descriptor.Default.DescriptorTypeDependent = 0; 3106 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3107 &ioc->scsi_lookup_lock); 3108 } 3109 3110 /** 3111 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using 3112 * Atomic Request Descriptor 3113 * @ioc: per adapter object 3114 * @smid: system request message index 3115 * @handle: device handle, unused in this function, for function type match 3116 * 3117 * Return nothing. 3118 */ 3119 static void 3120 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3121 u16 handle) 3122 { 3123 Mpi26AtomicRequestDescriptor_t descriptor; 3124 u32 *request = (u32 *)&descriptor; 3125 3126 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3127 descriptor.MSIxIndex = _base_get_msix_index(ioc); 3128 descriptor.SMID = cpu_to_le16(smid); 3129 3130 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3131 } 3132 3133 /** 3134 * _base_put_smid_fast_path_atomic - send fast path request to firmware 3135 * using Atomic Request Descriptor 3136 * @ioc: per adapter object 3137 * @smid: system request message index 3138 * @handle: device handle, unused in this function, for function type match 3139 * Return nothing 3140 */ 3141 static void 3142 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3143 u16 handle) 3144 { 3145 Mpi26AtomicRequestDescriptor_t descriptor; 3146 u32 *request = (u32 *)&descriptor; 3147 3148 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 3149 descriptor.MSIxIndex = _base_get_msix_index(ioc); 3150 descriptor.SMID = cpu_to_le16(smid); 3151 3152 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3153 } 3154 3155 /** 3156 * _base_put_smid_hi_priority_atomic - send Task Management request to 3157 * firmware using Atomic Request Descriptor 3158 * @ioc: per adapter object 3159 * @smid: system request message index 3160 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0 3161 * 3162 * Return nothing. 3163 */ 3164 static void 3165 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3166 u16 msix_task) 3167 { 3168 Mpi26AtomicRequestDescriptor_t descriptor; 3169 u32 *request = (u32 *)&descriptor; 3170 3171 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3172 descriptor.MSIxIndex = msix_task; 3173 descriptor.SMID = cpu_to_le16(smid); 3174 3175 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3176 } 3177 3178 /** 3179 * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to 3180 * firmware using Atomic Request Descriptor 3181 * @ioc: per adapter object 3182 * @smid: system request message index 3183 * 3184 * Return nothing. 
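 *
 * The atomic variants post only the low 32 bits of the descriptor
 * (RequestFlags, MSIxIndex, SMID) with a single writel(), so no spinlock
 * is needed even on 32-bit platforms.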
 */
static void
_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	descriptor.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages,
 * using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
{
	switch (ioc->pdev->subsystem_vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RMS2LL080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS2LL040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
				break;
			case MPT2SAS_INTEL_SSD910_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_SSD910_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Intel(R) Controller: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RS25GB008_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RS25GB008_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Intel(R) Controller: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_INTEL_RMS3JC080_SSDID:
pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3295 MPT3SAS_INTEL_RMS3JC080_BRANDING); 3296 break; 3297 3298 case MPT3SAS_INTEL_RS3GC008_SSDID: 3299 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3300 MPT3SAS_INTEL_RS3GC008_BRANDING); 3301 break; 3302 case MPT3SAS_INTEL_RS3FC044_SSDID: 3303 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3304 MPT3SAS_INTEL_RS3FC044_BRANDING); 3305 break; 3306 case MPT3SAS_INTEL_RS3UC080_SSDID: 3307 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3308 MPT3SAS_INTEL_RS3UC080_BRANDING); 3309 break; 3310 default: 3311 pr_info(MPT3SAS_FMT 3312 "Intel(R) Controller: Subsystem ID: 0x%X\n", 3313 ioc->name, ioc->pdev->subsystem_device); 3314 break; 3315 } 3316 break; 3317 default: 3318 pr_info(MPT3SAS_FMT 3319 "Intel(R) Controller: Subsystem ID: 0x%X\n", 3320 ioc->name, ioc->pdev->subsystem_device); 3321 break; 3322 } 3323 break; 3324 case PCI_VENDOR_ID_DELL: 3325 switch (ioc->pdev->device) { 3326 case MPI2_MFGPAGE_DEVID_SAS2008: 3327 switch (ioc->pdev->subsystem_device) { 3328 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: 3329 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3330 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); 3331 break; 3332 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: 3333 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3334 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); 3335 break; 3336 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: 3337 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3338 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); 3339 break; 3340 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: 3341 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3342 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); 3343 break; 3344 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: 3345 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3346 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); 3347 break; 3348 case MPT2SAS_DELL_PERC_H200_SSDID: 3349 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3350 MPT2SAS_DELL_PERC_H200_BRANDING); 3351 break; 3352 case MPT2SAS_DELL_6GBPS_SAS_SSDID: 3353 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3354 MPT2SAS_DELL_6GBPS_SAS_BRANDING); 3355 break; 3356 default: 3357 pr_info(MPT3SAS_FMT 3358 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", 3359 ioc->name, ioc->pdev->subsystem_device); 3360 break; 3361 } 3362 break; 3363 case MPI25_MFGPAGE_DEVID_SAS3008: 3364 switch (ioc->pdev->subsystem_device) { 3365 case MPT3SAS_DELL_12G_HBA_SSDID: 3366 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3367 MPT3SAS_DELL_12G_HBA_BRANDING); 3368 break; 3369 default: 3370 pr_info(MPT3SAS_FMT 3371 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", 3372 ioc->name, ioc->pdev->subsystem_device); 3373 break; 3374 } 3375 break; 3376 default: 3377 pr_info(MPT3SAS_FMT 3378 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name, 3379 ioc->pdev->subsystem_device); 3380 break; 3381 } 3382 break; 3383 case PCI_VENDOR_ID_CISCO: 3384 switch (ioc->pdev->device) { 3385 case MPI25_MFGPAGE_DEVID_SAS3008: 3386 switch (ioc->pdev->subsystem_device) { 3387 case MPT3SAS_CISCO_12G_8E_HBA_SSDID: 3388 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3389 MPT3SAS_CISCO_12G_8E_HBA_BRANDING); 3390 break; 3391 case MPT3SAS_CISCO_12G_8I_HBA_SSDID: 3392 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3393 MPT3SAS_CISCO_12G_8I_HBA_BRANDING); 3394 break; 3395 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 3396 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3397 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 3398 break; 3399 default: 3400 pr_info(MPT3SAS_FMT 3401 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 3402 ioc->name, ioc->pdev->subsystem_device); 3403 break; 3404 } 3405 break; 3406 case MPI25_MFGPAGE_DEVID_SAS3108_1: 3407 switch (ioc->pdev->subsystem_device) { 3408 case 
MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			pr_info(MPT3SAS_FMT
			    "Cisco SAS HBA: Subsystem ID: 0x%X\n",
			    ioc->name, ioc->pdev->subsystem_device);
			break;
		}
		break;
	case MPT2SAS_HP_3PAR_SSVID:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2004:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			pr_info(MPT3SAS_FMT
			    "HP SAS HBA: Subsystem ID: 0x%X\n",
			    ioc->name, ioc->pdev->subsystem_device);
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	char desc[16];
	u32 iounit_pg1_flags;
	u32 bios_version;

	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
	strncpy(desc, ioc->manu_pg0.ChipName, 16);
	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
	    "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
	    ioc->name, desc,
	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
	    ioc->facts.FWVersion.Word & 0x000000FF,
	    ioc->pdev->revision,
	    (bios_version & 0xFF000000) >> 24,
	    (bios_version & 0x00FF0000) >> 16,
	    (bios_version & 0x0000FF00) >> 8,
	    bios_version & 0x000000FF);

	_base_display_OEMs_branding(ioc);

	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_info("Initiator");
		i++;
	}
"," : ""); 3525 i++; 3526 } 3527 3528 i = 0; 3529 pr_info("), "); 3530 pr_info("Capabilities=("); 3531 3532 if (!ioc->hide_ir_msg) { 3533 if (ioc->facts.IOCCapabilities & 3534 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 3535 pr_info("Raid"); 3536 i++; 3537 } 3538 } 3539 3540 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 3541 pr_info("%sTLR", i ? "," : ""); 3542 i++; 3543 } 3544 3545 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { 3546 pr_info("%sMulticast", i ? "," : ""); 3547 i++; 3548 } 3549 3550 if (ioc->facts.IOCCapabilities & 3551 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { 3552 pr_info("%sBIDI Target", i ? "," : ""); 3553 i++; 3554 } 3555 3556 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { 3557 pr_info("%sEEDP", i ? "," : ""); 3558 i++; 3559 } 3560 3561 if (ioc->facts.IOCCapabilities & 3562 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { 3563 pr_info("%sSnapshot Buffer", i ? "," : ""); 3564 i++; 3565 } 3566 3567 if (ioc->facts.IOCCapabilities & 3568 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { 3569 pr_info("%sDiag Trace Buffer", i ? "," : ""); 3570 i++; 3571 } 3572 3573 if (ioc->facts.IOCCapabilities & 3574 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { 3575 pr_info("%sDiag Extended Buffer", i ? "," : ""); 3576 i++; 3577 } 3578 3579 if (ioc->facts.IOCCapabilities & 3580 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 3581 pr_info("%sTask Set Full", i ? "," : ""); 3582 i++; 3583 } 3584 3585 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 3586 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { 3587 pr_info("%sNCQ", i ? "," : ""); 3588 i++; 3589 } 3590 3591 pr_info(")\n"); 3592 } 3593 3594 /** 3595 * mpt3sas_base_update_missing_delay - change the missing delay timers 3596 * @ioc: per adapter object 3597 * @device_missing_delay: amount of time till device is reported missing 3598 * @io_missing_delay: interval IO is returned when there is a missing device 3599 * 3600 * Return nothing. 3601 * 3602 * Passed on the command line, this function will modify the device missing 3603 * delay, as well as the io missing delay. This should be called at driver 3604 * load time. 
3605 */ 3606 void 3607 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, 3608 u16 device_missing_delay, u8 io_missing_delay) 3609 { 3610 u16 dmd, dmd_new, dmd_orignal; 3611 u8 io_missing_delay_original; 3612 u16 sz; 3613 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 3614 Mpi2ConfigReply_t mpi_reply; 3615 u8 num_phys = 0; 3616 u16 ioc_status; 3617 3618 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 3619 if (!num_phys) 3620 return; 3621 3622 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * 3623 sizeof(Mpi2SasIOUnit1PhyData_t)); 3624 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 3625 if (!sas_iounit_pg1) { 3626 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3627 ioc->name, __FILE__, __LINE__, __func__); 3628 goto out; 3629 } 3630 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 3631 sas_iounit_pg1, sz))) { 3632 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3633 ioc->name, __FILE__, __LINE__, __func__); 3634 goto out; 3635 } 3636 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 3637 MPI2_IOCSTATUS_MASK; 3638 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 3639 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3640 ioc->name, __FILE__, __LINE__, __func__); 3641 goto out; 3642 } 3643 3644 /* device missing delay */ 3645 dmd = sas_iounit_pg1->ReportDeviceMissingDelay; 3646 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 3647 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 3648 else 3649 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 3650 dmd_orignal = dmd; 3651 if (device_missing_delay > 0x7F) { 3652 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : 3653 device_missing_delay; 3654 dmd = dmd / 16; 3655 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; 3656 } else 3657 dmd = device_missing_delay; 3658 sas_iounit_pg1->ReportDeviceMissingDelay = dmd; 3659 3660 /* io missing delay */ 3661 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; 3662 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; 3663 3664 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, 3665 sz)) { 3666 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 3667 dmd_new = (dmd & 3668 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 3669 else 3670 dmd_new = 3671 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 3672 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n", 3673 ioc->name, dmd_orignal, dmd_new); 3674 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n", 3675 ioc->name, io_missing_delay_original, 3676 io_missing_delay); 3677 ioc->device_missing_delay = dmd_new; 3678 ioc->io_missing_delay = io_missing_delay; 3679 } 3680 3681 out: 3682 kfree(sas_iounit_pg1); 3683 } 3684 /** 3685 * _base_static_config_pages - static start of day config pages 3686 * @ioc: per adapter object 3687 * 3688 * Return nothing. 3689 */ 3690 static void 3691 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) 3692 { 3693 Mpi2ConfigReply_t mpi_reply; 3694 u32 iounit_pg1_flags; 3695 3696 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); 3697 if (ioc->ir_firmware) 3698 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, 3699 &ioc->manu_pg10); 3700 3701 /* 3702 * Ensure correct T10 PI operation if vendor left EEDPTagMode 3703 * flag unset in NVDATA. 
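	 *
	 * A tag mode of 0 would leave protection information tagging
	 * disabled, so the driver forces EEDPTagMode to 1 below and
	 * persists the page.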
     */
    mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
    if (ioc->manu_pg11.EEDPTagMode == 0) {
        pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
            ioc->name);
        ioc->manu_pg11.EEDPTagMode &= ~0x3;
        ioc->manu_pg11.EEDPTagMode |= 0x1;
        mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
            &ioc->manu_pg11);
    }

    mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
    mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
    mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
    mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
    mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
    mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
    _base_display_ioc_capabilities(ioc);

    /*
     * Enable task_set_full handling in iounit_pg1 when the
     * facts capabilities indicate that it's supported.
     */
    iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
    if ((ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
        iounit_pg1_flags &=
            ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
    else
        iounit_pg1_flags |=
            MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
    ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
    mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

    if (ioc->iounit_pg8.NumSensors)
        ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}

/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
    int i = 0;
    struct reply_post_struct *rps;

    dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->request) {
        pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
            ioc->request, ioc->request_dma);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "request_pool(0x%p): free\n",
            ioc->name, ioc->request));
        ioc->request = NULL;
    }

    if (ioc->sense) {
        dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
        dma_pool_destroy(ioc->sense_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "sense_pool(0x%p): free\n",
            ioc->name, ioc->sense));
        ioc->sense = NULL;
    }

    if (ioc->reply) {
        dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
        dma_pool_destroy(ioc->reply_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_pool(0x%p): free\n",
            ioc->name, ioc->reply));
        ioc->reply = NULL;
    }

    if (ioc->reply_free) {
        dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
            ioc->reply_free_dma);
        dma_pool_destroy(ioc->reply_free_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_free_pool(0x%p): free\n",
            ioc->name, ioc->reply_free));
        ioc->reply_free = NULL;
    }

    if (ioc->reply_post) {
        do {
            rps = &ioc->reply_post[i];
            if (rps->reply_post_free) {
                dma_pool_free(
                    ioc->reply_post_free_dma_pool,
                    rps->reply_post_free,
                    rps->reply_post_free_dma);
                dexitprintk(ioc, pr_info(MPT3SAS_FMT
                    "reply_post_free_pool(0x%p): free\n",
                    ioc->name, rps->reply_post_free));
                rps->reply_post_free = NULL;
            }
        } while (ioc->rdpq_array_enable &&
            (++i <
ioc->reply_queue_count));

        dma_pool_destroy(ioc->reply_post_free_dma_pool);
        kfree(ioc->reply_post);
    }

    if (ioc->pcie_sgl_dma_pool) {
        for (i = 0; i < ioc->scsiio_depth; i++) {
            dma_pool_free(ioc->pcie_sgl_dma_pool,
                ioc->pcie_sg_lookup[i].pcie_sgl,
                ioc->pcie_sg_lookup[i].pcie_sgl_dma);
        }
        dma_pool_destroy(ioc->pcie_sgl_dma_pool);
    }

    if (ioc->config_page) {
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "config_page(0x%p): free\n", ioc->name,
            ioc->config_page));
        pci_free_consistent(ioc->pdev, ioc->config_page_sz,
            ioc->config_page, ioc->config_page_dma);
    }

    kfree(ioc->hpr_lookup);
    kfree(ioc->internal_lookup);
    if (ioc->chain_lookup) {
        for (i = 0; i < ioc->chain_depth; i++) {
            if (ioc->chain_lookup[i].chain_buffer)
                dma_pool_free(ioc->chain_dma_pool,
                    ioc->chain_lookup[i].chain_buffer,
                    ioc->chain_lookup[i].chain_buffer_dma);
        }
        dma_pool_destroy(ioc->chain_dma_pool);
        free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
        ioc->chain_lookup = NULL;
    }
}

/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Returns 0 success, anything else error
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
    struct mpt3sas_facts *facts;
    u16 max_sge_elements;
    u16 chains_needed_per_io;
    u32 sz, total_sz, reply_post_free_sz;
    u32 retry_sz;
    u16 max_request_credit, nvme_blocks_needed;
    unsigned short sg_tablesize;
    u16 sge_size;
    int i;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));


    retry_sz = 0;
    facts = &ioc->facts;

    /* command line tunables for max sgl entries */
    if (max_sgl_entries != -1)
        sg_tablesize = max_sgl_entries;
    else {
        if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
            sg_tablesize = MPT2SAS_SG_DEPTH;
        else
            sg_tablesize = MPT3SAS_SG_DEPTH;
    }

    /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
    if (reset_devices)
        sg_tablesize = min_t(unsigned short, sg_tablesize,
            MPT_KDUMP_MIN_PHYS_SEGMENTS);

    if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
        sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
    else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
        sg_tablesize = min_t(unsigned short, sg_tablesize,
            SG_MAX_SEGMENTS);
        pr_warn(MPT3SAS_FMT
            "sg_tablesize(%u) is bigger than"
            " MPT_MAX_PHYS_SEGMENTS(%u)\n", ioc->name,
            sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
    }
    ioc->shost->sg_tablesize = sg_tablesize;

    ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
        (facts->RequestCredit / 4));
    if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
        if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
            INTERNAL_SCSIIO_CMDS_COUNT)) {
            pr_err(MPT3SAS_FMT "IOC doesn't have enough Request"
                " Credits, it has just %d credits\n",
                ioc->name, facts->RequestCredit);
            return -ENOMEM;
        }
        ioc->internal_depth = 10;
    }

    ioc->hi_priority_depth = ioc->internal_depth - (5);
    /* command line tunables for max controller queue depth */
    if (max_queue_depth != -1 && max_queue_depth != 0) {
        max_request_credit = min_t(u16, max_queue_depth +
            ioc->internal_depth, facts->RequestCredit);
        if (max_request_credit >
MAX_HBA_QUEUE_DEPTH)
            max_request_credit = MAX_HBA_QUEUE_DEPTH;
    } else if (reset_devices)
        max_request_credit = min_t(u16, facts->RequestCredit,
            (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
    else
        max_request_credit = min_t(u16, facts->RequestCredit,
            MAX_HBA_QUEUE_DEPTH);

    /* Firmware maintains an additional facts->HighPriorityCredit number
     * of credits for high-priority request messages, so the hba queue
     * depth will be the sum of max_request_credit and the high priority
     * queue depth.
     */
    ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;

    /* request frame size */
    ioc->request_sz = facts->IOCRequestFrameSize * 4;

    /* reply frame size */
    ioc->reply_sz = facts->ReplyFrameSize * 4;

    /* chain segment size */
    if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
        if (facts->IOCMaxChainSegmentSize)
            ioc->chain_segment_sz =
                facts->IOCMaxChainSegmentSize *
                MAX_CHAIN_ELEMT_SZ;
        else
            /* default to a 128-byte chain segment if
             * IOCMaxChainSegmentSize is zero */
            ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
                MAX_CHAIN_ELEMT_SZ;
    } else
        ioc->chain_segment_sz = ioc->request_sz;

    /* calculate the max scatter element size */
    sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

 retry_allocation:
    total_sz = 0;
    /* calculate number of sg elements left over in the 1st frame */
    max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
        sizeof(Mpi2SGEIOUnion_t)) + sge_size);
    ioc->max_sges_in_main_message = max_sge_elements/sge_size;

    /* now do the same for a chain buffer */
    max_sge_elements = ioc->chain_segment_sz - sge_size;
    ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

    /*
     * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
     */
    chains_needed_per_io = ((ioc->shost->sg_tablesize -
        ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
        + 1;
    if (chains_needed_per_io > facts->MaxChainDepth) {
        chains_needed_per_io = facts->MaxChainDepth;
        ioc->shost->sg_tablesize = min_t(u16,
            ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
            * chains_needed_per_io), ioc->shost->sg_tablesize);
    }
    ioc->chains_needed_per_io = chains_needed_per_io;

    /* reply free queue sizing - taking into account 64 FW events */
    ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

    /* calculate reply descriptor post queue depth */
    ioc->reply_post_queue_depth = ioc->hba_queue_depth +
        ioc->reply_free_queue_depth + 1;
    /* align the reply post queue on the next 16 count boundary */
    if (ioc->reply_post_queue_depth % 16)
        ioc->reply_post_queue_depth += 16 -
            (ioc->reply_post_queue_depth % 16);

    if (ioc->reply_post_queue_depth >
        facts->MaxReplyDescriptorPostQueueDepth) {
        ioc->reply_post_queue_depth =
            facts->MaxReplyDescriptorPostQueueDepth -
            (facts->MaxReplyDescriptorPostQueueDepth % 16);
        ioc->hba_queue_depth =
            ((ioc->reply_post_queue_depth - 64) / 2) - 1;
        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
    }

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
        "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
        "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
        ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
        ioc->chains_needed_per_io));

    /* reply post queue,
16 byte align */
    reply_post_free_sz = ioc->reply_post_queue_depth *
        sizeof(Mpi2DefaultReplyDescriptor_t);

    sz = reply_post_free_sz;
    if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
        sz *= ioc->reply_queue_count;

    ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
        (ioc->reply_queue_count):1,
        sizeof(struct reply_post_struct), GFP_KERNEL);

    if (!ioc->reply_post) {
        pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
            ioc->name);
        goto out;
    }
    ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
        &ioc->pdev->dev, sz, 16, 0);
    if (!ioc->reply_post_free_dma_pool) {
        pr_err(MPT3SAS_FMT
            "reply_post_free pool: dma_pool_create failed\n",
            ioc->name);
        goto out;
    }
    i = 0;
    do {
        ioc->reply_post[i].reply_post_free =
            dma_pool_alloc(ioc->reply_post_free_dma_pool,
            GFP_KERNEL,
            &ioc->reply_post[i].reply_post_free_dma);
        if (!ioc->reply_post[i].reply_post_free) {
            pr_err(MPT3SAS_FMT
                "reply_post_free pool: dma_pool_alloc failed\n",
                ioc->name);
            goto out;
        }
        memset(ioc->reply_post[i].reply_post_free, 0, sz);
        dinitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply post free pool (0x%p): depth(%d), "
            "element_size(%d), pool_size(%d kB)\n", ioc->name,
            ioc->reply_post[i].reply_post_free,
            ioc->reply_post_queue_depth, 8, sz/1024));
        dinitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_post_free_dma = (0x%llx)\n", ioc->name,
            (unsigned long long)
            ioc->reply_post[i].reply_post_free_dma));
        total_sz += sz;
    } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

    if (ioc->dma_mask == 64) {
        if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
            pr_warn(MPT3SAS_FMT
                "no suitable consistent DMA mask for %s\n",
                ioc->name, pci_name(ioc->pdev));
            goto out;
        }
    }

    ioc->scsiio_depth = ioc->hba_queue_depth -
        ioc->hi_priority_depth - ioc->internal_depth;

    /* set the scsi host can_queue depth
     * with some internal commands that could be outstanding
     */
    ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "scsi host: can_queue depth (%d)\n",
        ioc->name, ioc->shost->can_queue));


    /* contiguous pool for request and chains, 16 byte align, one extra
     * frame for smid=0
     */
    ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
    sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

    /* hi-priority queue */
    sz += (ioc->hi_priority_depth * ioc->request_sz);

    /* internal queue */
    sz += (ioc->internal_depth * ioc->request_sz);

    ioc->request_dma_sz = sz;
    ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
    if (!ioc->request) {
        pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
            "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
            "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
            ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
        if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
            goto out;
        retry_sz = 64;
        ioc->hba_queue_depth -= retry_sz;
        _base_release_memory_pools(ioc);
        goto retry_allocation;
    }

    if (retry_sz)
        pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
            "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
            "total(%d kB)\n",
ioc->name, ioc->hba_queue_depth, 4111 ioc->chains_needed_per_io, ioc->request_sz, sz/1024); 4112 4113 /* hi-priority queue */ 4114 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * 4115 ioc->request_sz); 4116 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * 4117 ioc->request_sz); 4118 4119 /* internal queue */ 4120 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * 4121 ioc->request_sz); 4122 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * 4123 ioc->request_sz); 4124 4125 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4126 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4127 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, 4128 (ioc->hba_queue_depth * ioc->request_sz)/1024)); 4129 4130 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", 4131 ioc->name, (unsigned long long) ioc->request_dma)); 4132 total_sz += sz; 4133 4134 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", 4135 ioc->name, ioc->request, ioc->scsiio_depth)); 4136 4137 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); 4138 sz = ioc->chain_depth * sizeof(struct chain_tracker); 4139 ioc->chain_pages = get_order(sz); 4140 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( 4141 GFP_KERNEL, ioc->chain_pages); 4142 if (!ioc->chain_lookup) { 4143 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", 4144 ioc->name); 4145 goto out; 4146 } 4147 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, 4148 ioc->chain_segment_sz, 16, 0); 4149 if (!ioc->chain_dma_pool) { 4150 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n", 4151 ioc->name); 4152 goto out; 4153 } 4154 for (i = 0; i < ioc->chain_depth; i++) { 4155 ioc->chain_lookup[i].chain_buffer = dma_pool_alloc( 4156 ioc->chain_dma_pool , GFP_KERNEL, 4157 &ioc->chain_lookup[i].chain_buffer_dma); 4158 if (!ioc->chain_lookup[i].chain_buffer) { 4159 ioc->chain_depth = i; 4160 goto chain_done; 4161 } 4162 total_sz += ioc->chain_segment_sz; 4163 } 4164 chain_done: 4165 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4166 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", 4167 ioc->name, ioc->chain_depth, ioc->chain_segment_sz, 4168 ((ioc->chain_depth * ioc->chain_segment_sz))/1024)); 4169 4170 /* initialize hi-priority queue smid's */ 4171 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 4172 sizeof(struct request_tracker), GFP_KERNEL); 4173 if (!ioc->hpr_lookup) { 4174 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", 4175 ioc->name); 4176 goto out; 4177 } 4178 ioc->hi_priority_smid = ioc->scsiio_depth + 1; 4179 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4180 "hi_priority(0x%p): depth(%d), start smid(%d)\n", 4181 ioc->name, ioc->hi_priority, 4182 ioc->hi_priority_depth, ioc->hi_priority_smid)); 4183 4184 /* initialize internal queue smid's */ 4185 ioc->internal_lookup = kcalloc(ioc->internal_depth, 4186 sizeof(struct request_tracker), GFP_KERNEL); 4187 if (!ioc->internal_lookup) { 4188 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", 4189 ioc->name); 4190 goto out; 4191 } 4192 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; 4193 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4194 "internal(0x%p): depth(%d), start smid(%d)\n", 4195 ioc->name, ioc->internal, 4196 ioc->internal_depth, ioc->internal_smid)); 4197 /* 4198 * The number of NVMe page sized blocks needed is: 4199 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 4200 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry 4201 * 
that is placed in the main message frame. 8 is the size of each PRP 4202 * entry or PRP list pointer entry. 8 is subtracted from page_size 4203 * because of the PRP list pointer entry at the end of a page, so this 4204 * is not counted as a PRP entry. The 1 added page is a round up. 4205 * 4206 * To avoid allocation failures due to the amount of memory that could 4207 * be required for NVMe PRP's, only each set of NVMe blocks will be 4208 * contiguous, so a new set is allocated for each possible I/O. 4209 */ 4210 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 4211 nvme_blocks_needed = 4212 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; 4213 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); 4214 nvme_blocks_needed++; 4215 4216 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; 4217 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); 4218 if (!ioc->pcie_sg_lookup) { 4219 pr_info(MPT3SAS_FMT 4220 "PCIe SGL lookup: kzalloc failed\n", ioc->name); 4221 goto out; 4222 } 4223 sz = nvme_blocks_needed * ioc->page_size; 4224 ioc->pcie_sgl_dma_pool = 4225 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); 4226 if (!ioc->pcie_sgl_dma_pool) { 4227 pr_info(MPT3SAS_FMT 4228 "PCIe SGL pool: dma_pool_create failed\n", 4229 ioc->name); 4230 goto out; 4231 } 4232 for (i = 0; i < ioc->scsiio_depth; i++) { 4233 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( 4234 ioc->pcie_sgl_dma_pool, GFP_KERNEL, 4235 &ioc->pcie_sg_lookup[i].pcie_sgl_dma); 4236 if (!ioc->pcie_sg_lookup[i].pcie_sgl) { 4237 pr_info(MPT3SAS_FMT 4238 "PCIe SGL pool: dma_pool_alloc failed\n", 4239 ioc->name); 4240 goto out; 4241 } 4242 } 4243 4244 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), " 4245 "element_size(%d), pool_size(%d kB)\n", ioc->name, 4246 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); 4247 total_sz += sz * ioc->scsiio_depth; 4248 } 4249 /* sense buffers, 4 byte align */ 4250 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 4251 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4252 4, 0); 4253 if (!ioc->sense_dma_pool) { 4254 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n", 4255 ioc->name); 4256 goto out; 4257 } 4258 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, 4259 &ioc->sense_dma); 4260 if (!ioc->sense) { 4261 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n", 4262 ioc->name); 4263 goto out; 4264 } 4265 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4266 "sense pool(0x%p): depth(%d), element_size(%d), pool_size" 4267 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, 4268 SCSI_SENSE_BUFFERSIZE, sz/1024)); 4269 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", 4270 ioc->name, (unsigned long long)ioc->sense_dma)); 4271 total_sz += sz; 4272 4273 /* reply pool, 4 byte align */ 4274 sz = ioc->reply_free_queue_depth * ioc->reply_sz; 4275 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz, 4276 4, 0); 4277 if (!ioc->reply_dma_pool) { 4278 pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n", 4279 ioc->name); 4280 goto out; 4281 } 4282 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, 4283 &ioc->reply_dma); 4284 if (!ioc->reply) { 4285 pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n", 4286 ioc->name); 4287 goto out; 4288 } 4289 ioc->reply_dma_min_address = (u32)(ioc->reply_dma); 4290 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; 4291 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4292 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4293 ioc->name, 
ioc->reply,
        ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
    dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
        ioc->name, (unsigned long long)ioc->reply_dma));
    total_sz += sz;

    /* reply free queue, 16 byte align */
    sz = ioc->reply_free_queue_depth * 4;
    ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
        &ioc->pdev->dev, sz, 16, 0);
    if (!ioc->reply_free_dma_pool) {
        pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
            ioc->name);
        goto out;
    }
    ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
        &ioc->reply_free_dma);
    if (!ioc->reply_free) {
        pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
            ioc->name);
        goto out;
    }
    memset(ioc->reply_free, 0, sz);
    dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
        "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
        ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "reply_free_dma (0x%llx)\n",
        ioc->name, (unsigned long long)ioc->reply_free_dma));
    total_sz += sz;

    ioc->config_page_sz = 512;
    ioc->config_page = pci_alloc_consistent(ioc->pdev,
        ioc->config_page_sz, &ioc->config_page_dma);
    if (!ioc->config_page) {
        pr_err(MPT3SAS_FMT
            "config page: pci_alloc_consistent failed\n",
            ioc->name);
        goto out;
    }
    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "config page(0x%p): size(%d)\n",
        ioc->name, ioc->config_page, ioc->config_page_sz));
    dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
        ioc->name, (unsigned long long)ioc->config_page_dma));
    total_sz += ioc->config_page_sz;

    pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
        ioc->name, total_sz/1024);
    pr_info(MPT3SAS_FMT
        "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
        ioc->name, ioc->shost->can_queue, facts->RequestCredit);
    pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
        ioc->name, ioc->shost->sg_tablesize);
    return 0;

 out:
    return -ENOMEM;
}

/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
 * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
 */
u32
mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
    u32 s, sc;

    s = readl(&ioc->chip->Doorbell);
    sc = s & MPI2_IOC_STATE_MASK;
    return cooked ? sc : s;
}

/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
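 *
 * The loop below samples the cooked IOC state roughly once per
 * millisecond, so a @timeout of N seconds allows about N * 1000
 * samples; on timeout (or a FAULT state seen after the first sample)
 * the last observed state is returned instead of 0.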
 */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
{
    u32 count, cntdn;
    u32 current_state;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        current_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (current_state == ioc_state)
            return 0;
        if (count && current_state == MPI2_IOC_STATE_FAULT)
            break;

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

    return current_state;
}

static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);

/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 int_status;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        int_status = readl(&ioc->chip->HostInterruptStatus);
        if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        }

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), int_status(%x)!\n",
        ioc->name, __func__, count, int_status);
    return -EFAULT;
}

static int
_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 int_status;

    count = 0;
    cntdn = 2000 * timeout;
    do {
        int_status = readl(&ioc->chip->HostInterruptStatus);
        if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        }

        udelay(500);
        count++;
    } while (--cntdn);

    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), int_status(%x)!\n",
        ioc->name, __func__, count, int_status);
    return -EFAULT;
}

/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
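 *
 * While waiting, a FAULT state reported through the doorbell is logged
 * via mpt3sas_base_fault_info() and fails the wait, and an int_status
 * of all ones (typically a surprise-removed adapter) bails out
 * immediately.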
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 int_status;
    u32 doorbell;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        int_status = readl(&ioc->chip->HostInterruptStatus);
        if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
            doorbell = readl(&ioc->chip->Doorbell);
            if ((doorbell & MPI2_IOC_STATE_MASK) ==
                MPI2_IOC_STATE_FAULT) {
                mpt3sas_base_fault_info(ioc, doorbell);
                return -EFAULT;
            }
        } else if (int_status == 0xFFFFFFFF)
            goto out;

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

 out:
    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), int_status(%x)!\n",
        ioc->name, __func__, count, int_status);
    return -EFAULT;
}

/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 cntdn, count;
    u32 doorbell_reg;

    count = 0;
    cntdn = 1000 * timeout;
    do {
        doorbell_reg = readl(&ioc->chip->Doorbell);
        if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
            dhsprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: successful count(%d), timeout(%d)\n",
                ioc->name, __func__, count, timeout));
            return 0;
        }

        usleep_range(1000, 1500);
        count++;
    } while (--cntdn);

    pr_err(MPT3SAS_FMT
        "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
        ioc->name, __func__, count, doorbell_reg);
    return -EFAULT;
}

/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 *	is supported
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
    u32 ioc_state;
    int r = 0;

    if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
        pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
            ioc->name, __func__);
        return -EFAULT;
    }

    if (!(ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
        return -EFAULT;

    pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

    writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
        &ioc->chip->Doorbell);
    if ((_base_wait_for_doorbell_ack(ioc, 15))) {
        r = -EFAULT;
        goto out;
    }
    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
    if (ioc_state) {
        pr_err(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state);
        r = -EFAULT;
        goto out;
    }
 out:
    pr_info(MPT3SAS_FMT "message unit reset: %s\n",
        ioc->name, ((r == 0) ?
"SUCCESS" : "FAILED")); 4593 return r; 4594 } 4595 4596 /** 4597 * _base_handshake_req_reply_wait - send request thru doorbell interface 4598 * @ioc: per adapter object 4599 * @request_bytes: request length 4600 * @request: pointer having request payload 4601 * @reply_bytes: reply length 4602 * @reply: pointer to reply payload 4603 * @timeout: timeout in second 4604 * 4605 * Returns 0 for success, non-zero for failure. 4606 */ 4607 static int 4608 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 4609 u32 *request, int reply_bytes, u16 *reply, int timeout) 4610 { 4611 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 4612 int i; 4613 u8 failed; 4614 __le32 *mfp; 4615 4616 /* make sure doorbell is not in use */ 4617 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 4618 pr_err(MPT3SAS_FMT 4619 "doorbell is in use (line=%d)\n", 4620 ioc->name, __LINE__); 4621 return -EFAULT; 4622 } 4623 4624 /* clear pending doorbell interrupts from previous state changes */ 4625 if (readl(&ioc->chip->HostInterruptStatus) & 4626 MPI2_HIS_IOC2SYS_DB_STATUS) 4627 writel(0, &ioc->chip->HostInterruptStatus); 4628 4629 /* send message to ioc */ 4630 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 4631 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 4632 &ioc->chip->Doorbell); 4633 4634 if ((_base_spin_on_doorbell_int(ioc, 5))) { 4635 pr_err(MPT3SAS_FMT 4636 "doorbell handshake int failed (line=%d)\n", 4637 ioc->name, __LINE__); 4638 return -EFAULT; 4639 } 4640 writel(0, &ioc->chip->HostInterruptStatus); 4641 4642 if ((_base_wait_for_doorbell_ack(ioc, 5))) { 4643 pr_err(MPT3SAS_FMT 4644 "doorbell handshake ack failed (line=%d)\n", 4645 ioc->name, __LINE__); 4646 return -EFAULT; 4647 } 4648 4649 /* send message 32-bits at a time */ 4650 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 4651 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 4652 if ((_base_wait_for_doorbell_ack(ioc, 5))) 4653 failed = 1; 4654 } 4655 4656 if (failed) { 4657 pr_err(MPT3SAS_FMT 4658 "doorbell handshake sending request failed (line=%d)\n", 4659 ioc->name, __LINE__); 4660 return -EFAULT; 4661 } 4662 4663 /* now wait for the reply */ 4664 if ((_base_wait_for_doorbell_int(ioc, timeout))) { 4665 pr_err(MPT3SAS_FMT 4666 "doorbell handshake int failed (line=%d)\n", 4667 ioc->name, __LINE__); 4668 return -EFAULT; 4669 } 4670 4671 /* read the first two 16-bits, it gives the total length of the reply */ 4672 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4673 & MPI2_DOORBELL_DATA_MASK); 4674 writel(0, &ioc->chip->HostInterruptStatus); 4675 if ((_base_wait_for_doorbell_int(ioc, 5))) { 4676 pr_err(MPT3SAS_FMT 4677 "doorbell handshake int failed (line=%d)\n", 4678 ioc->name, __LINE__); 4679 return -EFAULT; 4680 } 4681 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4682 & MPI2_DOORBELL_DATA_MASK); 4683 writel(0, &ioc->chip->HostInterruptStatus); 4684 4685 for (i = 2; i < default_reply->MsgLength * 2; i++) { 4686 if ((_base_wait_for_doorbell_int(ioc, 5))) { 4687 pr_err(MPT3SAS_FMT 4688 "doorbell handshake int failed (line=%d)\n", 4689 ioc->name, __LINE__); 4690 return -EFAULT; 4691 } 4692 if (i >= reply_bytes/2) /* overflow case */ 4693 readl(&ioc->chip->Doorbell); 4694 else 4695 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4696 & MPI2_DOORBELL_DATA_MASK); 4697 writel(0, &ioc->chip->HostInterruptStatus); 4698 } 4699 4700 _base_wait_for_doorbell_int(ioc, 5); 4701 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { 4702 dhsprintk(ioc, 
pr_info(MPT3SAS_FMT
            "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
    }
    writel(0, &ioc->chip->HostInterruptStatus);

    if (ioc->logging_level & MPT_DEBUG_INIT) {
        mfp = (__le32 *)reply;
        pr_info("\toffset:data\n");
        for (i = 0; i < reply_bytes/4; i++)
            pr_info("\t[0x%02x]:%08x\n", i*4,
                le32_to_cpu(mfp[i]));
    }
    return 0;
}

/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform
 * low-level operations, such as resets on the PHYs of the IO Unit. It
 * also allows the host to obtain the IOC-assigned device handle for a
 * device, given other identifying information about it, and to remove
 * IOC resources associated with the device.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
    Mpi2SasIoUnitControlReply_t *mpi_reply,
    Mpi2SasIoUnitControlRequest_t *mpi_request)
{
    u16 smid;
    u32 ioc_state;
    bool issue_reset = false;
    int rc;
    void *request;
    u16 wait_state_count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mutex_lock(&ioc->base_cmds.mutex);

    if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
        pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    wait_state_count = 0;
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        if (wait_state_count++ == 10) {
            pr_err(MPT3SAS_FMT
                "%s: failed due to ioc not operational\n",
                ioc->name, __func__);
            rc = -EFAULT;
            goto out;
        }
        ssleep(1);
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        pr_info(MPT3SAS_FMT
            "%s: waiting for operational state(count=%d)\n",
            ioc->name, __func__, wait_state_count);
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    rc = 0;
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
    if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
        mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
        ioc->ioc_link_reset_in_progress = 1;
    init_completion(&ioc->base_cmds.done);
    ioc->put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done,
        msecs_to_jiffies(10000));
    if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
        mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
        ioc->ioc_link_reset_in_progress)
        ioc->ioc_link_reset_in_progress = 0;
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        pr_err(MPT3SAS_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2SasIoUnitControlRequest_t)/4);
        if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
            issue_reset = true;
        goto issue_host_reset;
    }
    if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
        memcpy(mpi_reply,
ioc->base_cmds.reply,
            sizeof(Mpi2SasIoUnitControlReply_t));
    else
        memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    goto out;

 issue_host_reset:
    if (issue_reset)
        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    rc = -EFAULT;
 out:
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

/**
 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SCSI Enclosure Processor request message causes the IOC to
 * communicate with SES devices to control LED status signals.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
{
    u16 smid;
    u32 ioc_state;
    bool issue_reset = false;
    int rc;
    void *request;
    u16 wait_state_count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mutex_lock(&ioc->base_cmds.mutex);

    if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
        pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    wait_state_count = 0;
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        if (wait_state_count++ == 10) {
            pr_err(MPT3SAS_FMT
                "%s: failed due to ioc not operational\n",
                ioc->name, __func__);
            rc = -EFAULT;
            goto out;
        }
        ssleep(1);
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        pr_info(MPT3SAS_FMT
            "%s: waiting for operational state(count=%d)\n",
            ioc->name,
            __func__, wait_state_count);
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    rc = 0;
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
    init_completion(&ioc->base_cmds.done);
    ioc->put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done,
        msecs_to_jiffies(10000));
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        pr_err(MPT3SAS_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2SepRequest_t)/4);
        if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
            issue_reset = true;
        goto issue_host_reset;
    }
    if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
        memcpy(mpi_reply, ioc->base_cmds.reply,
            sizeof(Mpi2SepReply_t));
    else
        memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    goto out;

 issue_host_reset:
    if (issue_reset)
        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    rc = -EFAULT;
 out:
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

/**
 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 *
 * Returns 0 for success, non-zero for
failure.
 */
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
{
    Mpi2PortFactsRequest_t mpi_request;
    Mpi2PortFactsReply_t mpi_reply;
    struct mpt3sas_port_facts *pfacts;
    int mpi_reply_sz, mpi_request_sz, r;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
    mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
    memset(&mpi_request, 0, mpi_request_sz);
    mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
    mpi_request.PortNumber = port;
    r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
        (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

    if (r != 0) {
        pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
            ioc->name, __func__, r);
        return r;
    }

    pfacts = &ioc->pfacts[port];
    memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
    pfacts->PortNumber = mpi_reply.PortNumber;
    pfacts->VP_ID = mpi_reply.VP_ID;
    pfacts->VF_ID = mpi_reply.VF_ID;
    pfacts->MaxPostedCmdBuffers =
        le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);

    return 0;
}

/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 ioc_state;
    int rc;

    dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->pci_error_recovery) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: host in pci error recovery\n", ioc->name, __func__));
        return -EFAULT;
    }

    ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
    dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
        ioc->name, __func__, ioc_state));

    if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
        (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
        return 0;

    if (ioc_state & MPI2_DOORBELL_USED) {
        dhsprintk(ioc, printk(MPT3SAS_FMT
            "unexpected doorbell active!\n", ioc->name));
        goto issue_diag_reset;
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt3sas_base_fault_info(ioc, ioc_state &
            MPI2_DOORBELL_DATA_MASK);
        goto issue_diag_reset;
    }

    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
    if (ioc_state) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state));
        return -EFAULT;
    }

    /* the wait above succeeded, so the IOC is READY; don't fall
     * through into the diag reset */
    return 0;

 issue_diag_reset:
    rc = _base_diag_reset(ioc);
    return rc;
}

/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
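 *
 * The facts captured here (RequestCredit, MaxChainDepth, the frame
 * sizes, and the capability flags) are what later drive the pool
 * sizing in _base_allocate_memory_pools().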
5021 */ 5022 static int 5023 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) 5024 { 5025 Mpi2IOCFactsRequest_t mpi_request; 5026 Mpi2IOCFactsReply_t mpi_reply; 5027 struct mpt3sas_facts *facts; 5028 int mpi_reply_sz, mpi_request_sz, r; 5029 5030 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5031 __func__)); 5032 5033 r = _base_wait_for_iocstate(ioc, 10); 5034 if (r) { 5035 dfailprintk(ioc, printk(MPT3SAS_FMT 5036 "%s: failed getting to correct state\n", 5037 ioc->name, __func__)); 5038 return r; 5039 } 5040 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 5041 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 5042 memset(&mpi_request, 0, mpi_request_sz); 5043 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; 5044 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 5045 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 5046 5047 if (r != 0) { 5048 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5049 ioc->name, __func__, r); 5050 return r; 5051 } 5052 5053 facts = &ioc->facts; 5054 memset(facts, 0, sizeof(struct mpt3sas_facts)); 5055 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); 5056 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); 5057 facts->VP_ID = mpi_reply.VP_ID; 5058 facts->VF_ID = mpi_reply.VF_ID; 5059 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); 5060 facts->MaxChainDepth = mpi_reply.MaxChainDepth; 5061 facts->WhoInit = mpi_reply.WhoInit; 5062 facts->NumberOfPorts = mpi_reply.NumberOfPorts; 5063 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; 5064 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); 5065 facts->MaxReplyDescriptorPostQueueDepth = 5066 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); 5067 facts->ProductID = le16_to_cpu(mpi_reply.ProductID); 5068 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); 5069 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 5070 ioc->ir_firmware = 1; 5071 if ((facts->IOCCapabilities & 5072 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) 5073 ioc->rdpq_array_capable = 1; 5074 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) 5075 ioc->atomic_desc_capable = 1; 5076 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 5077 facts->IOCRequestFrameSize = 5078 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 5079 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 5080 facts->IOCMaxChainSegmentSize = 5081 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); 5082 } 5083 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); 5084 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); 5085 ioc->shost->max_id = -1; 5086 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); 5087 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); 5088 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); 5089 facts->HighPriorityCredit = 5090 le16_to_cpu(mpi_reply.HighPriorityCredit); 5091 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; 5092 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); 5093 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; 5094 5095 /* 5096 * Get the Page Size from IOC Facts. If it's 0, default to 4k. 
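 * For example, a CurrentHostPageSize of 12 yields 1 << 12 = 4096
 * bytes; a reported value of 0 yields 1, which the check below
 * catches before forcing the 4k default.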
5097 */ 5098 ioc->page_size = 1 << facts->CurrentHostPageSize; 5099 if (ioc->page_size == 1) { 5100 pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting " 5101 "default host page size to 4k\n", ioc->name); 5102 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; 5103 } 5104 dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n", 5105 ioc->name, facts->CurrentHostPageSize)); 5106 5107 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5108 "hba queue depth(%d), max chains per io(%d)\n", 5109 ioc->name, facts->RequestCredit, 5110 facts->MaxChainDepth)); 5111 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5112 "request frame size(%d), reply frame size(%d)\n", ioc->name, 5113 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); 5114 return 0; 5115 } 5116 5117 /** 5118 * _base_send_ioc_init - send ioc_init to firmware 5119 * @ioc: per adapter object 5120 * 5121 * Returns 0 for success, non-zero for failure. 5122 */ 5123 static int 5124 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) 5125 { 5126 Mpi2IOCInitRequest_t mpi_request; 5127 Mpi2IOCInitReply_t mpi_reply; 5128 int i, r = 0; 5129 ktime_t current_time; 5130 u16 ioc_status; 5131 u32 reply_post_free_array_sz = 0; 5132 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; 5133 dma_addr_t reply_post_free_array_dma; 5134 5135 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5136 __func__)); 5137 5138 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 5139 mpi_request.Function = MPI2_FUNCTION_IOC_INIT; 5140 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 5141 mpi_request.VF_ID = 0; /* TODO */ 5142 mpi_request.VP_ID = 0; 5143 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); 5144 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 5145 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; 5146 5147 if (_base_is_controller_msix_enabled(ioc)) 5148 mpi_request.HostMSIxVectors = ioc->reply_queue_count; 5149 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 5150 mpi_request.ReplyDescriptorPostQueueDepth = 5151 cpu_to_le16(ioc->reply_post_queue_depth); 5152 mpi_request.ReplyFreeQueueDepth = 5153 cpu_to_le16(ioc->reply_free_queue_depth); 5154 5155 mpi_request.SenseBufferAddressHigh = 5156 cpu_to_le32((u64)ioc->sense_dma >> 32); 5157 mpi_request.SystemReplyAddressHigh = 5158 cpu_to_le32((u64)ioc->reply_dma >> 32); 5159 mpi_request.SystemRequestFrameBaseAddress = 5160 cpu_to_le64((u64)ioc->request_dma); 5161 mpi_request.ReplyFreeQueueAddress = 5162 cpu_to_le64((u64)ioc->reply_free_dma); 5163 5164 if (ioc->rdpq_array_enable) { 5165 reply_post_free_array_sz = ioc->reply_queue_count * 5166 sizeof(Mpi2IOCInitRDPQArrayEntry); 5167 reply_post_free_array = pci_alloc_consistent(ioc->pdev, 5168 reply_post_free_array_sz, &reply_post_free_array_dma); 5169 if (!reply_post_free_array) { 5170 pr_err(MPT3SAS_FMT 5171 "reply_post_free_array: pci_alloc_consistent failed\n", 5172 ioc->name); 5173 r = -ENOMEM; 5174 goto out; 5175 } 5176 memset(reply_post_free_array, 0, reply_post_free_array_sz); 5177 for (i = 0; i < ioc->reply_queue_count; i++) 5178 reply_post_free_array[i].RDPQBaseAddress = 5179 cpu_to_le64( 5180 (u64)ioc->reply_post[i].reply_post_free_dma); 5181 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; 5182 mpi_request.ReplyDescriptorPostQueueAddress = 5183 cpu_to_le64((u64)reply_post_free_array_dma); 5184 } else { 5185 mpi_request.ReplyDescriptorPostQueueAddress = 5186 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); 5187 } 5188 5189 /* This time stamp specifies number of milliseconds 
5190 * since epoch ~ midnight January 1, 1970. 5191 */ 5192 current_time = ktime_get_real(); 5193 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); 5194 5195 if (ioc->logging_level & MPT_DEBUG_INIT) { 5196 __le32 *mfp; 5197 int i; 5198 5199 mfp = (__le32 *)&mpi_request; 5200 pr_info("\toffset:data\n"); 5201 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 5202 pr_info("\t[0x%02x]:%08x\n", i*4, 5203 le32_to_cpu(mfp[i])); 5204 } 5205 5206 r = _base_handshake_req_reply_wait(ioc, 5207 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 5208 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); 5209 5210 if (r != 0) { 5211 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5212 ioc->name, __func__, r); 5213 goto out; 5214 } 5215 5216 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 5217 if (ioc_status != MPI2_IOCSTATUS_SUCCESS || 5218 mpi_reply.IOCLogInfo) { 5219 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__); 5220 r = -EIO; 5221 } 5222 5223 out: 5224 if (reply_post_free_array) 5225 pci_free_consistent(ioc->pdev, reply_post_free_array_sz, 5226 reply_post_free_array, 5227 reply_post_free_array_dma); 5228 return r; 5229 } 5230 5231 /** 5232 * mpt3sas_port_enable_done - command completion routine for port enable 5233 * @ioc: per adapter object 5234 * @smid: system request message index 5235 * @msix_index: MSIX table index supplied by the OS 5236 * @reply: reply message frame(lower 32bit addr) 5237 * 5238 * Return 1 meaning mf should be freed from _base_interrupt 5239 * 0 means the mf is freed from this function. 5240 */ 5241 u8 5242 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 5243 u32 reply) 5244 { 5245 MPI2DefaultReply_t *mpi_reply; 5246 u16 ioc_status; 5247 5248 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) 5249 return 1; 5250 5251 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 5252 if (!mpi_reply) 5253 return 1; 5254 5255 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) 5256 return 1; 5257 5258 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; 5259 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; 5260 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; 5261 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 5262 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 5263 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 5264 ioc->port_enable_failed = 1; 5265 5266 if (ioc->is_driver_loading) { 5267 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 5268 mpt3sas_port_enable_complete(ioc); 5269 return 1; 5270 } else { 5271 ioc->start_scan_failed = ioc_status; 5272 ioc->start_scan = 0; 5273 return 1; 5274 } 5275 } 5276 complete(&ioc->port_enable_cmds.done); 5277 return 1; 5278 } 5279 5280 /** 5281 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 5282 * @ioc: per adapter object 5283 * 5284 * Returns 0 for success, non-zero for failure. 
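 *
 * This variant blocks (for up to 300 seconds) waiting for the port
 * enable reply; mpt3sas_port_enable() below issues the same request
 * without waiting, for use when discovery completes asynchronously.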
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2PortEnableRequest_t *mpi_request;
    Mpi2PortEnableReply_t *mpi_reply;
    int r = 0;
    u16 smid;
    u16 ioc_status;

    pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

    if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
        pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->port_enable_cmds.smid = smid;
    memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
    mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

    init_completion(&ioc->port_enable_cmds.done);
    ioc->put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
    if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
        pr_err(MPT3SAS_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2PortEnableRequest_t)/4);
        if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
            r = -EFAULT;
        else
            r = -ETIME;
        goto out;
    }

    mpi_reply = ioc->port_enable_cmds.reply;
    ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
            ioc->name, __func__, ioc_status);
        r = -EFAULT;
        goto out;
    }

 out:
    ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
    pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
        "SUCCESS" : "FAILED"));
    return r;
}

/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2PortEnableRequest_t *mpi_request;
    u16 smid;

    pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

    if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
        pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        return -EAGAIN;
    }

    ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->port_enable_cmds.smid = smid;
    memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
    mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

    ioc->put_smid_default(ioc, smid);
    return 0;
}

/**
 * _base_determine_wait_on_discovery - discovery wait disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Returns 1 for wait, 0 for don't wait
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
    /* We wait for discovery to complete if IR firmware is loaded.
/**
 * _base_determine_wait_on_discovery - decide whether to wait for discovery
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Returns 1 for wait, 0 for don't wait
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time to
	 * turn on the bit in ioc->pd_handles to indicate PD.
	 * Also, it may be required to report Volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no Bios, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* Bios is present, so we drop down here.
	 *
	 * If there are any entries in Bios Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Request Boot Device */
	   (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Alternate Request Boot Device */
	   (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}

/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
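/*
 * The 128 possible event codes map onto the four 32-bit words of
 * ioc->event_masks as word = event / 32, bit = event % 32; a set bit
 * means "masked" and unmasking clears it. A compiled-out sketch of the
 * equivalent arithmetic form (a hypothetical rewrite, shown only to make
 * the mapping explicit) follows:
 */
#if 0
static void example_unmask_event(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	if (event >= 128)
		return;
	/* e.g. event 0x25 (decimal 37) lands in word 1, bit 5 */
	ioc->event_masks[event / 32] &= ~(1U << (event % 32));
}
#endif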
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
		    ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}

/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event type (mask) requested by the application
 *
 * This will turn on firmware event notification when an application
 * asks for that event. Events that are already unmasked are left alone.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}
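/*
 * The nested loop above clears every mask bit that the application wants
 * enabled and that is still masked; only then is the firmware re-armed.
 * A compiled-out sketch of the same per-word test done in one bitwise
 * step (a hypothetical rewrite, shown only to clarify the loop's effect):
 */
#if 0
static bool example_fold_event_type(struct MPT3SAS_ADAPTER *ioc,
	u32 *event_type)
{
	bool send_update_to_fw = false;
	int i;

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		/* bits requested by the app that are currently masked */
		u32 newly_enabled = event_type[i] & ioc->event_masks[i];

		if (newly_enabled) {
			ioc->event_masks[i] &= ~newly_enabled;
			send_update_to_fw = true;
		}
	}
	return send_update_to_fw;
}
#endif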
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	    &ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}
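/*
 * In summary, the diagnostic reset handshake above is: unlock the
 * HostDiagnostic register by writing the six magic keys to WriteSequence,
 * set RESET_ADAPTER and wait for the chip to clear it (an all-ones read
 * means the device fell off the bus), optionally restart from the host
 * controlled boot (HCB) image when HCB mode is indicated, release
 * HOLD_IOC_RESET, relock the register with the flush key, and finally
 * wait for firmware to reach the READY state.
 */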
FAILED\n", ioc->name); 5664 return -EFAULT; 5665 } 5666 5667 /** 5668 * _base_make_ioc_ready - put controller in READY state 5669 * @ioc: per adapter object 5670 * @type: FORCE_BIG_HAMMER or SOFT_RESET 5671 * 5672 * Returns 0 for success, non-zero for failure. 5673 */ 5674 static int 5675 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) 5676 { 5677 u32 ioc_state; 5678 int rc; 5679 int count; 5680 5681 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5682 __func__)); 5683 5684 if (ioc->pci_error_recovery) 5685 return 0; 5686 5687 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 5688 dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", 5689 ioc->name, __func__, ioc_state)); 5690 5691 /* if in RESET state, it should move to READY state shortly */ 5692 count = 0; 5693 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { 5694 while ((ioc_state & MPI2_IOC_STATE_MASK) != 5695 MPI2_IOC_STATE_READY) { 5696 if (count++ == 10) { 5697 pr_err(MPT3SAS_FMT 5698 "%s: failed going to ready state (ioc_state=0x%x)\n", 5699 ioc->name, __func__, ioc_state); 5700 return -EFAULT; 5701 } 5702 ssleep(1); 5703 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 5704 } 5705 } 5706 5707 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 5708 return 0; 5709 5710 if (ioc_state & MPI2_DOORBELL_USED) { 5711 dhsprintk(ioc, pr_info(MPT3SAS_FMT 5712 "unexpected doorbell active!\n", 5713 ioc->name)); 5714 goto issue_diag_reset; 5715 } 5716 5717 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 5718 mpt3sas_base_fault_info(ioc, ioc_state & 5719 MPI2_DOORBELL_DATA_MASK); 5720 goto issue_diag_reset; 5721 } 5722 5723 if (type == FORCE_BIG_HAMMER) 5724 goto issue_diag_reset; 5725 5726 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 5727 if (!(_base_send_ioc_reset(ioc, 5728 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) { 5729 return 0; 5730 } 5731 5732 issue_diag_reset: 5733 rc = _base_diag_reset(ioc); 5734 return rc; 5735 } 5736 5737 /** 5738 * _base_make_ioc_operational - put controller in OPERATIONAL state 5739 * @ioc: per adapter object 5740 * 5741 * Returns 0 for success, non-zero for failure. 
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	/* chain pool */
	INIT_LIST_HEAD(&ioc->free_chain_list);
	for (i = 0; i < ioc->chain_depth; i++)
		list_add_tail(&ioc->chain_lookup[i].tracker_list,
		    &ioc->free_chain_list);

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz)
		ioc->reply_free[i] = cpu_to_le32(reply_address);
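	/*
	 * Note: the Reply Post queues initialized below are pre-filled
	 * with all-ones descriptors (ULLONG_MAX); the interrupt handler
	 * appears to rely on that sentinel to tell unused slots from
	 * descriptors the firmware has actually posted.
	 */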
	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
			    ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r)
		return r;

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	_base_static_config_pages(ioc);

	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	return _base_send_port_enable(ioc);
}

/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
}
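/*
 * Teardown ordering above matters: interrupts are masked and the
 * controller is parked back in the READY state (under shost_recovery so
 * the rest of the driver backs off) before the MMIO resources are
 * unmapped, all of it serialized against other register access through
 * pci_access_mutex.
 */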
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
		    "allocation for cpu_msix_table failed!!!\n",
		    ioc->name));
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			dfailprintk(ioc, pr_info(MPT3SAS_FMT
			    "allocation for reply_post_host_index failed!!!\n",
			    ioc->name));
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_free_resources;

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		break;
	}

	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
		ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
	}
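	/*
	 * Presumably the atomic variants exist because controllers that
	 * advertise atomic_desc_capable accept a single 32-bit Atomic
	 * Request Descriptor write in place of the two-part 64-bit request
	 * descriptor, avoiding a lock around the paired register writes;
	 * that rationale is an assumption on the editor's part, the
	 * dispatch itself is simply per-capability as coded above.
	 */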
	/*
	 * These are the function pointers for requests that don't require
	 * the IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r)
			goto out_free_resources;
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
	    kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}
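	/*
	 * Mask every firmware event by default (all bits set), then clear
	 * the mask bit for each event class the driver actually consumes;
	 * _base_event_notification() later hands these words to firmware.
	 */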
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	ioc->non_operational_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
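/*
 * The detach path below releases the same set of allocations as the
 * attach error path above, after stopping the watchdog and quiescing
 * the controller; the two lists should be kept in sync whenever a new
 * per-adapter allocation is added.
 */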
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}

/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
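/*
 * In the MPT3_IOC_AFTER_RESET phase above, every pending internal command
 * is failed by hand: its tracker is marked MPT3_CMD_RESET so the waiter
 * can tell a reset-kill from a plain timeout, its smid is returned to the
 * free pool, and the waiter is woken via complete(). Waiters such as
 * _base_event_notification() translate MPT3_CMD_RESET into -EFAULT rather
 * than -ETIME.
 */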
/**
 * _wait_for_commands_to_complete - wait for pending I/O to complete
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10 seconds for all pending commands to
 * complete prior to putting the controller into reset.
 */
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
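/*
 * The counter/waitqueue pair used above is presumably serviced from the
 * completion side elsewhere in this file: while a reset is pending, each
 * freed request would decrement pending_io_count and the last one wake
 * reset_wq, which is what lets the wait_event_timeout() above finish
 * early instead of always sleeping the full 10 seconds.
 */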
"SUCCESS" : "FAILED"))); 6408 6409 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 6410 ioc->ioc_reset_in_progress_status = r; 6411 ioc->shost_recovery = 0; 6412 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 6413 ioc->ioc_reset_count++; 6414 mutex_unlock(&ioc->reset_in_progress_mutex); 6415 6416 out_unlocked: 6417 if ((r == 0) && is_trigger) { 6418 if (is_fault) 6419 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); 6420 else 6421 mpt3sas_trigger_master(ioc, 6422 MASTER_TRIGGER_ADAPTER_RESET); 6423 } 6424 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, 6425 __func__)); 6426 return r; 6427 } 6428