/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}
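/*
 * Usage sketch (illustrative, not driver code): because
 * mpt3sas_fwfault_debug is registered via module_param_call() with mode
 * 0644, it can be set at load time or flipped at runtime, and the setter
 * above pushes the new value to every adapter on mpt3sas_ioc_list:
 *
 *   modprobe mpt3sas mpt3sas_fwfault_debug=1
 *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 */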
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function by considering the controller to be in a
		 * non-operational state. So here priority is given to the
		 * EEH recovery. If it does not resolve this issue, the
		 * mpt3sas driver will consider this controller to be
		 * non-operational and remove the dead ioc function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
			    flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			    "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			    ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			    "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			    ioc->name, __func__);
		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
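/*
 * Illustrative decode (values assumed from the standard mpi2.h IOC state
 * definitions): _base_fault_reset_work() above keys off the top nibble of
 * the doorbell register,
 *
 *   state = doorbell & MPI2_IOC_STATE_MASK;   // mask 0xF0000000
 *
 * MPI2_IOC_STATE_OPERATIONAL rearms the poll timer, MPI2_IOC_STATE_FAULT
 * takes the hard-reset path, and an all-ones read (masked value equal to
 * MPI2_IOC_STATE_MASK itself, the signature of a dead or surprise-removed
 * PCI device) takes the dead-ioc removal path.
 */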
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
	    ioc->name, fault_code);
}

/**
 * mpt3sas_halt_firmware - halts mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The intent is
 * to stop both driver and firmware so the end user can obtain a ring
 * buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc, doorbell);
	else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
		    ioc->name);
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
 *  Common IOCStatus values for all replies
 ****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
 *  Config IOCStatus values
 ****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
 *  SCSI IO Reply
 ****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
 ****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
 *  SCSI Target values
 ****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
 *  Serial Attached SCSI values
 ****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
 *  Diagnostic Buffer Post / Diagnostic Release values
 ****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
	    ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
		    (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
		    (event_data->ReasonCode ==
		    MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont(" enumeration_status(0x%08x)",
			    le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32 loginfo;
		struct {
			u32 subcode:16;
			u32 code:8;
			u32 originator:4;
			u32 bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	pr_warn(MPT3SAS_FMT
	    "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
	    ioc->name, log_info,
	    originator_str, sas_loginfo.dw.code,
	    sas_loginfo.dw.subcode);
}
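/*
 * Worked example (illustrative; the bitfield layout above assumes a
 * little-endian host): log_info 0x31120101 decodes as
 *
 *   bus_type   = 0x3    (SAS)
 *   originator = 0x1    (PL)
 *   code       = 0x12
 *   subcode    = 0x0101
 *
 * and _base_sas_log_info() prints
 * "log_info(0x31120101): originator(PL), code(0x12), sub_code(0x0101)".
 */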
/**
 * _base_display_reply_info - translate the reply's ioc_status and log info
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
		    GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
		    &ioc->delayed_event_ack_list);
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "DELAYED: EVENT ACK: event (0x%04x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}
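/*
 * Layout note (numbers are illustrative, not constants from this driver):
 * the smid space is partitioned so _base_get_cb_idx() below can route a
 * completion without a global lookup table for the common case. For
 * example, with scsiio_depth = 100 and INTERNAL_SCSIIO_CMDS_COUNT = 3,
 * ctl_smid = 98:
 *
 *   smid 1..97                    SCSI IO - cb_idx from the scsiio tracker
 *   smid 98                       ctl (ioctl) SCSI IO - ioc->ctl_cb_idx
 *   hi_priority_smid..            hi-priority - ioc->hpr_lookup[]
 *   internal_smid..hba_queue_depth internal  - ioc->internal_lookup[]
 */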
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
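/*
 * Sketch of the queue-advance idiom used throughout _base_interrupt()
 * below (the depth value is an example only): both the reply post queue
 * and the reply free queue are circular, so with a queue depth of 8 and a
 * current index of 7 the next index wraps to 0:
 *
 *   idx = (idx == depth - 1) ? 0 : idx + 1;
 *
 * A descriptor whose 64 bits read back as all ones (rd.u.low/rd.u.high ==
 * UINT_MAX) is an unused slot, which is why the handler stamps consumed
 * descriptors with ULLONG_MAX before advancing.
 */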
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors,
		 * so that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds > ioc->hba_queue_depth/3) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
				    ((msix_index & 7) <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
				    (msix_index <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    &ioc->chip->ReplyPostHostIndex);
			}
			completed_cmds = 1;
		}
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support the combined reply queue feature:
	 * 1. Get the correct Supplemental Reply Post Host Index Register,
	 *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
	 *    Post Host Index Register address bank replyPostRegisterIndex[].
	 * 2. Then update this register with the new reply host index value
	 *    in the ReplyPostIndex field, and the MSIxIndex field with
	 *    msix_index reduced to a value between 0 and 7 using a modulo 8
	 *    operation, since each Supplemental Reply Post Host Index
	 *    Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's, just update the Reply Post Host Index register
	 * with the new reply host index value in the ReplyPostIndex field
	 * and the msix_index value in the MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
 * @ioc: per adapter object
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Return nothing.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}
}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Returns the allocated callback index (cb_idx).
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}
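/*
 * Usage sketch (illustrative; the callback name and the variable holding
 * the index are hypothetical): a sub-module claims a slot at init time and
 * releases it on unload:
 *
 *   my_cb_idx = mpt3sas_base_register_callback_handler(my_done_callback);
 *   ...
 *   mpt3sas_base_release_callback_handler(my_cb_idx);
 *
 * The returned index is stored with each outstanding request, and
 * _base_interrupt() routes completions through mpt_callbacks[cb_idx].
 */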
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}


/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Returns chain tracker (from ioc->free_chain_list)
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_chain_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
		    "chain buffers not available\n", ioc->name));
		return NULL;
	}
	chain_req = list_entry(ioc->free_chain_list.next,
	    struct chain_tracker, tracker_list);
	list_del_init(&chain_req->tracker_list);
	list_add_tail(&chain_req->tracker_list, &st->chain_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return chain_req;
}
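/*
 * Worked example (assuming the standard mpi2.h MPI2_SGE_FLAGS_* encodings:
 * SIMPLE_ELEMENT 0x10, LAST_ELEMENT 0x80, END_OF_BUFFER 0x40,
 * END_OF_LIST 0x01, SHIFT 24): the zero-length SGE built above packs its
 * flags into the top byte of FlagsLength,
 *
 *   (0x80 | 0x40 | 0x01 | 0x10) << 24  ==  0xD1000000
 *
 * with the low 24 bits (the transfer length) left at zero.
 */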
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}

/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does.
 * Note however, that this function is only used by the IOCTL call, so the
 * memory given will be guaranteed to be contiguous. There is no need to
 * translate a non-contiguous SGL into a PRP in this case. All PRPs will
 * describe contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
 * a PRP list pointer or a PRP element, depending upon the command. PRP2
 * contains the second PRP element if the memory being described fits within 2
 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries. Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a 4KB physical memory page, and the offset
 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list. If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the region
 * begins within the 4KB page. The last memory segment may end before the end
 * of the 4KB segment, depending upon the overall size of the memory being
 * described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer
 * length is used to determine where the end of the data memory buffer is
 * located, and how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Returns nothing.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int prp_size = NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len;
	u32 page_mask_result, page_mask;
	size_t length;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}

	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are at
		 * page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so we need to
			 * put a PRP list pointer here. What this does is:
			 *   - bump the current memory pointer to the next
			 *     address, which will be the next full page.
			 *   - set the PRP Entry to point to that page. This
			 *     is now the PRP List pointer.
			 *   - bump the PRP Entry pointer to the start of the
			 *     next page. Since all of this PRP memory is
			 *     contiguous, no need to get a new page - it's
			 *     just the next address.
			 */
			prp_entry_dma += prp_size;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma += prp_size;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* Decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
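/*
 * Worked example (illustrative, assuming ioc->page_size = 4096): a 10 KiB
 * buffer at DMA address 0x10000200 is described by the routine above as
 *
 *   PRP1 = 0x10000200        first segment, 4096 - 0x200 = 3584 bytes
 *   PRP2 = &list[0]          remaining 6656 bytes span two more pages,
 *                            so PRP2 becomes a PRP list pointer
 *   list[0] = 0x10001000
 *   list[1] = 0x10002000
 *
 * Only PRP1 may carry a non-zero offset; every subsequent entry is page
 * aligned, and the total buffer length decides how many entries are needed.
 */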
/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Pages) - SGLs specific
 * to NVMe drives only
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
 * Returns nothing; the caller decides between PRPs and IEEE SGLs via
 * base_is_prp_possible() before calling this routine.
 */
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request,
	u16 smid, int sge_count)
{
	int sge_len, num_prp_in_chain = 0;
	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
	__le64 *curr_buff;
	dma_addr_t msg_dma, sge_addr, offset;
	u32 page_mask, page_mask_result;
	struct scatterlist *sg_scmd;
	u32 first_prp_len;
	int data_len = scsi_bufflen(scmd);
	u32 nvme_pg_size;

	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
	/*
	 * NVMe has a very convoluted PRP format. One PRP is required
	 * for each page or partial page. The driver needs to split up the
	 * OS sg_list entries if they are longer than one page or cross a
	 * page boundary. The driver also has to insert a PRP list pointer
	 * entry as the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message as IEEE 64 format. The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous pcie buffer.
	 */
	page_mask = nvme_pg_size - 1;

	/*
	 * Native SGL is needed.
	 * Put a chain element in the main message frame that points to the
	 * first chain buffer.
	 *
	 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
	 * a native SGL.
	 */

	/* Set main message chain element pointer */
	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	/*
	 * For NVMe the chain element needs to be the 2nd SG entry in the main
	 * message.
	 */
	main_chain_element = (Mpi25IeeeSgeChain64_t *)
		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory. Normal chain buffers can't be used
	 * because each chain buffer would need to be the size of an OS
	 * page (4k).
	 */
	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	main_chain_element->Address = cpu_to_le64(msg_dma);
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
	    MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first PRP; the SGE need not be page aligned. */
	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	sg_scmd = scsi_sglist(scmd);
	sge_addr = sg_dma_address(sg_scmd);
	sge_len = sg_dma_len(sg_scmd);

	offset = sge_addr & page_mask;
	first_prp_len = nvme_pg_size - offset;

	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (data_len && (sge_len == first_prp_len)) {
		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	for (;;) {
		offset = sge_addr & page_mask;

		/* Put PRP pointer due to page boundary */
		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
		if (unlikely(!page_mask_result)) {
			scmd_printk(KERN_NOTICE,
			    scmd, "page boundary curr_buff: 0x%p\n",
			    curr_buff);
			msg_dma += 8;
			*curr_buff = cpu_to_le64(msg_dma);
			curr_buff++;
			num_prp_in_chain++;
		}

		*curr_buff = cpu_to_le64(sge_addr);
		curr_buff++;
		msg_dma += 8;
		num_prp_in_chain++;

		sge_addr += nvme_pg_size;
		sge_len -= nvme_pg_size;
		data_len -= nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	main_chain_element->Length =
	    cpu_to_le32(num_prp_in_chain * sizeof(u64));
}

static bool
base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
	u32 data_length = 0;
	bool build_prp = true;

	data_length = scsi_bufflen(scmd);

	/* If the data length is <= 16K and the number of SG entries is <= 2,
	 * we build an IEEE SGL instead of a PRP.
	 */
	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
		build_prp = false;

	return build_prp;
}
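/*
 * Decision examples for base_is_prp_possible() (illustrative, assuming
 * NVME_PRP_PAGE_SIZE = 4096, i.e. a 16K threshold): an 8 KiB transfer in
 * 2 SG entries stays on the IEEE SGL path, while a 64 KiB transfer in 4
 * SG entries takes the PRP path built by base_make_prp_nvme() above.
 */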
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Returns 0 if native SGL was built, 1 if no SGL was built
 */
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
	struct _pcie_device *pcie_device)
{
	int sges_left;

	/* Map the SG list and get the number of entries. */
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return 1;
	}

	/* Check if we need to build a native SG list. */
	if (base_is_prp_possible(ioc, pcie_device,
				scmd, sges_left) == 0) {
		/* PRPs are not suitable; unmap and build an IEEE SGL. */
		goto out;
	}

	/*
	 * Build native NVMe PRP.
	 */
	base_make_prp_nvme(ioc, scmd, mpi_request,
			smid, sges_left);

	return 0;
out:
	scsi_dma_unmap(scmd);
	return 1;
}

/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer; this MPI2 path never builds native SGLs
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
1881 * 1882 * Returns 0 success, anything else error 1883 */ 1884 static int 1885 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, 1886 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) 1887 { 1888 Mpi2SCSIIORequest_t *mpi_request; 1889 dma_addr_t chain_dma; 1890 struct scatterlist *sg_scmd; 1891 void *sg_local, *chain; 1892 u32 chain_offset; 1893 u32 chain_length; 1894 u32 chain_flags; 1895 int sges_left; 1896 u32 sges_in_segment; 1897 u32 sgl_flags; 1898 u32 sgl_flags_last_element; 1899 u32 sgl_flags_end_buffer; 1900 struct chain_tracker *chain_req; 1901 1902 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1903 1904 /* init scatter gather flags */ 1905 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; 1906 if (scmd->sc_data_direction == DMA_TO_DEVICE) 1907 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 1908 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) 1909 << MPI2_SGE_FLAGS_SHIFT; 1910 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | 1911 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) 1912 << MPI2_SGE_FLAGS_SHIFT; 1913 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1914 1915 sg_scmd = scsi_sglist(scmd); 1916 sges_left = scsi_dma_map(scmd); 1917 if (sges_left < 0) { 1918 sdev_printk(KERN_ERR, scmd->device, 1919 "pci_map_sg failed: request for %d bytes!\n", 1920 scsi_bufflen(scmd)); 1921 return -ENOMEM; 1922 } 1923 1924 sg_local = &mpi_request->SGL; 1925 sges_in_segment = ioc->max_sges_in_main_message; 1926 if (sges_left <= sges_in_segment) 1927 goto fill_in_last_segment; 1928 1929 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + 1930 (sges_in_segment * ioc->sge_size))/4; 1931 1932 /* fill in main message segment when there is a chain following */ 1933 while (sges_in_segment) { 1934 if (sges_in_segment == 1) 1935 ioc->base_add_sg_single(sg_local, 1936 sgl_flags_last_element | sg_dma_len(sg_scmd), 1937 sg_dma_address(sg_scmd)); 1938 else 1939 ioc->base_add_sg_single(sg_local, sgl_flags | 1940 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 1941 sg_scmd = sg_next(sg_scmd); 1942 sg_local += ioc->sge_size; 1943 sges_left--; 1944 sges_in_segment--; 1945 } 1946 1947 /* initializing the chain flags and pointers */ 1948 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 1949 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 1950 if (!chain_req) 1951 return -1; 1952 chain = chain_req->chain_buffer; 1953 chain_dma = chain_req->chain_buffer_dma; 1954 do { 1955 sges_in_segment = (sges_left <= 1956 ioc->max_sges_in_chain_message) ? sges_left : 1957 ioc->max_sges_in_chain_message; 1958 chain_offset = (sges_left == sges_in_segment) ? 
1959 0 : (sges_in_segment * ioc->sge_size)/4; 1960 chain_length = sges_in_segment * ioc->sge_size; 1961 if (chain_offset) { 1962 chain_offset = chain_offset << 1963 MPI2_SGE_CHAIN_OFFSET_SHIFT; 1964 chain_length += ioc->sge_size; 1965 } 1966 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | 1967 chain_length, chain_dma); 1968 sg_local = chain; 1969 if (!chain_offset) 1970 goto fill_in_last_segment; 1971 1972 /* fill in chain segments */ 1973 while (sges_in_segment) { 1974 if (sges_in_segment == 1) 1975 ioc->base_add_sg_single(sg_local, 1976 sgl_flags_last_element | 1977 sg_dma_len(sg_scmd), 1978 sg_dma_address(sg_scmd)); 1979 else 1980 ioc->base_add_sg_single(sg_local, sgl_flags | 1981 sg_dma_len(sg_scmd), 1982 sg_dma_address(sg_scmd)); 1983 sg_scmd = sg_next(sg_scmd); 1984 sg_local += ioc->sge_size; 1985 sges_left--; 1986 sges_in_segment--; 1987 } 1988 1989 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 1990 if (!chain_req) 1991 return -1; 1992 chain = chain_req->chain_buffer; 1993 chain_dma = chain_req->chain_buffer_dma; 1994 } while (1); 1995 1996 1997 fill_in_last_segment: 1998 1999 /* fill the last segment */ 2000 while (sges_left) { 2001 if (sges_left == 1) 2002 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | 2003 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2004 else 2005 ioc->base_add_sg_single(sg_local, sgl_flags | 2006 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2007 sg_scmd = sg_next(sg_scmd); 2008 sg_local += ioc->sge_size; 2009 sges_left--; 2010 } 2011 2012 return 0; 2013 } 2014 2015 /** 2016 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format 2017 * @ioc: per adapter object 2018 * @scmd: scsi command 2019 * @smid: system request message index 2020 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be 2021 * constructed on need. 2022 * Context: none. 2023 * 2024 * The main routine that builds scatter gather table from a given 2025 * scsi request sent via the .queuecommand main handler. 2026 * 2027 * Returns 0 success, anything else error 2028 */ 2029 static int 2030 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 2031 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) 2032 { 2033 Mpi25SCSIIORequest_t *mpi_request; 2034 dma_addr_t chain_dma; 2035 struct scatterlist *sg_scmd; 2036 void *sg_local, *chain; 2037 u32 chain_offset; 2038 u32 chain_length; 2039 int sges_left; 2040 u32 sges_in_segment; 2041 u8 simple_sgl_flags; 2042 u8 simple_sgl_flags_last; 2043 u8 chain_sgl_flags; 2044 struct chain_tracker *chain_req; 2045 2046 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2047 2048 /* init scatter gather flags */ 2049 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2050 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2051 simple_sgl_flags_last = simple_sgl_flags | 2052 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 2053 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2054 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2055 2056 /* Check if we need to build a native SG list. */ 2057 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, 2058 smid, scmd, pcie_device) == 0)) { 2059 /* We built a native SG list, just return. 
*/ 2060 return 0; 2061 } 2062 2063 sg_scmd = scsi_sglist(scmd); 2064 sges_left = scsi_dma_map(scmd); 2065 if (sges_left < 0) { 2066 sdev_printk(KERN_ERR, scmd->device, 2067 "pci_map_sg failed: request for %d bytes!\n", 2068 scsi_bufflen(scmd)); 2069 return -ENOMEM; 2070 } 2071 2072 sg_local = &mpi_request->SGL; 2073 sges_in_segment = (ioc->request_sz - 2074 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 2075 if (sges_left <= sges_in_segment) 2076 goto fill_in_last_segment; 2077 2078 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 2079 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 2080 2081 /* fill in main message segment when there is a chain following */ 2082 while (sges_in_segment > 1) { 2083 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2084 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2085 sg_scmd = sg_next(sg_scmd); 2086 sg_local += ioc->sge_size_ieee; 2087 sges_left--; 2088 sges_in_segment--; 2089 } 2090 2091 /* initializing the pointers */ 2092 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2093 if (!chain_req) 2094 return -1; 2095 chain = chain_req->chain_buffer; 2096 chain_dma = chain_req->chain_buffer_dma; 2097 do { 2098 sges_in_segment = (sges_left <= 2099 ioc->max_sges_in_chain_message) ? sges_left : 2100 ioc->max_sges_in_chain_message; 2101 chain_offset = (sges_left == sges_in_segment) ? 2102 0 : sges_in_segment; 2103 chain_length = sges_in_segment * ioc->sge_size_ieee; 2104 if (chain_offset) 2105 chain_length += ioc->sge_size_ieee; 2106 _base_add_sg_single_ieee(sg_local, chain_sgl_flags, 2107 chain_offset, chain_length, chain_dma); 2108 2109 sg_local = chain; 2110 if (!chain_offset) 2111 goto fill_in_last_segment; 2112 2113 /* fill in chain segments */ 2114 while (sges_in_segment) { 2115 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2116 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2117 sg_scmd = sg_next(sg_scmd); 2118 sg_local += ioc->sge_size_ieee; 2119 sges_left--; 2120 sges_in_segment--; 2121 } 2122 2123 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2124 if (!chain_req) 2125 return -1; 2126 chain = chain_req->chain_buffer; 2127 chain_dma = chain_req->chain_buffer_dma; 2128 } while (1); 2129 2130 2131 fill_in_last_segment: 2132 2133 /* fill the last segment */ 2134 while (sges_left > 0) { 2135 if (sges_left == 1) 2136 _base_add_sg_single_ieee(sg_local, 2137 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 2138 sg_dma_address(sg_scmd)); 2139 else 2140 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2141 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2142 sg_scmd = sg_next(sg_scmd); 2143 sg_local += ioc->sge_size_ieee; 2144 sges_left--; 2145 } 2146 2147 return 0; 2148 } 2149 2150 /** 2151 * _base_build_sg_ieee - build generic sg for IEEE format 2152 * @ioc: per adapter object 2153 * @psge: virtual address for SGE 2154 * @data_out_dma: physical address for WRITES 2155 * @data_out_sz: data xfer size for WRITES 2156 * @data_in_dma: physical address for READS 2157 * @data_in_sz: data xfer size for READS 2158 * 2159 * Return nothing. 
 */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
			data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
			data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
			data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
			data_in_dma);
	}
}

#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))

/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	u64 consistent_dma_mask;

	if (ioc->dma_mask)
		consistent_dma_mask = DMA_BIT_MASK(64);
	else
		consistent_dma_mask = DMA_BIT_MASK(32);

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			ioc->dma_mask = 64;
			goto out;
		}
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		ioc->dma_mask = 32;
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}

static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
	struct pci_dev *pdev)
{
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -ENODEV;
	}
	return 0;
}

/**
 * _base_check_enable_msix - checks MSI-X capability
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSI-X, and set number
 * of available msix vectors
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* SAS2008 B0 controllers must use IO-APIC instead of MSI-X. */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
			ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */
	/* NUMA_IO not supported for older controllers */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"msix is supported, vector_count(%d)\n",
		ioc->name, ioc->msix_vector_count));
	return 0;
}

/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Frees each reply_queue from the list.
 */
static void
_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}

/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserts the new reply_queue into the list.
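 * Each MSI-X reply queue gets its own IRQ, named "<driver><id>-msix<index>"
 * (e.g. "mpt3sas0-msix2"), while the non-MSI-X fallback requests a single
 * IRQ named "<driver><id>".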
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		       reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (smp_affinity_enable) {
		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
							reply_q->msix_index);
			if (!mask) {
				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
					ioc->name, reply_q->msix_index);
				continue;
			}

			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}
	cpu = cpumask_first(cpu_online_mask);

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {

		unsigned int i, group = nr_cpus / nr_msix;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}

/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_disable_msix(ioc->pdev);
	ioc->msix_enable = 0;
}

/**
 * _base_enable_msix - enables msix, falls back to io_apic
 * @ioc: per adapter object
 *
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;
	unsigned int irq_flags = PCI_IRQ_MSIX;

	if (msix_disable == -1
|| msix_disable == 0) 2469 try_msix = 1; 2470 2471 if (!try_msix) 2472 goto try_ioapic; 2473 2474 if (_base_check_enable_msix(ioc) != 0) 2475 goto try_ioapic; 2476 2477 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 2478 ioc->msix_vector_count); 2479 2480 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" 2481 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, 2482 ioc->cpu_count, max_msix_vectors); 2483 2484 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 2485 local_max_msix_vectors = (reset_devices) ? 1 : 8; 2486 else 2487 local_max_msix_vectors = max_msix_vectors; 2488 2489 if (local_max_msix_vectors > 0) 2490 ioc->reply_queue_count = min_t(int, local_max_msix_vectors, 2491 ioc->reply_queue_count); 2492 else if (local_max_msix_vectors == 0) 2493 goto try_ioapic; 2494 2495 if (ioc->msix_vector_count < ioc->cpu_count) 2496 smp_affinity_enable = 0; 2497 2498 if (smp_affinity_enable) 2499 irq_flags |= PCI_IRQ_AFFINITY; 2500 2501 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count, 2502 irq_flags); 2503 if (r < 0) { 2504 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2505 "pci_alloc_irq_vectors failed (r=%d) !!!\n", 2506 ioc->name, r)); 2507 goto try_ioapic; 2508 } 2509 2510 ioc->msix_enable = 1; 2511 ioc->reply_queue_count = r; 2512 for (i = 0; i < ioc->reply_queue_count; i++) { 2513 r = _base_request_irq(ioc, i); 2514 if (r) { 2515 _base_free_irq(ioc); 2516 _base_disable_msix(ioc); 2517 goto try_ioapic; 2518 } 2519 } 2520 2521 return 0; 2522 2523 /* failback to io_apic interrupt routing */ 2524 try_ioapic: 2525 2526 ioc->reply_queue_count = 1; 2527 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); 2528 if (r < 0) { 2529 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2530 "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", 2531 ioc->name, r)); 2532 } else 2533 r = _base_request_irq(ioc, 0); 2534 2535 return r; 2536 } 2537 2538 /** 2539 * mpt3sas_base_unmap_resources - free controller resources 2540 * @ioc: per adapter object 2541 */ 2542 static void 2543 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 2544 { 2545 struct pci_dev *pdev = ioc->pdev; 2546 2547 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n", 2548 ioc->name, __func__)); 2549 2550 _base_free_irq(ioc); 2551 _base_disable_msix(ioc); 2552 2553 if (ioc->combined_reply_queue) { 2554 kfree(ioc->replyPostRegisterIndex); 2555 ioc->replyPostRegisterIndex = NULL; 2556 } 2557 2558 if (ioc->chip_phys) { 2559 iounmap(ioc->chip); 2560 ioc->chip_phys = 0; 2561 } 2562 2563 if (pci_is_enabled(pdev)) { 2564 pci_release_selected_regions(ioc->pdev, ioc->bars); 2565 pci_disable_pcie_error_reporting(pdev); 2566 pci_disable_device(pdev); 2567 } 2568 } 2569 2570 /** 2571 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 2572 * @ioc: per adapter object 2573 * 2574 * Returns 0 for success, non-zero for failure. 
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
			ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
			ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
			ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	     (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = (u64)ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		pr_err(MPT3SAS_FMT
			"unable to map adapter memory or resource not found\n",
			ioc->name);
		r = -EINVAL;
		goto out_fail;
	}

	_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_fail;

	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	/* Use the Combined reply queue feature only for SAS3 C0 & higher
	 * revision HBAs and also only when reply queue count is greater than 8
	 */
	if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
		/* Determine the Supplemental Reply Post Host Index Register
		 * addresses. The Supplemental Reply Post Host Index Registers
		 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
		 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
		 * bytes away from the previous one.
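		 *
		 * That is (sketch of the loop below):
		 *   replyPostRegisterIndex[i] = &chip->Doorbell +
		 *       MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
		 *       i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET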
2669 */ 2670 ioc->replyPostRegisterIndex = kcalloc( 2671 ioc->combined_reply_index_count, 2672 sizeof(resource_size_t *), GFP_KERNEL); 2673 if (!ioc->replyPostRegisterIndex) { 2674 dfailprintk(ioc, printk(MPT3SAS_FMT 2675 "allocation for reply Post Register Index failed!!!\n", 2676 ioc->name)); 2677 r = -ENOMEM; 2678 goto out_fail; 2679 } 2680 2681 for (i = 0; i < ioc->combined_reply_index_count; i++) { 2682 ioc->replyPostRegisterIndex[i] = (resource_size_t *) 2683 ((u8 *)&ioc->chip->Doorbell + 2684 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2685 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 2686 } 2687 } else 2688 ioc->combined_reply_queue = 0; 2689 2690 if (ioc->is_warpdrive) { 2691 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) 2692 &ioc->chip->ReplyPostHostIndex; 2693 2694 for (i = 1; i < ioc->cpu_msix_table_sz; i++) 2695 ioc->reply_post_host_index[i] = 2696 (resource_size_t __iomem *) 2697 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) 2698 * 4))); 2699 } 2700 2701 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) 2702 pr_info(MPT3SAS_FMT "%s: IRQ %d\n", 2703 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 2704 "IO-APIC enabled"), 2705 pci_irq_vector(ioc->pdev, reply_q->msix_index)); 2706 2707 pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 2708 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); 2709 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n", 2710 ioc->name, (unsigned long long)pio_chip, pio_sz); 2711 2712 /* Save PCI configuration state for recovery from PCI AER/EEH errors */ 2713 pci_save_state(pdev); 2714 return 0; 2715 2716 out_fail: 2717 mpt3sas_base_unmap_resources(ioc); 2718 return r; 2719 } 2720 2721 /** 2722 * mpt3sas_base_get_msg_frame - obtain request mf pointer 2723 * @ioc: per adapter object 2724 * @smid: system request message index(smid zero is invalid) 2725 * 2726 * Returns virt pointer to message frame. 2727 */ 2728 void * 2729 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2730 { 2731 return (void *)(ioc->request + (smid * ioc->request_sz)); 2732 } 2733 2734 /** 2735 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr 2736 * @ioc: per adapter object 2737 * @smid: system request message index 2738 * 2739 * Returns virt pointer to sense buffer. 2740 */ 2741 void * 2742 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2743 { 2744 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 2745 } 2746 2747 /** 2748 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr 2749 * @ioc: per adapter object 2750 * @smid: system request message index 2751 * 2752 * Returns phys pointer to the low 32bit address of the sense buffer. 2753 */ 2754 __le32 2755 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2756 { 2757 return cpu_to_le32(ioc->sense_dma + ((smid - 1) * 2758 SCSI_SENSE_BUFFERSIZE)); 2759 } 2760 2761 /** 2762 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr 2763 * @ioc: per adapter object 2764 * @smid: system request message index 2765 * 2766 * Returns virt pointer to a PCIe SGL. 2767 */ 2768 void * 2769 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2770 { 2771 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); 2772 } 2773 2774 /** 2775 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr 2776 * @ioc: per adapter object 2777 * @smid: system request message index 2778 * 2779 * Returns phys pointer to the address of the PCIe buffer. 
2780 */ 2781 dma_addr_t 2782 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2783 { 2784 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; 2785 } 2786 2787 /** 2788 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address 2789 * @ioc: per adapter object 2790 * @phys_addr: lower 32 physical addr of the reply 2791 * 2792 * Converts 32bit lower physical addr into a virt address. 2793 */ 2794 void * 2795 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) 2796 { 2797 if (!phys_addr) 2798 return NULL; 2799 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); 2800 } 2801 2802 static inline u8 2803 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) 2804 { 2805 return ioc->cpu_msix_table[raw_smp_processor_id()]; 2806 } 2807 2808 /** 2809 * mpt3sas_base_get_smid - obtain a free smid from internal queue 2810 * @ioc: per adapter object 2811 * @cb_idx: callback index 2812 * 2813 * Returns smid (zero is invalid) 2814 */ 2815 u16 2816 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 2817 { 2818 unsigned long flags; 2819 struct request_tracker *request; 2820 u16 smid; 2821 2822 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2823 if (list_empty(&ioc->internal_free_list)) { 2824 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2825 pr_err(MPT3SAS_FMT "%s: smid not available\n", 2826 ioc->name, __func__); 2827 return 0; 2828 } 2829 2830 request = list_entry(ioc->internal_free_list.next, 2831 struct request_tracker, tracker_list); 2832 request->cb_idx = cb_idx; 2833 smid = request->smid; 2834 list_del(&request->tracker_list); 2835 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2836 return smid; 2837 } 2838 2839 /** 2840 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue 2841 * @ioc: per adapter object 2842 * @cb_idx: callback index 2843 * @scmd: pointer to scsi command object 2844 * 2845 * Returns smid (zero is invalid) 2846 */ 2847 u16 2848 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 2849 struct scsi_cmnd *scmd) 2850 { 2851 struct scsiio_tracker *request = scsi_cmd_priv(scmd); 2852 unsigned int tag = scmd->request->tag; 2853 u16 smid; 2854 2855 smid = tag + 1; 2856 request->cb_idx = cb_idx; 2857 request->msix_io = _base_get_msix_index(ioc); 2858 request->smid = smid; 2859 INIT_LIST_HEAD(&request->chain_list); 2860 return smid; 2861 } 2862 2863 /** 2864 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue 2865 * @ioc: per adapter object 2866 * @cb_idx: callback index 2867 * 2868 * Returns smid (zero is invalid) 2869 */ 2870 u16 2871 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 2872 { 2873 unsigned long flags; 2874 struct request_tracker *request; 2875 u16 smid; 2876 2877 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2878 if (list_empty(&ioc->hpr_free_list)) { 2879 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2880 return 0; 2881 } 2882 2883 request = list_entry(ioc->hpr_free_list.next, 2884 struct request_tracker, tracker_list); 2885 request->cb_idx = cb_idx; 2886 smid = request->smid; 2887 list_del(&request->tracker_list); 2888 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2889 return smid; 2890 } 2891 2892 static void 2893 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) 2894 { 2895 /* 2896 * See _wait_for_commands_to_complete() call with regards to this code. 
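	 * While shost_recovery is set, the reset path sleeps on reset_wq
	 * until the outstanding I/O count drains to zero; re-sampling
	 * host_busy here on each freed smid is what eventually wakes it.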
	 */
	if (ioc->shost_recovery && ioc->pending_io_count) {
		ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
		if (ioc->pending_io_count == 0)
			wake_up(&ioc->reset_wq);
	}
}

void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
			   struct scsiio_tracker *st)
{
	if (WARN_ON(st->smid == 0))
		return;
	st->cb_idx = 0xFF;
	st->direct_io = 0;
	if (!list_empty(&st->chain_list)) {
		unsigned long flags;

		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
		list_splice_init(&st->chain_list, &ioc->free_chain_list);
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	}
}

/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		st = _get_st_from_smid(ioc, smid);
		if (!st) {
			_base_recovery_check(ioc);
			return;
		}
		mpt3sas_base_clear_st(ioc, st);
		_base_recovery_check(ioc);
		return;
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}

/**
 * _base_writeq - 64 bit write to MMIO
 * @ioc: per adapter object
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 * care of 32-bit environments where it is not guaranteed that the entire word
 * is sent in one transfer.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	writeq(cpu_to_le64(b), addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = cpu_to_le64(b);

	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#endif

/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Return nothing.
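 *
 * Note: the request descriptor is a full 64-bit value and must reach the
 * RequestDescriptorPostLow register pair as one unit, which is why it is
 * posted through _base_writeq() rather than two independent writel()s.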
3001 */ 3002 static void 3003 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 3004 { 3005 Mpi2RequestDescriptorUnion_t descriptor; 3006 u64 *request = (u64 *)&descriptor; 3007 3008 3009 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3010 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3011 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3012 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3013 descriptor.SCSIIO.LMID = 0; 3014 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3015 &ioc->scsi_lookup_lock); 3016 } 3017 3018 /** 3019 * _base_put_smid_fast_path - send fast path request to firmware 3020 * @ioc: per adapter object 3021 * @smid: system request message index 3022 * @handle: device handle 3023 * 3024 * Return nothing. 3025 */ 3026 static void 3027 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3028 u16 handle) 3029 { 3030 Mpi2RequestDescriptorUnion_t descriptor; 3031 u64 *request = (u64 *)&descriptor; 3032 3033 descriptor.SCSIIO.RequestFlags = 3034 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 3035 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3036 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3037 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3038 descriptor.SCSIIO.LMID = 0; 3039 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3040 &ioc->scsi_lookup_lock); 3041 } 3042 3043 /** 3044 * _base_put_smid_hi_priority - send Task Management request to firmware 3045 * @ioc: per adapter object 3046 * @smid: system request message index 3047 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. 3048 * Return nothing. 3049 */ 3050 static void 3051 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3052 u16 msix_task) 3053 { 3054 Mpi2RequestDescriptorUnion_t descriptor; 3055 u64 *request = (u64 *)&descriptor; 3056 3057 descriptor.HighPriority.RequestFlags = 3058 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3059 descriptor.HighPriority.MSIxIndex = msix_task; 3060 descriptor.HighPriority.SMID = cpu_to_le16(smid); 3061 descriptor.HighPriority.LMID = 0; 3062 descriptor.HighPriority.Reserved1 = 0; 3063 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3064 &ioc->scsi_lookup_lock); 3065 } 3066 3067 /** 3068 * _base_put_smid_nvme_encap - send NVMe encapsulated request to 3069 * firmware 3070 * @ioc: per adapter object 3071 * @smid: system request message index 3072 * 3073 * Return nothing. 3074 */ 3075 static void 3076 _base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3077 { 3078 Mpi2RequestDescriptorUnion_t descriptor; 3079 u64 *request = (u64 *)&descriptor; 3080 3081 descriptor.Default.RequestFlags = 3082 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; 3083 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3084 descriptor.Default.SMID = cpu_to_le16(smid); 3085 descriptor.Default.LMID = 0; 3086 descriptor.Default.DescriptorTypeDependent = 0; 3087 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3088 &ioc->scsi_lookup_lock); 3089 } 3090 3091 /** 3092 * _base_put_smid_default - Default, primarily used for config pages 3093 * @ioc: per adapter object 3094 * @smid: system request message index 3095 * 3096 * Return nothing. 
3097 */ 3098 static void 3099 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3100 { 3101 Mpi2RequestDescriptorUnion_t descriptor; 3102 u64 *request = (u64 *)&descriptor; 3103 3104 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3105 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3106 descriptor.Default.SMID = cpu_to_le16(smid); 3107 descriptor.Default.LMID = 0; 3108 descriptor.Default.DescriptorTypeDependent = 0; 3109 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3110 &ioc->scsi_lookup_lock); 3111 } 3112 3113 /** 3114 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using 3115 * Atomic Request Descriptor 3116 * @ioc: per adapter object 3117 * @smid: system request message index 3118 * @handle: device handle, unused in this function, for function type match 3119 * 3120 * Return nothing. 3121 */ 3122 static void 3123 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3124 u16 handle) 3125 { 3126 Mpi26AtomicRequestDescriptor_t descriptor; 3127 u32 *request = (u32 *)&descriptor; 3128 3129 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3130 descriptor.MSIxIndex = _base_get_msix_index(ioc); 3131 descriptor.SMID = cpu_to_le16(smid); 3132 3133 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3134 } 3135 3136 /** 3137 * _base_put_smid_fast_path_atomic - send fast path request to firmware 3138 * using Atomic Request Descriptor 3139 * @ioc: per adapter object 3140 * @smid: system request message index 3141 * @handle: device handle, unused in this function, for function type match 3142 * Return nothing 3143 */ 3144 static void 3145 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3146 u16 handle) 3147 { 3148 Mpi26AtomicRequestDescriptor_t descriptor; 3149 u32 *request = (u32 *)&descriptor; 3150 3151 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 3152 descriptor.MSIxIndex = _base_get_msix_index(ioc); 3153 descriptor.SMID = cpu_to_le16(smid); 3154 3155 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3156 } 3157 3158 /** 3159 * _base_put_smid_hi_priority_atomic - send Task Management request to 3160 * firmware using Atomic Request Descriptor 3161 * @ioc: per adapter object 3162 * @smid: system request message index 3163 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0 3164 * 3165 * Return nothing. 3166 */ 3167 static void 3168 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3169 u16 msix_task) 3170 { 3171 Mpi26AtomicRequestDescriptor_t descriptor; 3172 u32 *request = (u32 *)&descriptor; 3173 3174 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3175 descriptor.MSIxIndex = msix_task; 3176 descriptor.SMID = cpu_to_le16(smid); 3177 3178 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 3179 } 3180 3181 /** 3182 * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to 3183 * firmware using Atomic Request Descriptor 3184 * @ioc: per adapter object 3185 * @smid: system request message index 3186 * 3187 * Return nothing. 
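 *
 * Unlike the 64-bit descriptors above, the atomic variants post a single
 * 32-bit descriptor with one writel(), so no spinlock is needed to keep
 * the post atomic on 32-bit hosts.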
 */
static void
_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	descriptor.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages,
 * using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
{
	switch (ioc->pdev->subsystem_vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RMS2LL080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS2LL040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
				break;
			case MPT2SAS_INTEL_SSD910_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_SSD910_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Intel(R) Controller: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RS25GB008_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RS25GB008_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB040_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB080_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Intel(R) Controller: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_INTEL_RMS3JC080_SSDID:
pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3298 MPT3SAS_INTEL_RMS3JC080_BRANDING); 3299 break; 3300 3301 case MPT3SAS_INTEL_RS3GC008_SSDID: 3302 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3303 MPT3SAS_INTEL_RS3GC008_BRANDING); 3304 break; 3305 case MPT3SAS_INTEL_RS3FC044_SSDID: 3306 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3307 MPT3SAS_INTEL_RS3FC044_BRANDING); 3308 break; 3309 case MPT3SAS_INTEL_RS3UC080_SSDID: 3310 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3311 MPT3SAS_INTEL_RS3UC080_BRANDING); 3312 break; 3313 default: 3314 pr_info(MPT3SAS_FMT 3315 "Intel(R) Controller: Subsystem ID: 0x%X\n", 3316 ioc->name, ioc->pdev->subsystem_device); 3317 break; 3318 } 3319 break; 3320 default: 3321 pr_info(MPT3SAS_FMT 3322 "Intel(R) Controller: Subsystem ID: 0x%X\n", 3323 ioc->name, ioc->pdev->subsystem_device); 3324 break; 3325 } 3326 break; 3327 case PCI_VENDOR_ID_DELL: 3328 switch (ioc->pdev->device) { 3329 case MPI2_MFGPAGE_DEVID_SAS2008: 3330 switch (ioc->pdev->subsystem_device) { 3331 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: 3332 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3333 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); 3334 break; 3335 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: 3336 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3337 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); 3338 break; 3339 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: 3340 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3341 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); 3342 break; 3343 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: 3344 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3345 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); 3346 break; 3347 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: 3348 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3349 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); 3350 break; 3351 case MPT2SAS_DELL_PERC_H200_SSDID: 3352 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3353 MPT2SAS_DELL_PERC_H200_BRANDING); 3354 break; 3355 case MPT2SAS_DELL_6GBPS_SAS_SSDID: 3356 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3357 MPT2SAS_DELL_6GBPS_SAS_BRANDING); 3358 break; 3359 default: 3360 pr_info(MPT3SAS_FMT 3361 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", 3362 ioc->name, ioc->pdev->subsystem_device); 3363 break; 3364 } 3365 break; 3366 case MPI25_MFGPAGE_DEVID_SAS3008: 3367 switch (ioc->pdev->subsystem_device) { 3368 case MPT3SAS_DELL_12G_HBA_SSDID: 3369 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3370 MPT3SAS_DELL_12G_HBA_BRANDING); 3371 break; 3372 default: 3373 pr_info(MPT3SAS_FMT 3374 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", 3375 ioc->name, ioc->pdev->subsystem_device); 3376 break; 3377 } 3378 break; 3379 default: 3380 pr_info(MPT3SAS_FMT 3381 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name, 3382 ioc->pdev->subsystem_device); 3383 break; 3384 } 3385 break; 3386 case PCI_VENDOR_ID_CISCO: 3387 switch (ioc->pdev->device) { 3388 case MPI25_MFGPAGE_DEVID_SAS3008: 3389 switch (ioc->pdev->subsystem_device) { 3390 case MPT3SAS_CISCO_12G_8E_HBA_SSDID: 3391 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3392 MPT3SAS_CISCO_12G_8E_HBA_BRANDING); 3393 break; 3394 case MPT3SAS_CISCO_12G_8I_HBA_SSDID: 3395 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3396 MPT3SAS_CISCO_12G_8I_HBA_BRANDING); 3397 break; 3398 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 3399 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3400 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 3401 break; 3402 default: 3403 pr_info(MPT3SAS_FMT 3404 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 3405 ioc->name, ioc->pdev->subsystem_device); 3406 break; 3407 } 3408 break; 3409 case MPI25_MFGPAGE_DEVID_SAS3108_1: 3410 switch (ioc->pdev->subsystem_device) { 3411 case 
MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
				    );
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			pr_info(MPT3SAS_FMT
			    "Cisco SAS HBA: Subsystem ID: 0x%X\n",
			    ioc->name, ioc->pdev->subsystem_device);
			break;
		}
		break;
	case MPT2SAS_HP_3PAR_SSVID:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2004:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
				    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
				break;
			default:
				pr_info(MPT3SAS_FMT
				    "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
				    ioc->name, ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			pr_info(MPT3SAS_FMT
			    "HP SAS HBA: Subsystem ID: 0x%X\n",
			    ioc->name, ioc->pdev->subsystem_device);
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	char desc[17] = {0};
	u32 iounit_pg1_flags;
	u32 bios_version;

	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
	strncpy(desc, ioc->manu_pg0.ChipName, 16);
	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
	    "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
	    ioc->name, desc,
	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
	    ioc->facts.FWVersion.Word & 0x000000FF,
	    ioc->pdev->revision,
	    (bios_version & 0xFF000000) >> 24,
	    (bios_version & 0x00FF0000) >> 16,
	    (bios_version & 0x0000FF00) >> 8,
	    bios_version & 0x000000FF);

	_base_display_OEMs_branding(ioc);

	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_info("Initiator");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_info("%sTarget", i ? "," : "");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		pr_info("%sNVMe", i ? "," : "");
		i++;
	}

	i = 0;
	pr_info("), ");
	pr_info("Capabilities=(");

	if (!ioc->hide_ir_msg) {
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
			pr_info("Raid");
			i++;
		}
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
		pr_info("%sTLR", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
		pr_info("%sMulticast", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
		pr_info("%sBIDI Target", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
		pr_info("%sEEDP", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
		pr_info("%sSnapshot Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
		pr_info("%sDiag Trace Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		pr_info("%sDiag Extended Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
		pr_info("%sTask Set Full", i ? "," : "");
		i++;
	}

	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
		pr_info("%sNCQ", i ? "," : "");
		i++;
	}

	pr_info(")\n");
}

/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * Return nothing.
 *
 * Passed on the command line, this function will modify the device missing
 * delay, as well as the io missing delay. This should be called at driver
 * load time.
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_original;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	/* device missing delay */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_original = dmd;
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
			ioc->name, dmd_original, dmd_new);
		pr_info(MPT3SAS_FMT "io_missing_delay: old(%d), new(%d)\n",
			ioc->name, io_missing_delay_original,
			io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

 out:
	kfree(sas_iounit_pg1);
}

/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
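	 * When EEDPTagMode reads back as zero, the code below clears the
	 * low two bits, forces mode 1, and writes the page back, so
	 * protection-information tagging still works despite the bad NVDATA.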
3707 */ 3708 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); 3709 if (ioc->manu_pg11.EEDPTagMode == 0) { 3710 pr_err("%s: overriding NVDATA EEDPTagMode setting\n", 3711 ioc->name); 3712 ioc->manu_pg11.EEDPTagMode &= ~0x3; 3713 ioc->manu_pg11.EEDPTagMode |= 0x1; 3714 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, 3715 &ioc->manu_pg11); 3716 } 3717 3718 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); 3719 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); 3720 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); 3721 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); 3722 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 3723 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); 3724 _base_display_ioc_capabilities(ioc); 3725 3726 /* 3727 * Enable task_set_full handling in iounit_pg1 when the 3728 * facts capabilities indicate that it's supported. 3729 */ 3730 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 3731 if ((ioc->facts.IOCCapabilities & 3732 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) 3733 iounit_pg1_flags &= 3734 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 3735 else 3736 iounit_pg1_flags |= 3737 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 3738 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); 3739 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 3740 3741 if (ioc->iounit_pg8.NumSensors) 3742 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; 3743 } 3744 3745 /** 3746 * _base_release_memory_pools - release memory 3747 * @ioc: per adapter object 3748 * 3749 * Free memory allocated from _base_allocate_memory_pools. 3750 * 3751 * Return nothing. 3752 */ 3753 static void 3754 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) 3755 { 3756 int i = 0; 3757 struct reply_post_struct *rps; 3758 3759 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3760 __func__)); 3761 3762 if (ioc->request) { 3763 pci_free_consistent(ioc->pdev, ioc->request_dma_sz, 3764 ioc->request, ioc->request_dma); 3765 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3766 "request_pool(0x%p): free\n", 3767 ioc->name, ioc->request)); 3768 ioc->request = NULL; 3769 } 3770 3771 if (ioc->sense) { 3772 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); 3773 dma_pool_destroy(ioc->sense_dma_pool); 3774 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3775 "sense_pool(0x%p): free\n", 3776 ioc->name, ioc->sense)); 3777 ioc->sense = NULL; 3778 } 3779 3780 if (ioc->reply) { 3781 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); 3782 dma_pool_destroy(ioc->reply_dma_pool); 3783 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3784 "reply_pool(0x%p): free\n", 3785 ioc->name, ioc->reply)); 3786 ioc->reply = NULL; 3787 } 3788 3789 if (ioc->reply_free) { 3790 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, 3791 ioc->reply_free_dma); 3792 dma_pool_destroy(ioc->reply_free_dma_pool); 3793 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3794 "reply_free_pool(0x%p): free\n", 3795 ioc->name, ioc->reply_free)); 3796 ioc->reply_free = NULL; 3797 } 3798 3799 if (ioc->reply_post) { 3800 do { 3801 rps = &ioc->reply_post[i]; 3802 if (rps->reply_post_free) { 3803 dma_pool_free( 3804 ioc->reply_post_free_dma_pool, 3805 rps->reply_post_free, 3806 rps->reply_post_free_dma); 3807 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3808 "reply_post_free_pool(0x%p): free\n", 3809 ioc->name, rps->reply_post_free)); 3810 rps->reply_post_free = NULL; 3811 } 3812 } while (ioc->rdpq_array_enable && 3813 (++i <
ioc->reply_queue_count)); 3814 3815 dma_pool_destroy(ioc->reply_post_free_dma_pool); 3816 kfree(ioc->reply_post); 3817 } 3818 3819 if (ioc->pcie_sgl_dma_pool) { 3820 for (i = 0; i < ioc->scsiio_depth; i++) { 3821 dma_pool_free(ioc->pcie_sgl_dma_pool, 3822 ioc->pcie_sg_lookup[i].pcie_sgl, 3823 ioc->pcie_sg_lookup[i].pcie_sgl_dma); 3824 } 3825 if (ioc->pcie_sgl_dma_pool) 3826 dma_pool_destroy(ioc->pcie_sgl_dma_pool); 3827 } 3828 3829 if (ioc->config_page) { 3830 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3831 "config_page(0x%p): free\n", ioc->name, 3832 ioc->config_page)); 3833 pci_free_consistent(ioc->pdev, ioc->config_page_sz, 3834 ioc->config_page, ioc->config_page_dma); 3835 } 3836 3837 kfree(ioc->hpr_lookup); 3838 kfree(ioc->internal_lookup); 3839 if (ioc->chain_lookup) { 3840 for (i = 0; i < ioc->chain_depth; i++) { 3841 if (ioc->chain_lookup[i].chain_buffer) 3842 dma_pool_free(ioc->chain_dma_pool, 3843 ioc->chain_lookup[i].chain_buffer, 3844 ioc->chain_lookup[i].chain_buffer_dma); 3845 } 3846 dma_pool_destroy(ioc->chain_dma_pool); 3847 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); 3848 ioc->chain_lookup = NULL; 3849 } 3850 } 3851 3852 /** 3853 * _base_allocate_memory_pools - allocate start of day memory pools 3854 * @ioc: per adapter object 3855 * 3856 * Returns 0 success, anything else error 3857 */ 3858 static int 3859 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) 3860 { 3861 struct mpt3sas_facts *facts; 3862 u16 max_sge_elements; 3863 u16 chains_needed_per_io; 3864 u32 sz, total_sz, reply_post_free_sz; 3865 u32 retry_sz; 3866 u16 max_request_credit, nvme_blocks_needed; 3867 unsigned short sg_tablesize; 3868 u16 sge_size; 3869 int i; 3870 3871 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3872 __func__)); 3873 3874 3875 retry_sz = 0; 3876 facts = &ioc->facts; 3877 3878 /* command line tunables for max sgl entries */ 3879 if (max_sgl_entries != -1) 3880 sg_tablesize = max_sgl_entries; 3881 else { 3882 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) 3883 sg_tablesize = MPT2SAS_SG_DEPTH; 3884 else 3885 sg_tablesize = MPT3SAS_SG_DEPTH; 3886 } 3887 3888 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */ 3889 if (reset_devices) 3890 sg_tablesize = min_t(unsigned short, sg_tablesize, 3891 MPT_KDUMP_MIN_PHYS_SEGMENTS); 3892 3893 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) 3894 sg_tablesize = MPT_MIN_PHYS_SEGMENTS; 3895 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { 3896 sg_tablesize = min_t(unsigned short, sg_tablesize, 3897 SG_MAX_SEGMENTS); 3898 pr_warn(MPT3SAS_FMT 3899 "sg_tablesize(%u) is bigger than kernel" 3900 " defined SG_CHUNK_SIZE(%u)\n", ioc->name, 3901 sg_tablesize, MPT_MAX_PHYS_SEGMENTS); 3902 } 3903 ioc->shost->sg_tablesize = sg_tablesize; 3904 3905 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), 3906 (facts->RequestCredit / 4)); 3907 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { 3908 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + 3909 INTERNAL_SCSIIO_CMDS_COUNT)) { 3910 pr_err(MPT3SAS_FMT "IOC doesn't have enough Request Credits," 3911 " it has just %d credits\n", 3912 ioc->name, facts->RequestCredit); 3913 return -ENOMEM; 3914 } 3915 ioc->internal_depth = 10; 3916 } 3917 3918 ioc->hi_priority_depth = ioc->internal_depth - (5); 3919 /* command line tunables for max controller queue depth */ 3920 if (max_queue_depth != -1 && max_queue_depth != 0) { 3921 max_request_credit = min_t(u16, max_queue_depth + 3922 ioc->internal_depth, facts->RequestCredit); 3923 if (max_request_credit >
MAX_HBA_QUEUE_DEPTH) 3924 max_request_credit = MAX_HBA_QUEUE_DEPTH; 3925 } else if (reset_devices) 3926 max_request_credit = min_t(u16, facts->RequestCredit, 3927 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth)); 3928 else 3929 max_request_credit = min_t(u16, facts->RequestCredit, 3930 MAX_HBA_QUEUE_DEPTH); 3931 3932 /* Firmware maintains additional facts->HighPriorityCredit number of 3933 * credits for HiPriority Request messages, so hba queue depth will be 3934 * sum of max_request_credit and high priority queue depth. 3935 */ 3936 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; 3937 3938 /* request frame size */ 3939 ioc->request_sz = facts->IOCRequestFrameSize * 4; 3940 3941 /* reply frame size */ 3942 ioc->reply_sz = facts->ReplyFrameSize * 4; 3943 3944 /* chain segment size */ 3945 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 3946 if (facts->IOCMaxChainSegmentSize) 3947 ioc->chain_segment_sz = 3948 facts->IOCMaxChainSegmentSize * 3949 MAX_CHAIN_ELEMT_SZ; 3950 else 3951 /* set chain segment size to 128 bytes if IOCMaxChainSegmentSize is zero */ 3952 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * 3953 MAX_CHAIN_ELEMT_SZ; 3954 } else 3955 ioc->chain_segment_sz = ioc->request_sz; 3956 3957 /* calculate the max scatter element size */ 3958 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); 3959 3960 retry_allocation: 3961 total_sz = 0; 3962 /* calculate number of sg elements left over in the 1st frame */ 3963 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - 3964 sizeof(Mpi2SGEIOUnion_t)) + sge_size); 3965 ioc->max_sges_in_main_message = max_sge_elements/sge_size; 3966 3967 /* now do the same for a chain buffer */ 3968 max_sge_elements = ioc->chain_segment_sz - sge_size; 3969 ioc->max_sges_in_chain_message = max_sge_elements/sge_size; 3970 3971 /* 3972 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE 3973 */ 3974 chains_needed_per_io = ((ioc->shost->sg_tablesize - 3975 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) 3976 + 1; 3977 if (chains_needed_per_io > facts->MaxChainDepth) { 3978 chains_needed_per_io = facts->MaxChainDepth; 3979 ioc->shost->sg_tablesize = min_t(u16, 3980 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message 3981 * chains_needed_per_io), ioc->shost->sg_tablesize); 3982 } 3983 ioc->chains_needed_per_io = chains_needed_per_io; 3984 3985 /* reply free queue sizing - accounting for 64 FW events */ 3986 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; 3987 3988 /* calculate reply descriptor post queue depth */ 3989 ioc->reply_post_queue_depth = ioc->hba_queue_depth + 3990 ioc->reply_free_queue_depth + 1; 3991 /* align the reply post queue on the next 16 count boundary */ 3992 if (ioc->reply_post_queue_depth % 16) 3993 ioc->reply_post_queue_depth += 16 - 3994 (ioc->reply_post_queue_depth % 16); 3995 3996 if (ioc->reply_post_queue_depth > 3997 facts->MaxReplyDescriptorPostQueueDepth) { 3998 ioc->reply_post_queue_depth = 3999 facts->MaxReplyDescriptorPostQueueDepth - 4000 (facts->MaxReplyDescriptorPostQueueDepth % 16); 4001 ioc->hba_queue_depth = 4002 ((ioc->reply_post_queue_depth - 64) / 2) - 1; 4003 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; 4004 } 4005 4006 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \ 4007 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " 4008 "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, 4009 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, 4010 ioc->chains_needed_per_io)); 4011 4012 /* reply post queue,
16 byte align */ 4013 reply_post_free_sz = ioc->reply_post_queue_depth * 4014 sizeof(Mpi2DefaultReplyDescriptor_t); 4015 4016 sz = reply_post_free_sz; 4017 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) 4018 sz *= ioc->reply_queue_count; 4019 4020 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ? 4021 (ioc->reply_queue_count):1, 4022 sizeof(struct reply_post_struct), GFP_KERNEL); 4023 4024 if (!ioc->reply_post) { 4025 pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n", 4026 ioc->name); 4027 goto out; 4028 } 4029 ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool", 4030 &ioc->pdev->dev, sz, 16, 0); 4031 if (!ioc->reply_post_free_dma_pool) { 4032 pr_err(MPT3SAS_FMT 4033 "reply_post_free pool: dma_pool_create failed\n", 4034 ioc->name); 4035 goto out; 4036 } 4037 i = 0; 4038 do { 4039 ioc->reply_post[i].reply_post_free = 4040 dma_pool_alloc(ioc->reply_post_free_dma_pool, 4041 GFP_KERNEL, 4042 &ioc->reply_post[i].reply_post_free_dma); 4043 if (!ioc->reply_post[i].reply_post_free) { 4044 pr_err(MPT3SAS_FMT 4045 "reply_post_free pool: dma_pool_alloc failed\n", 4046 ioc->name); 4047 goto out; 4048 } 4049 memset(ioc->reply_post[i].reply_post_free, 0, sz); 4050 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4051 "reply post free pool (0x%p): depth(%d)," 4052 "element_size(%d), pool_size(%d kB)\n", ioc->name, 4053 ioc->reply_post[i].reply_post_free, 4054 ioc->reply_post_queue_depth, 8, sz/1024)); 4055 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4056 "reply_post_free_dma = (0x%llx)\n", ioc->name, 4057 (unsigned long long) 4058 ioc->reply_post[i].reply_post_free_dma)); 4059 total_sz += sz; 4060 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); 4061 4062 if (ioc->dma_mask == 64) { 4063 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { 4064 pr_warn(MPT3SAS_FMT 4065 "no suitable consistent DMA mask for %s\n", 4066 ioc->name, pci_name(ioc->pdev)); 4067 goto out; 4068 } 4069 } 4070 4071 ioc->scsiio_depth = ioc->hba_queue_depth - 4072 ioc->hi_priority_depth - ioc->internal_depth; 4073 4074 /* set the scsi host can_queue depth 4075 * with some internal commands that could be outstanding 4076 */ 4077 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; 4078 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4079 "scsi host: can_queue depth (%d)\n", 4080 ioc->name, ioc->shost->can_queue)); 4081 4082 4083 /* contiguous pool for request and chains, 16 byte align, one extra 4084 * frame for smid=0 4085 */ 4086 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; 4087 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); 4088 4089 /* hi-priority queue */ 4090 sz += (ioc->hi_priority_depth * ioc->request_sz); 4091 4092 /* internal queue */ 4093 sz += (ioc->internal_depth * ioc->request_sz); 4094 4095 ioc->request_dma_sz = sz; 4096 ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); 4097 if (!ioc->request) { 4098 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ 4099 "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " 4100 "total(%d kB)\n", ioc->name, ioc->hba_queue_depth, 4101 ioc->chains_needed_per_io, ioc->request_sz, sz/1024); 4102 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) 4103 goto out; 4104 retry_sz = 64; 4105 ioc->hba_queue_depth -= retry_sz; 4106 _base_release_memory_pools(ioc); 4107 goto retry_allocation; 4108 } 4109 4110 if (retry_sz) 4111 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ 4112 "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " 4113 "total(%d kB)\n",
ioc->name, ioc->hba_queue_depth, 4114 ioc->chains_needed_per_io, ioc->request_sz, sz/1024); 4115 4116 /* hi-priority queue */ 4117 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * 4118 ioc->request_sz); 4119 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * 4120 ioc->request_sz); 4121 4122 /* internal queue */ 4123 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * 4124 ioc->request_sz); 4125 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * 4126 ioc->request_sz); 4127 4128 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4129 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4130 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, 4131 (ioc->hba_queue_depth * ioc->request_sz)/1024)); 4132 4133 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", 4134 ioc->name, (unsigned long long) ioc->request_dma)); 4135 total_sz += sz; 4136 4137 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", 4138 ioc->name, ioc->request, ioc->scsiio_depth)); 4139 4140 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); 4141 sz = ioc->chain_depth * sizeof(struct chain_tracker); 4142 ioc->chain_pages = get_order(sz); 4143 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( 4144 GFP_KERNEL, ioc->chain_pages); 4145 if (!ioc->chain_lookup) { 4146 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", 4147 ioc->name); 4148 goto out; 4149 } 4150 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, 4151 ioc->chain_segment_sz, 16, 0); 4152 if (!ioc->chain_dma_pool) { 4153 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n", 4154 ioc->name); 4155 goto out; 4156 } 4157 for (i = 0; i < ioc->chain_depth; i++) { 4158 ioc->chain_lookup[i].chain_buffer = dma_pool_alloc( 4159 ioc->chain_dma_pool , GFP_KERNEL, 4160 &ioc->chain_lookup[i].chain_buffer_dma); 4161 if (!ioc->chain_lookup[i].chain_buffer) { 4162 ioc->chain_depth = i; 4163 goto chain_done; 4164 } 4165 total_sz += ioc->chain_segment_sz; 4166 } 4167 chain_done: 4168 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4169 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", 4170 ioc->name, ioc->chain_depth, ioc->chain_segment_sz, 4171 ((ioc->chain_depth * ioc->chain_segment_sz))/1024)); 4172 4173 /* initialize hi-priority queue smid's */ 4174 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 4175 sizeof(struct request_tracker), GFP_KERNEL); 4176 if (!ioc->hpr_lookup) { 4177 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", 4178 ioc->name); 4179 goto out; 4180 } 4181 ioc->hi_priority_smid = ioc->scsiio_depth + 1; 4182 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4183 "hi_priority(0x%p): depth(%d), start smid(%d)\n", 4184 ioc->name, ioc->hi_priority, 4185 ioc->hi_priority_depth, ioc->hi_priority_smid)); 4186 4187 /* initialize internal queue smid's */ 4188 ioc->internal_lookup = kcalloc(ioc->internal_depth, 4189 sizeof(struct request_tracker), GFP_KERNEL); 4190 if (!ioc->internal_lookup) { 4191 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", 4192 ioc->name); 4193 goto out; 4194 } 4195 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; 4196 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4197 "internal(0x%p): depth(%d), start smid(%d)\n", 4198 ioc->name, ioc->internal, 4199 ioc->internal_depth, ioc->internal_smid)); 4200 /* 4201 * The number of NVMe page sized blocks needed is: 4202 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 4203 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry 4204 * 
that is placed in the main message frame. 8 is the size of each PRP 4205 * entry or PRP list pointer entry. 8 is subtracted from page_size 4206 * because of the PRP list pointer entry at the end of a page, so this 4207 * is not counted as a PRP entry. The 1 added page is a round up. 4208 * 4209 * To avoid allocation failures due to the amount of memory that could 4210 * be required for NVMe PRP's, only each set of NVMe blocks will be 4211 * contiguous, so a new set is allocated for each possible I/O. 4212 */ 4213 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 4214 nvme_blocks_needed = 4215 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; 4216 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); 4217 nvme_blocks_needed++; 4218 4219 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; 4220 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); 4221 if (!ioc->pcie_sg_lookup) { 4222 pr_info(MPT3SAS_FMT 4223 "PCIe SGL lookup: kzalloc failed\n", ioc->name); 4224 goto out; 4225 } 4226 sz = nvme_blocks_needed * ioc->page_size; 4227 ioc->pcie_sgl_dma_pool = 4228 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); 4229 if (!ioc->pcie_sgl_dma_pool) { 4230 pr_info(MPT3SAS_FMT 4231 "PCIe SGL pool: dma_pool_create failed\n", 4232 ioc->name); 4233 goto out; 4234 } 4235 for (i = 0; i < ioc->scsiio_depth; i++) { 4236 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( 4237 ioc->pcie_sgl_dma_pool, GFP_KERNEL, 4238 &ioc->pcie_sg_lookup[i].pcie_sgl_dma); 4239 if (!ioc->pcie_sg_lookup[i].pcie_sgl) { 4240 pr_info(MPT3SAS_FMT 4241 "PCIe SGL pool: dma_pool_alloc failed\n", 4242 ioc->name); 4243 goto out; 4244 } 4245 } 4246 4247 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), " 4248 "element_size(%d), pool_size(%d kB)\n", ioc->name, 4249 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); 4250 total_sz += sz * ioc->scsiio_depth; 4251 } 4252 /* sense buffers, 4 byte align */ 4253 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 4254 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4255 4, 0); 4256 if (!ioc->sense_dma_pool) { 4257 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n", 4258 ioc->name); 4259 goto out; 4260 } 4261 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, 4262 &ioc->sense_dma); 4263 if (!ioc->sense) { 4264 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n", 4265 ioc->name); 4266 goto out; 4267 } 4268 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4269 "sense pool(0x%p): depth(%d), element_size(%d), pool_size" 4270 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, 4271 SCSI_SENSE_BUFFERSIZE, sz/1024)); 4272 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", 4273 ioc->name, (unsigned long long)ioc->sense_dma)); 4274 total_sz += sz; 4275 4276 /* reply pool, 4 byte align */ 4277 sz = ioc->reply_free_queue_depth * ioc->reply_sz; 4278 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz, 4279 4, 0); 4280 if (!ioc->reply_dma_pool) { 4281 pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n", 4282 ioc->name); 4283 goto out; 4284 } 4285 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, 4286 &ioc->reply_dma); 4287 if (!ioc->reply) { 4288 pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n", 4289 ioc->name); 4290 goto out; 4291 } 4292 ioc->reply_dma_min_address = (u32)(ioc->reply_dma); 4293 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; 4294 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4295 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4296 ioc->name, 
ioc->reply, 4297 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); 4298 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n", 4299 ioc->name, (unsigned long long)ioc->reply_dma)); 4300 total_sz += sz; 4301 4302 /* reply free queue, 16 byte align */ 4303 sz = ioc->reply_free_queue_depth * 4; 4304 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool", 4305 &ioc->pdev->dev, sz, 16, 0); 4306 if (!ioc->reply_free_dma_pool) { 4307 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n", 4308 ioc->name); 4309 goto out; 4310 } 4311 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL, 4312 &ioc->reply_free_dma); 4313 if (!ioc->reply_free) { 4314 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n", 4315 ioc->name); 4316 goto out; 4317 } 4318 memset(ioc->reply_free, 0, sz); 4319 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ 4320 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, 4321 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); 4322 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4323 "reply_free_dma (0x%llx)\n", 4324 ioc->name, (unsigned long long)ioc->reply_free_dma)); 4325 total_sz += sz; 4326 4327 ioc->config_page_sz = 512; 4328 ioc->config_page = pci_alloc_consistent(ioc->pdev, 4329 ioc->config_page_sz, &ioc->config_page_dma); 4330 if (!ioc->config_page) { 4331 pr_err(MPT3SAS_FMT 4332 "config page: pci_alloc_consistent failed\n", 4333 ioc->name); 4334 goto out; 4335 } 4336 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4337 "config page(0x%p): size(%d)\n", 4338 ioc->name, ioc->config_page, ioc->config_page_sz)); 4339 dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n", 4340 ioc->name, (unsigned long long)ioc->config_page_dma)); 4341 total_sz += ioc->config_page_sz; 4342 4343 pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n", 4344 ioc->name, total_sz/1024); 4345 pr_info(MPT3SAS_FMT 4346 "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n", 4347 ioc->name, ioc->shost->can_queue, facts->RequestCredit); 4348 pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n", 4349 ioc->name, ioc->shost->sg_tablesize); 4350 return 0; 4351 4352 out: 4353 return -ENOMEM; 4354 } 4355 4356 /** 4357 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. 4358 * @ioc: Pointer to MPT3SAS_ADAPTER structure 4359 * @cooked: Request raw or cooked IOC state 4360 * 4361 * Returns all IOC Doorbell register bits if cooked==0, else just the 4362 * Doorbell bits in MPI2_IOC_STATE_MASK. 4363 */ 4364 u32 4365 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) 4366 { 4367 u32 s, sc; 4368 4369 s = readl(&ioc->chip->Doorbell); 4370 sc = s & MPI2_IOC_STATE_MASK; 4371 return cooked ? sc : s; 4372 } 4373 4374 /** 4375 * _base_wait_on_iocstate - waiting on a particular ioc state * @ioc: per adapter object 4376 * @ioc_state: controller state { READY, OPERATIONAL, or RESET } 4377 * @timeout: timeout in seconds 4378 * 4379 * Returns 0 for success, non-zero for failure.
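 *
 * Polling granularity, derived from the loop below: the doorbell is
 * sampled roughly once per millisecond (usleep_range(1000, 1500)) for
 * cntdn = 1000 * timeout iterations, so a @timeout of 10 gives up after
 * about ten seconds and returns the last observed state.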
4380 */ 4381 static int 4382 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) 4383 { 4384 u32 count, cntdn; 4385 u32 current_state; 4386 4387 count = 0; 4388 cntdn = 1000 * timeout; 4389 do { 4390 current_state = mpt3sas_base_get_iocstate(ioc, 1); 4391 if (current_state == ioc_state) 4392 return 0; 4393 if (count && current_state == MPI2_IOC_STATE_FAULT) 4394 break; 4395 4396 usleep_range(1000, 1500); 4397 count++; 4398 } while (--cntdn); 4399 4400 return current_state; 4401 } 4402 4403 /** 4404 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by 4405 * a write to the doorbell) 4406 * @ioc: per adapter object 4407 * @timeout: timeout in seconds 4408 * 4409 * Returns 0 for success, non-zero for failure. 4410 * 4411 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 4412 */ 4413 static int 4414 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc); 4415 4416 static int 4417 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 4418 { 4419 u32 cntdn, count; 4420 u32 int_status; 4421 4422 count = 0; 4423 cntdn = 1000 * timeout; 4424 do { 4425 int_status = readl(&ioc->chip->HostInterruptStatus); 4426 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4427 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4428 "%s: successful count(%d), timeout(%d)\n", 4429 ioc->name, __func__, count, timeout)); 4430 return 0; 4431 } 4432 4433 usleep_range(1000, 1500); 4434 count++; 4435 } while (--cntdn); 4436 4437 pr_err(MPT3SAS_FMT 4438 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4439 ioc->name, __func__, count, int_status); 4440 return -EFAULT; 4441 } 4442 4443 static int 4444 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 4445 { 4446 u32 cntdn, count; 4447 u32 int_status; 4448 4449 count = 0; 4450 cntdn = 2000 * timeout; 4451 do { 4452 int_status = readl(&ioc->chip->HostInterruptStatus); 4453 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4454 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4455 "%s: successful count(%d), timeout(%d)\n", 4456 ioc->name, __func__, count, timeout)); 4457 return 0; 4458 } 4459 4460 udelay(500); 4461 count++; 4462 } while (--cntdn); 4463 4464 pr_err(MPT3SAS_FMT 4465 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4466 ioc->name, __func__, count, int_status); 4467 return -EFAULT; 4468 4469 } 4470 4471 /** 4472 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. 4473 * @ioc: per adapter object 4474 * @timeout: timeout in seconds 4475 * 4476 * Returns 0 for success, non-zero for failure. 4477 * 4478 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to 4479 * doorbell.
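 *
 * The poll below distinguishes three outcomes: MPI2_HIS_SYS2IOC_DB_STATUS
 * clearing (the IOC consumed the write, success), the IOC instead raising
 * MPI2_HIS_IOC2SYS_DB_STATUS while in the FAULT state (the fault info is
 * dumped and -EFAULT returned), and an all-ones read (0xFFFFFFFF), which
 * typically means the device is no longer reachable on the bus.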
4480 */ 4481 static int 4482 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) 4483 { 4484 u32 cntdn, count; 4485 u32 int_status; 4486 u32 doorbell; 4487 4488 count = 0; 4489 cntdn = 1000 * timeout; 4490 do { 4491 int_status = readl(&ioc->chip->HostInterruptStatus); 4492 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 4493 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4494 "%s: successful count(%d), timeout(%d)\n", 4495 ioc->name, __func__, count, timeout)); 4496 return 0; 4497 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4498 doorbell = readl(&ioc->chip->Doorbell); 4499 if ((doorbell & MPI2_IOC_STATE_MASK) == 4500 MPI2_IOC_STATE_FAULT) { 4501 mpt3sas_base_fault_info(ioc, doorbell); 4502 return -EFAULT; 4503 } 4504 } else if (int_status == 0xFFFFFFFF) 4505 goto out; 4506 4507 usleep_range(1000, 1500); 4508 count++; 4509 } while (--cntdn); 4510 4511 out: 4512 pr_err(MPT3SAS_FMT 4513 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4514 ioc->name, __func__, count, int_status); 4515 return -EFAULT; 4516 } 4517 4518 /** 4519 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use 4520 * @ioc: per adapter object 4521 * @timeout: timeout in seconds 4522 * 4523 * Returns 0 for success, non-zero for failure. 4524 * 4525 */ 4526 static int 4527 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) 4528 { 4529 u32 cntdn, count; 4530 u32 doorbell_reg; 4531 4532 count = 0; 4533 cntdn = 1000 * timeout; 4534 do { 4535 doorbell_reg = readl(&ioc->chip->Doorbell); 4536 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 4537 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4538 "%s: successful count(%d), timeout(%d)\n", 4539 ioc->name, __func__, count, timeout)); 4540 return 0; 4541 } 4542 4543 usleep_range(1000, 1500); 4544 count++; 4545 } while (--cntdn); 4546 4547 pr_err(MPT3SAS_FMT 4548 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", 4549 ioc->name, __func__, count, doorbell_reg); 4550 return -EFAULT; 4551 } 4552 4553 /** 4554 * _base_send_ioc_reset - send doorbell reset 4555 * @ioc: per adapter object 4556 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET 4557 * @timeout: timeout in seconds 4558 * 4559 * Returns 0 for success, non-zero for failure. 4560 */ 4561 static int 4562 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) 4563 { 4564 u32 ioc_state; 4565 int r = 0; 4566 4567 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { 4568 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n", 4569 ioc->name, __func__); 4570 return -EFAULT; 4571 } 4572 4573 if (!(ioc->facts.IOCCapabilities & 4574 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) 4575 return -EFAULT; 4576 4577 pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name); 4578 4579 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, 4580 &ioc->chip->Doorbell); 4581 if ((_base_wait_for_doorbell_ack(ioc, 15))) { 4582 r = -EFAULT; 4583 goto out; 4584 } 4585 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); 4586 if (ioc_state) { 4587 pr_err(MPT3SAS_FMT 4588 "%s: failed going to ready state (ioc_state=0x%x)\n", 4589 ioc->name, __func__, ioc_state); 4590 r = -EFAULT; 4591 goto out; 4592 } 4593 out: 4594 pr_info(MPT3SAS_FMT "message unit reset: %s\n", 4595 ioc->name, ((r == 0) ?
"SUCCESS" : "FAILED")); 4596 return r; 4597 } 4598 4599 /** 4600 * _base_handshake_req_reply_wait - send request thru doorbell interface 4601 * @ioc: per adapter object 4602 * @request_bytes: request length 4603 * @request: pointer having request payload 4604 * @reply_bytes: reply length 4605 * @reply: pointer to reply payload 4606 * @timeout: timeout in second 4607 * 4608 * Returns 0 for success, non-zero for failure. 4609 */ 4610 static int 4611 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 4612 u32 *request, int reply_bytes, u16 *reply, int timeout) 4613 { 4614 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 4615 int i; 4616 u8 failed; 4617 __le32 *mfp; 4618 4619 /* make sure doorbell is not in use */ 4620 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 4621 pr_err(MPT3SAS_FMT 4622 "doorbell is in use (line=%d)\n", 4623 ioc->name, __LINE__); 4624 return -EFAULT; 4625 } 4626 4627 /* clear pending doorbell interrupts from previous state changes */ 4628 if (readl(&ioc->chip->HostInterruptStatus) & 4629 MPI2_HIS_IOC2SYS_DB_STATUS) 4630 writel(0, &ioc->chip->HostInterruptStatus); 4631 4632 /* send message to ioc */ 4633 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 4634 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 4635 &ioc->chip->Doorbell); 4636 4637 if ((_base_spin_on_doorbell_int(ioc, 5))) { 4638 pr_err(MPT3SAS_FMT 4639 "doorbell handshake int failed (line=%d)\n", 4640 ioc->name, __LINE__); 4641 return -EFAULT; 4642 } 4643 writel(0, &ioc->chip->HostInterruptStatus); 4644 4645 if ((_base_wait_for_doorbell_ack(ioc, 5))) { 4646 pr_err(MPT3SAS_FMT 4647 "doorbell handshake ack failed (line=%d)\n", 4648 ioc->name, __LINE__); 4649 return -EFAULT; 4650 } 4651 4652 /* send message 32-bits at a time */ 4653 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 4654 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 4655 if ((_base_wait_for_doorbell_ack(ioc, 5))) 4656 failed = 1; 4657 } 4658 4659 if (failed) { 4660 pr_err(MPT3SAS_FMT 4661 "doorbell handshake sending request failed (line=%d)\n", 4662 ioc->name, __LINE__); 4663 return -EFAULT; 4664 } 4665 4666 /* now wait for the reply */ 4667 if ((_base_wait_for_doorbell_int(ioc, timeout))) { 4668 pr_err(MPT3SAS_FMT 4669 "doorbell handshake int failed (line=%d)\n", 4670 ioc->name, __LINE__); 4671 return -EFAULT; 4672 } 4673 4674 /* read the first two 16-bits, it gives the total length of the reply */ 4675 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4676 & MPI2_DOORBELL_DATA_MASK); 4677 writel(0, &ioc->chip->HostInterruptStatus); 4678 if ((_base_wait_for_doorbell_int(ioc, 5))) { 4679 pr_err(MPT3SAS_FMT 4680 "doorbell handshake int failed (line=%d)\n", 4681 ioc->name, __LINE__); 4682 return -EFAULT; 4683 } 4684 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4685 & MPI2_DOORBELL_DATA_MASK); 4686 writel(0, &ioc->chip->HostInterruptStatus); 4687 4688 for (i = 2; i < default_reply->MsgLength * 2; i++) { 4689 if ((_base_wait_for_doorbell_int(ioc, 5))) { 4690 pr_err(MPT3SAS_FMT 4691 "doorbell handshake int failed (line=%d)\n", 4692 ioc->name, __LINE__); 4693 return -EFAULT; 4694 } 4695 if (i >= reply_bytes/2) /* overflow case */ 4696 readl(&ioc->chip->Doorbell); 4697 else 4698 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 4699 & MPI2_DOORBELL_DATA_MASK); 4700 writel(0, &ioc->chip->HostInterruptStatus); 4701 } 4702 4703 _base_wait_for_doorbell_int(ioc, 5); 4704 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { 4705 dhsprintk(ioc, 
pr_info(MPT3SAS_FMT 4706 "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); 4707 } 4708 writel(0, &ioc->chip->HostInterruptStatus); 4709 4710 if (ioc->logging_level & MPT_DEBUG_INIT) { 4711 mfp = (__le32 *)reply; 4712 pr_info("\toffset:data\n"); 4713 for (i = 0; i < reply_bytes/4; i++) 4714 pr_info("\t[0x%02x]:%08x\n", i*4, 4715 le32_to_cpu(mfp[i])); 4716 } 4717 return 0; 4718 } 4719 4720 /** 4721 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW 4722 * @ioc: per adapter object 4723 * @mpi_reply: the reply payload from FW 4724 * @mpi_request: the request payload sent to FW 4725 * 4726 * The SAS IO Unit Control Request message allows the host to perform low-level 4727 * operations, such as resets on the PHYs of the IO Unit, also allows the host 4728 * to obtain the IOC assigned device handles for a device if it has other 4729 * identifying information about the device, in addition allows the host to 4730 * remove IOC resources associated with the device. 4731 * 4732 * Returns 0 for success, non-zero for failure. 4733 */ 4734 int 4735 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, 4736 Mpi2SasIoUnitControlReply_t *mpi_reply, 4737 Mpi2SasIoUnitControlRequest_t *mpi_request) 4738 { 4739 u16 smid; 4740 u32 ioc_state; 4741 bool issue_reset = false; 4742 int rc; 4743 void *request; 4744 u16 wait_state_count; 4745 4746 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4747 __func__)); 4748 4749 mutex_lock(&ioc->base_cmds.mutex); 4750 4751 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 4752 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", 4753 ioc->name, __func__); 4754 rc = -EAGAIN; 4755 goto out; 4756 } 4757 4758 wait_state_count = 0; 4759 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4760 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4761 if (wait_state_count++ == 10) { 4762 pr_err(MPT3SAS_FMT 4763 "%s: failed due to ioc not operational\n", 4764 ioc->name, __func__); 4765 rc = -EFAULT; 4766 goto out; 4767 } 4768 ssleep(1); 4769 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4770 pr_info(MPT3SAS_FMT 4771 "%s: waiting for operational state(count=%d)\n", 4772 ioc->name, __func__, wait_state_count); 4773 } 4774 4775 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 4776 if (!smid) { 4777 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 4778 ioc->name, __func__); 4779 rc = -EAGAIN; 4780 goto out; 4781 } 4782 4783 rc = 0; 4784 ioc->base_cmds.status = MPT3_CMD_PENDING; 4785 request = mpt3sas_base_get_msg_frame(ioc, smid); 4786 ioc->base_cmds.smid = smid; 4787 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); 4788 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 4789 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 4790 ioc->ioc_link_reset_in_progress = 1; 4791 init_completion(&ioc->base_cmds.done); 4792 ioc->put_smid_default(ioc, smid); 4793 wait_for_completion_timeout(&ioc->base_cmds.done, 4794 msecs_to_jiffies(10000)); 4795 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 4796 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 4797 ioc->ioc_link_reset_in_progress) 4798 ioc->ioc_link_reset_in_progress = 0; 4799 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4800 pr_err(MPT3SAS_FMT "%s: timeout\n", 4801 ioc->name, __func__); 4802 _debug_dump_mf(mpi_request, 4803 sizeof(Mpi2SasIoUnitControlRequest_t)/4); 4804 if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) 4805 issue_reset = true; 4806 goto issue_host_reset; 4807 } 4808 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 4809 memcpy(mpi_reply, 
ioc->base_cmds.reply, 4810 sizeof(Mpi2SasIoUnitControlReply_t)); 4811 else 4812 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 4813 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4814 goto out; 4815 4816 issue_host_reset: 4817 if (issue_reset) 4818 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 4819 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4820 rc = -EFAULT; 4821 out: 4822 mutex_unlock(&ioc->base_cmds.mutex); 4823 return rc; 4824 } 4825 4826 /** 4827 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device 4828 * @ioc: per adapter object 4829 * @mpi_reply: the reply payload from FW 4830 * @mpi_request: the request payload sent to FW 4831 * 4832 * The SCSI Enclosure Processor request message causes the IOC to 4833 * communicate with SES devices to control LED status signals. 4834 * 4835 * Returns 0 for success, non-zero for failure. 4836 */ 4837 int 4838 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, 4839 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) 4840 { 4841 u16 smid; 4842 u32 ioc_state; 4843 bool issue_reset = false; 4844 int rc; 4845 void *request; 4846 u16 wait_state_count; 4847 4848 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4849 __func__)); 4850 4851 mutex_lock(&ioc->base_cmds.mutex); 4852 4853 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 4854 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", 4855 ioc->name, __func__); 4856 rc = -EAGAIN; 4857 goto out; 4858 } 4859 4860 wait_state_count = 0; 4861 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4862 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4863 if (wait_state_count++ == 10) { 4864 pr_err(MPT3SAS_FMT 4865 "%s: failed due to ioc not operational\n", 4866 ioc->name, __func__); 4867 rc = -EFAULT; 4868 goto out; 4869 } 4870 ssleep(1); 4871 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4872 pr_info(MPT3SAS_FMT 4873 "%s: waiting for operational state(count=%d)\n", 4874 ioc->name, 4875 __func__, wait_state_count); 4876 } 4877 4878 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 4879 if (!smid) { 4880 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 4881 ioc->name, __func__); 4882 rc = -EAGAIN; 4883 goto out; 4884 } 4885 4886 rc = 0; 4887 ioc->base_cmds.status = MPT3_CMD_PENDING; 4888 request = mpt3sas_base_get_msg_frame(ioc, smid); 4889 ioc->base_cmds.smid = smid; 4890 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t)); 4891 init_completion(&ioc->base_cmds.done); 4892 ioc->put_smid_default(ioc, smid); 4893 wait_for_completion_timeout(&ioc->base_cmds.done, 4894 msecs_to_jiffies(10000)); 4895 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4896 pr_err(MPT3SAS_FMT "%s: timeout\n", 4897 ioc->name, __func__); 4898 _debug_dump_mf(mpi_request, 4899 sizeof(Mpi2SepRequest_t)/4); 4900 if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) 4901 issue_reset = true; 4902 goto issue_host_reset; 4903 } 4904 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 4905 memcpy(mpi_reply, ioc->base_cmds.reply, 4906 sizeof(Mpi2SepReply_t)); 4907 else 4908 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); 4909 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4910 goto out; 4911 4912 issue_host_reset: 4913 if (issue_reset) 4914 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 4915 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4916 rc = -EFAULT; 4917 out: 4918 mutex_unlock(&ioc->base_cmds.mutex); 4919 return rc; 4920 } 4921 4922 /** 4923 * _base_get_port_facts - obtain port facts reply and save in ioc 4924 * @ioc: per adapter object * @port: port number 4925 * 4926 * Returns 0 for success, non-zero for
failure. 4927 */ 4928 static int 4929 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) 4930 { 4931 Mpi2PortFactsRequest_t mpi_request; 4932 Mpi2PortFactsReply_t mpi_reply; 4933 struct mpt3sas_port_facts *pfacts; 4934 int mpi_reply_sz, mpi_request_sz, r; 4935 4936 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4937 __func__)); 4938 4939 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); 4940 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); 4941 memset(&mpi_request, 0, mpi_request_sz); 4942 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; 4943 mpi_request.PortNumber = port; 4944 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 4945 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 4946 4947 if (r != 0) { 4948 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 4949 ioc->name, __func__, r); 4950 return r; 4951 } 4952 4953 pfacts = &ioc->pfacts[port]; 4954 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); 4955 pfacts->PortNumber = mpi_reply.PortNumber; 4956 pfacts->VP_ID = mpi_reply.VP_ID; 4957 pfacts->VF_ID = mpi_reply.VF_ID; 4958 pfacts->MaxPostedCmdBuffers = 4959 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); 4960 4961 return 0; 4962 } 4963 4964 /** 4965 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL 4966 * @ioc: per adapter object 4967 * @timeout: timeout in seconds 4968 * 4969 * Returns 0 for success, non-zero for failure. 4970 */ 4971 static int 4972 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) 4973 { 4974 u32 ioc_state; 4975 int rc; 4976 4977 dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name, 4978 __func__)); 4979 4980 if (ioc->pci_error_recovery) { 4981 dfailprintk(ioc, printk(MPT3SAS_FMT 4982 "%s: host in pci error recovery\n", ioc->name, __func__)); 4983 return -EFAULT; 4984 } 4985 4986 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 4987 dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", 4988 ioc->name, __func__, ioc_state)); 4989 4990 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || 4991 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 4992 return 0; 4993 4994 if (ioc_state & MPI2_DOORBELL_USED) { 4995 dhsprintk(ioc, printk(MPT3SAS_FMT 4996 "unexpected doorbell active!\n", ioc->name)); 4997 goto issue_diag_reset; 4998 } 4999 5000 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 5001 mpt3sas_base_fault_info(ioc, ioc_state & 5002 MPI2_DOORBELL_DATA_MASK); 5003 goto issue_diag_reset; 5004 } 5005 5006 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); 5007 if (ioc_state) { 5008 dfailprintk(ioc, printk(MPT3SAS_FMT 5009 "%s: failed going to ready state (ioc_state=0x%x)\n", 5010 ioc->name, __func__, ioc_state)); 5011 return -EFAULT; 5012 } 5013 5014 issue_diag_reset: 5015 rc = _base_diag_reset(ioc); 5016 return rc; 5017 } 5018 5019 /** 5020 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 5021 * @ioc: per adapter object 5022 * 5023 * Returns 0 for success, non-zero for failure.
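 *
 * The facts captured here feed the start-of-day sizing in
 * _base_allocate_memory_pools(): RequestCredit bounds the controller
 * queue depth, IOCRequestFrameSize and ReplyFrameSize (both in 4-byte
 * units) set ioc->request_sz and ioc->reply_sz, and MaxChainDepth caps
 * chains_needed_per_io.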
5024 */ 5025 static int 5026 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) 5027 { 5028 Mpi2IOCFactsRequest_t mpi_request; 5029 Mpi2IOCFactsReply_t mpi_reply; 5030 struct mpt3sas_facts *facts; 5031 int mpi_reply_sz, mpi_request_sz, r; 5032 5033 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5034 __func__)); 5035 5036 r = _base_wait_for_iocstate(ioc, 10); 5037 if (r) { 5038 dfailprintk(ioc, printk(MPT3SAS_FMT 5039 "%s: failed getting to correct state\n", 5040 ioc->name, __func__)); 5041 return r; 5042 } 5043 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 5044 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 5045 memset(&mpi_request, 0, mpi_request_sz); 5046 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; 5047 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 5048 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 5049 5050 if (r != 0) { 5051 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5052 ioc->name, __func__, r); 5053 return r; 5054 } 5055 5056 facts = &ioc->facts; 5057 memset(facts, 0, sizeof(struct mpt3sas_facts)); 5058 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); 5059 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); 5060 facts->VP_ID = mpi_reply.VP_ID; 5061 facts->VF_ID = mpi_reply.VF_ID; 5062 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); 5063 facts->MaxChainDepth = mpi_reply.MaxChainDepth; 5064 facts->WhoInit = mpi_reply.WhoInit; 5065 facts->NumberOfPorts = mpi_reply.NumberOfPorts; 5066 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; 5067 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); 5068 facts->MaxReplyDescriptorPostQueueDepth = 5069 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); 5070 facts->ProductID = le16_to_cpu(mpi_reply.ProductID); 5071 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); 5072 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 5073 ioc->ir_firmware = 1; 5074 if ((facts->IOCCapabilities & 5075 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) 5076 ioc->rdpq_array_capable = 1; 5077 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) 5078 ioc->atomic_desc_capable = 1; 5079 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 5080 facts->IOCRequestFrameSize = 5081 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 5082 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 5083 facts->IOCMaxChainSegmentSize = 5084 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); 5085 } 5086 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); 5087 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); 5088 ioc->shost->max_id = -1; 5089 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); 5090 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); 5091 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); 5092 facts->HighPriorityCredit = 5093 le16_to_cpu(mpi_reply.HighPriorityCredit); 5094 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; 5095 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); 5096 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; 5097 5098 /* 5099 * Get the Page Size from IOC Facts. If it's 0, default to 4k. 
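 * (CurrentHostPageSize is a power-of-two exponent: the assignment below
 * computes 1 << exponent, so a reported 12 yields 4096 bytes. A reported
 * 0 would yield a bogus 1 byte "page", hence the fallback to
 * MPT3SAS_HOST_PAGE_SIZE_4K.)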
5100 */ 5101 ioc->page_size = 1 << facts->CurrentHostPageSize; 5102 if (ioc->page_size == 1) { 5103 pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting " 5104 "default host page size to 4k\n", ioc->name); 5105 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; 5106 } 5107 dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n", 5108 ioc->name, facts->CurrentHostPageSize)); 5109 5110 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5111 "hba queue depth(%d), max chains per io(%d)\n", 5112 ioc->name, facts->RequestCredit, 5113 facts->MaxChainDepth)); 5114 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5115 "request frame size(%d), reply frame size(%d)\n", ioc->name, 5116 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); 5117 return 0; 5118 } 5119 5120 /** 5121 * _base_send_ioc_init - send ioc_init to firmware 5122 * @ioc: per adapter object 5123 * 5124 * Returns 0 for success, non-zero for failure. 5125 */ 5126 static int 5127 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) 5128 { 5129 Mpi2IOCInitRequest_t mpi_request; 5130 Mpi2IOCInitReply_t mpi_reply; 5131 int i, r = 0; 5132 ktime_t current_time; 5133 u16 ioc_status; 5134 u32 reply_post_free_array_sz = 0; 5135 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; 5136 dma_addr_t reply_post_free_array_dma; 5137 5138 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5139 __func__)); 5140 5141 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 5142 mpi_request.Function = MPI2_FUNCTION_IOC_INIT; 5143 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 5144 mpi_request.VF_ID = 0; /* TODO */ 5145 mpi_request.VP_ID = 0; 5146 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); 5147 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 5148 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; 5149 5150 if (_base_is_controller_msix_enabled(ioc)) 5151 mpi_request.HostMSIxVectors = ioc->reply_queue_count; 5152 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 5153 mpi_request.ReplyDescriptorPostQueueDepth = 5154 cpu_to_le16(ioc->reply_post_queue_depth); 5155 mpi_request.ReplyFreeQueueDepth = 5156 cpu_to_le16(ioc->reply_free_queue_depth); 5157 5158 mpi_request.SenseBufferAddressHigh = 5159 cpu_to_le32((u64)ioc->sense_dma >> 32); 5160 mpi_request.SystemReplyAddressHigh = 5161 cpu_to_le32((u64)ioc->reply_dma >> 32); 5162 mpi_request.SystemRequestFrameBaseAddress = 5163 cpu_to_le64((u64)ioc->request_dma); 5164 mpi_request.ReplyFreeQueueAddress = 5165 cpu_to_le64((u64)ioc->reply_free_dma); 5166 5167 if (ioc->rdpq_array_enable) { 5168 reply_post_free_array_sz = ioc->reply_queue_count * 5169 sizeof(Mpi2IOCInitRDPQArrayEntry); 5170 reply_post_free_array = pci_alloc_consistent(ioc->pdev, 5171 reply_post_free_array_sz, &reply_post_free_array_dma); 5172 if (!reply_post_free_array) { 5173 pr_err(MPT3SAS_FMT 5174 "reply_post_free_array: pci_alloc_consistent failed\n", 5175 ioc->name); 5176 r = -ENOMEM; 5177 goto out; 5178 } 5179 memset(reply_post_free_array, 0, reply_post_free_array_sz); 5180 for (i = 0; i < ioc->reply_queue_count; i++) 5181 reply_post_free_array[i].RDPQBaseAddress = 5182 cpu_to_le64( 5183 (u64)ioc->reply_post[i].reply_post_free_dma); 5184 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; 5185 mpi_request.ReplyDescriptorPostQueueAddress = 5186 cpu_to_le64((u64)reply_post_free_array_dma); 5187 } else { 5188 mpi_request.ReplyDescriptorPostQueueAddress = 5189 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); 5190 } 5191 5192 /* This time stamp specifies number of milliseconds 
5193 * since epoch ~ midnight January 1, 1970. 5194 */ 5195 current_time = ktime_get_real(); 5196 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); 5197 5198 if (ioc->logging_level & MPT_DEBUG_INIT) { 5199 __le32 *mfp; 5200 int i; 5201 5202 mfp = (__le32 *)&mpi_request; 5203 pr_info("\toffset:data\n"); 5204 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 5205 pr_info("\t[0x%02x]:%08x\n", i*4, 5206 le32_to_cpu(mfp[i])); 5207 } 5208 5209 r = _base_handshake_req_reply_wait(ioc, 5210 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 5211 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); 5212 5213 if (r != 0) { 5214 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5215 ioc->name, __func__, r); 5216 goto out; 5217 } 5218 5219 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 5220 if (ioc_status != MPI2_IOCSTATUS_SUCCESS || 5221 mpi_reply.IOCLogInfo) { 5222 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__); 5223 r = -EIO; 5224 } 5225 5226 out: 5227 if (reply_post_free_array) 5228 pci_free_consistent(ioc->pdev, reply_post_free_array_sz, 5229 reply_post_free_array, 5230 reply_post_free_array_dma); 5231 return r; 5232 } 5233 5234 /** 5235 * mpt3sas_port_enable_done - command completion routine for port enable 5236 * @ioc: per adapter object 5237 * @smid: system request message index 5238 * @msix_index: MSIX table index supplied by the OS 5239 * @reply: reply message frame(lower 32bit addr) 5240 * 5241 * Return 1 meaning mf should be freed from _base_interrupt 5242 * 0 means the mf is freed from this function. 5243 */ 5244 u8 5245 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 5246 u32 reply) 5247 { 5248 MPI2DefaultReply_t *mpi_reply; 5249 u16 ioc_status; 5250 5251 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) 5252 return 1; 5253 5254 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 5255 if (!mpi_reply) 5256 return 1; 5257 5258 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) 5259 return 1; 5260 5261 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; 5262 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; 5263 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; 5264 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 5265 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 5266 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 5267 ioc->port_enable_failed = 1; 5268 5269 if (ioc->is_driver_loading) { 5270 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 5271 mpt3sas_port_enable_complete(ioc); 5272 return 1; 5273 } else { 5274 ioc->start_scan_failed = ioc_status; 5275 ioc->start_scan = 0; 5276 return 1; 5277 } 5278 } 5279 complete(&ioc->port_enable_cmds.done); 5280 return 1; 5281 } 5282 5283 /** 5284 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 5285 * @ioc: per adapter object 5286 * 5287 * Returns 0 for success, non-zero for failure. 
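 *
 * Note the two variants: this one issues the request and blocks (up to
 * 300 seconds) for the PortEnable reply, while mpt3sas_port_enable()
 * further below posts the same request asynchronously and leaves the
 * completion to mpt3sas_port_enable_done() above.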
5288 */ 5289 static int 5290 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) 5291 { 5292 Mpi2PortEnableRequest_t *mpi_request; 5293 Mpi2PortEnableReply_t *mpi_reply; 5294 int r = 0; 5295 u16 smid; 5296 u16 ioc_status; 5297 5298 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); 5299 5300 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 5301 pr_err(MPT3SAS_FMT "%s: internal command already in use\n", 5302 ioc->name, __func__); 5303 return -EAGAIN; 5304 } 5305 5306 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); 5307 if (!smid) { 5308 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 5309 ioc->name, __func__); 5310 return -EAGAIN; 5311 } 5312 5313 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; 5314 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 5315 ioc->port_enable_cmds.smid = smid; 5316 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 5317 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 5318 5319 init_completion(&ioc->port_enable_cmds.done); 5320 ioc->put_smid_default(ioc, smid); 5321 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); 5322 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { 5323 pr_err(MPT3SAS_FMT "%s: timeout\n", 5324 ioc->name, __func__); 5325 _debug_dump_mf(mpi_request, 5326 sizeof(Mpi2PortEnableRequest_t)/4); 5327 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) 5328 r = -EFAULT; 5329 else 5330 r = -ETIME; 5331 goto out; 5332 } 5333 5334 mpi_reply = ioc->port_enable_cmds.reply; 5335 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 5336 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5337 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n", 5338 ioc->name, __func__, ioc_status); 5339 r = -EFAULT; 5340 goto out; 5341 } 5342 5343 out: 5344 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 5345 pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ? 5346 "SUCCESS" : "FAILED")); 5347 return r; 5348 } 5349 5350 /** 5351 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) 5352 * @ioc: per adapter object 5353 * 5354 * Returns 0 for success, non-zero for failure. 5355 */ 5356 int 5357 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) 5358 { 5359 Mpi2PortEnableRequest_t *mpi_request; 5360 u16 smid; 5361 5362 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); 5363 5364 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 5365 pr_err(MPT3SAS_FMT "%s: internal command already in use\n", 5366 ioc->name, __func__); 5367 return -EAGAIN; 5368 } 5369 5370 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); 5371 if (!smid) { 5372 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 5373 ioc->name, __func__); 5374 return -EAGAIN; 5375 } 5376 5377 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; 5378 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 5379 ioc->port_enable_cmds.smid = smid; 5380 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 5381 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 5382 5383 ioc->put_smid_default(ioc, smid); 5384 return 0; 5385 } 5386 5387 /** 5388 * _base_determine_wait_on_discovery - disposition 5389 * @ioc: per adapter object 5390 * 5391 * Decide whether to wait on discovery to complete. Used to either 5392 * locate boot device, or report volumes ahead of physical devices. 5393 * 5394 * Returns 1 for wait, 0 for don't wait 5395 */ 5396 static int 5397 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) 5398 { 5399 /* We wait for discovery to complete if IR firmware is loaded.
5400 * The SAS topology events arrive before the PD events, so we need time to
5401 * turn on the bit in ioc->pd_handles to indicate a PD.
5402 * Also, it may be required to report volumes ahead of physical
5403 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
5404 */
5405 if (ioc->ir_firmware)
5406 return 1;
5407
5408 /* if there is no BIOS, then we don't need to wait */
5409 if (!ioc->bios_pg3.BiosVersion)
5410 return 0;
5411
5412 /* The BIOS is present, so we drop down here.
5413 *
5414 * If there are any boot device entries in BIOS Page 2, then we wait
5415 * for discovery to complete.
5416 */
5417
5418 /* Current Boot Device */
5419 if ((ioc->bios_pg2.CurrentBootDeviceForm &
5420 MPI2_BIOSPAGE2_FORM_MASK) ==
5421 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5422 /* Request Boot Device */
5423 (ioc->bios_pg2.ReqBootDeviceForm &
5424 MPI2_BIOSPAGE2_FORM_MASK) ==
5425 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5426 /* Alternate Request Boot Device */
5427 (ioc->bios_pg2.ReqAltBootDeviceForm &
5428 MPI2_BIOSPAGE2_FORM_MASK) ==
5429 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
5430 return 0;
5431
5432 return 1;
5433 }
5434
5435 /**
5436 * _base_unmask_events - turn on notification for this event
5437 * @ioc: per adapter object
5438 * @event: firmware event
5439 *
5440 * The mask is stored in ioc->event_masks.
5441 */
5442 static void
5443 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
5444 {
5445 u32 desired_event;
5446
5447 if (event >= 128)
5448 return;
5449
5450 desired_event = (1 << (event % 32));
5451
5452 if (event < 32)
5453 ioc->event_masks[0] &= ~desired_event;
5454 else if (event < 64)
5455 ioc->event_masks[1] &= ~desired_event;
5456 else if (event < 96)
5457 ioc->event_masks[2] &= ~desired_event;
5458 else if (event < 128)
5459 ioc->event_masks[3] &= ~desired_event;
5460 }
5461
5462 /**
5463 * _base_event_notification - send event notification
5464 * @ioc: per adapter object
5465 *
5466 * Returns 0 for success, non-zero for failure.
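 *
 * Illustrative caller sketch: mpt3sas_base_validate_event_type() below
 * serializes this call with the base-command mutex, roughly:
 *
 *	mutex_lock(&ioc->base_cmds.mutex);
 *	_base_event_notification(ioc);
 *	mutex_unlock(&ioc->base_cmds.mutex);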
5467 */
5468 static int
5469 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
5470 {
5471 Mpi2EventNotificationRequest_t *mpi_request;
5472 u16 smid;
5473 int r = 0;
5474 int i;
5475
5476 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5477 __func__));
5478
5479 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
5480 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
5481 ioc->name, __func__);
5482 return -EAGAIN;
5483 }
5484
5485 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5486 if (!smid) {
5487 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5488 ioc->name, __func__);
5489 return -EAGAIN;
5490 }
5491 ioc->base_cmds.status = MPT3_CMD_PENDING;
5492 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5493 ioc->base_cmds.smid = smid;
5494 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
5495 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5496 mpi_request->VF_ID = 0; /* TODO */
5497 mpi_request->VP_ID = 0;
5498 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5499 mpi_request->EventMasks[i] =
5500 cpu_to_le32(ioc->event_masks[i]);
5501 init_completion(&ioc->base_cmds.done);
5502 ioc->put_smid_default(ioc, smid);
5503 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
5504 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5505 pr_err(MPT3SAS_FMT "%s: timeout\n",
5506 ioc->name, __func__);
5507 _debug_dump_mf(mpi_request,
5508 sizeof(Mpi2EventNotificationRequest_t)/4);
5509 if (ioc->base_cmds.status & MPT3_CMD_RESET)
5510 r = -EFAULT;
5511 else
5512 r = -ETIME;
5513 } else
5514 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
5515 ioc->name, __func__));
5516 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5517 return r;
5518 }
5519
5520 /**
5521 * mpt3sas_base_validate_event_type - validating event types
5522 * @ioc: per adapter object
5523 * @event_type: firmware event type masks requested by the application
5524 *
5525 * This turns on firmware event notification when an application
5526 * asks for a given event. We don't mask events that are already enabled.
5527 */
5528 void
5529 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
5530 {
5531 int i, j;
5532 u32 event_mask, desired_event;
5533 u8 send_update_to_fw;
5534
5535 for (i = 0, send_update_to_fw = 0; i <
5536 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
5537 event_mask = ~event_type[i];
5538 desired_event = 1;
5539 for (j = 0; j < 32; j++) {
5540 if (!(event_mask & desired_event) &&
5541 (ioc->event_masks[i] & desired_event)) {
5542 ioc->event_masks[i] &= ~desired_event;
5543 send_update_to_fw = 1;
5544 }
5545 desired_event = (desired_event << 1);
5546 }
5547 }
5548
5549 if (!send_update_to_fw)
5550 return;
5551
5552 mutex_lock(&ioc->base_cmds.mutex);
5553 _base_event_notification(ioc);
5554 mutex_unlock(&ioc->base_cmds.mutex);
5555 }
5556
5557 /**
5558 * _base_diag_reset - the "big hammer" start of day reset
5559 * @ioc: per adapter object
5560 *
5561 * Returns 0 for success, non-zero for failure.
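 *
 * Recipe in brief (a summary of the body below): write the six-key
 * magic sequence to WriteSequence until DIAG_WRITE_ENABLE is set,
 * e.g.
 *
 *	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
 *	...
 *	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
 *
 * then set MPI2_DIAG_RESET_ADAPTER in HostDiagnostic, poll for that
 * bit to clear, and finally write the flush key to re-lock the
 * diagnostic register.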
5562 */
5563 static int
5564 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
5565 {
5566 u32 host_diagnostic;
5567 u32 ioc_state;
5568 u32 count;
5569 u32 hcb_size;
5570
5571 pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
5572
5573 drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
5574 ioc->name));
5575
5576 count = 0;
5577 do {
5578 /* Write the magic sequence to the WriteSequence register.
5579 * Loop until in diagnostic mode.
5580 */
5581 drsprintk(ioc, pr_info(MPT3SAS_FMT
5582 "write magic sequence\n", ioc->name));
5583 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
5584 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
5585 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
5586 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
5587 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
5588 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
5589 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
5590
5591 /* wait 100 msec */
5592 msleep(100);
5593
5594 if (count++ > 20)
5595 goto out;
5596
5597 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
5598 drsprintk(ioc, pr_info(MPT3SAS_FMT
5599 "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
5600 ioc->name, count, host_diagnostic));
5601
5602 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
5603
5604 hcb_size = readl(&ioc->chip->HCBSize);
5605
5606 drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
5607 ioc->name));
5608 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
5609 &ioc->chip->HostDiagnostic);
5610
5611 /* This delay allows the chip PCIe hardware time to finish reset tasks */
5612 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
5613
5614 /* Approximately 300 second max wait */
5615 for (count = 0; count < (300000000 /
5616 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
5617
5618 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
5619
5620 if (host_diagnostic == 0xFFFFFFFF)
5621 goto out;
5622 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
5623 break;
5624
5625 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
5626 }
5627
5628 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
5629
5630 drsprintk(ioc, pr_info(MPT3SAS_FMT
5631 "restart the adapter assuming the HCB Address points to good F/W\n",
5632 ioc->name));
5633 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
5634 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
5635 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
5636
5637 drsprintk(ioc, pr_info(MPT3SAS_FMT
5638 "re-enable the HCDW\n", ioc->name));
5639 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
5640 &ioc->chip->HCBSize);
5641 }
5642
5643 drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
5644 ioc->name));
5645 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
5646 &ioc->chip->HostDiagnostic);
5647
5648 drsprintk(ioc, pr_info(MPT3SAS_FMT
5649 "disable writes to the diagnostic register\n", ioc->name));
5650 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
5651
5652 drsprintk(ioc, pr_info(MPT3SAS_FMT
5653 "Wait for FW to go to the READY state\n", ioc->name));
5654 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
5655 if (ioc_state) {
5656 pr_err(MPT3SAS_FMT
5657 "%s: failed going to ready state (ioc_state=0x%x)\n",
5658 ioc->name, __func__, ioc_state);
5659 goto out;
5660 }
5661
5662 pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
5663 return 0;
5664
5665 out:
5666 pr_err(MPT3SAS_FMT "diag reset:
FAILED\n", ioc->name); 5667 return -EFAULT; 5668 } 5669 5670 /** 5671 * _base_make_ioc_ready - put controller in READY state 5672 * @ioc: per adapter object 5673 * @type: FORCE_BIG_HAMMER or SOFT_RESET 5674 * 5675 * Returns 0 for success, non-zero for failure. 5676 */ 5677 static int 5678 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) 5679 { 5680 u32 ioc_state; 5681 int rc; 5682 int count; 5683 5684 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5685 __func__)); 5686 5687 if (ioc->pci_error_recovery) 5688 return 0; 5689 5690 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 5691 dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", 5692 ioc->name, __func__, ioc_state)); 5693 5694 /* if in RESET state, it should move to READY state shortly */ 5695 count = 0; 5696 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { 5697 while ((ioc_state & MPI2_IOC_STATE_MASK) != 5698 MPI2_IOC_STATE_READY) { 5699 if (count++ == 10) { 5700 pr_err(MPT3SAS_FMT 5701 "%s: failed going to ready state (ioc_state=0x%x)\n", 5702 ioc->name, __func__, ioc_state); 5703 return -EFAULT; 5704 } 5705 ssleep(1); 5706 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 5707 } 5708 } 5709 5710 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 5711 return 0; 5712 5713 if (ioc_state & MPI2_DOORBELL_USED) { 5714 dhsprintk(ioc, pr_info(MPT3SAS_FMT 5715 "unexpected doorbell active!\n", 5716 ioc->name)); 5717 goto issue_diag_reset; 5718 } 5719 5720 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 5721 mpt3sas_base_fault_info(ioc, ioc_state & 5722 MPI2_DOORBELL_DATA_MASK); 5723 goto issue_diag_reset; 5724 } 5725 5726 if (type == FORCE_BIG_HAMMER) 5727 goto issue_diag_reset; 5728 5729 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 5730 if (!(_base_send_ioc_reset(ioc, 5731 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) { 5732 return 0; 5733 } 5734 5735 issue_diag_reset: 5736 rc = _base_diag_reset(ioc); 5737 return rc; 5738 } 5739 5740 /** 5741 * _base_make_ioc_operational - put controller in OPERATIONAL state 5742 * @ioc: per adapter object 5743 * 5744 * Returns 0 for success, non-zero for failure. 
5745 */ 5746 static int 5747 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) 5748 { 5749 int r, i, index; 5750 unsigned long flags; 5751 u32 reply_address; 5752 u16 smid; 5753 struct _tr_list *delayed_tr, *delayed_tr_next; 5754 struct _sc_list *delayed_sc, *delayed_sc_next; 5755 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; 5756 u8 hide_flag; 5757 struct adapter_reply_queue *reply_q; 5758 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; 5759 5760 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5761 __func__)); 5762 5763 /* clean the delayed target reset list */ 5764 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 5765 &ioc->delayed_tr_list, list) { 5766 list_del(&delayed_tr->list); 5767 kfree(delayed_tr); 5768 } 5769 5770 5771 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 5772 &ioc->delayed_tr_volume_list, list) { 5773 list_del(&delayed_tr->list); 5774 kfree(delayed_tr); 5775 } 5776 5777 list_for_each_entry_safe(delayed_sc, delayed_sc_next, 5778 &ioc->delayed_sc_list, list) { 5779 list_del(&delayed_sc->list); 5780 kfree(delayed_sc); 5781 } 5782 5783 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, 5784 &ioc->delayed_event_ack_list, list) { 5785 list_del(&delayed_event_ack->list); 5786 kfree(delayed_event_ack); 5787 } 5788 5789 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5790 5791 /* hi-priority queue */ 5792 INIT_LIST_HEAD(&ioc->hpr_free_list); 5793 smid = ioc->hi_priority_smid; 5794 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { 5795 ioc->hpr_lookup[i].cb_idx = 0xFF; 5796 ioc->hpr_lookup[i].smid = smid; 5797 list_add_tail(&ioc->hpr_lookup[i].tracker_list, 5798 &ioc->hpr_free_list); 5799 } 5800 5801 /* internal queue */ 5802 INIT_LIST_HEAD(&ioc->internal_free_list); 5803 smid = ioc->internal_smid; 5804 for (i = 0; i < ioc->internal_depth; i++, smid++) { 5805 ioc->internal_lookup[i].cb_idx = 0xFF; 5806 ioc->internal_lookup[i].smid = smid; 5807 list_add_tail(&ioc->internal_lookup[i].tracker_list, 5808 &ioc->internal_free_list); 5809 } 5810 5811 /* chain pool */ 5812 INIT_LIST_HEAD(&ioc->free_chain_list); 5813 for (i = 0; i < ioc->chain_depth; i++) 5814 list_add_tail(&ioc->chain_lookup[i].tracker_list, 5815 &ioc->free_chain_list); 5816 5817 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 5818 5819 /* initialize Reply Free Queue */ 5820 for (i = 0, reply_address = (u32)ioc->reply_dma ; 5821 i < ioc->reply_free_queue_depth ; i++, reply_address += 5822 ioc->reply_sz) 5823 ioc->reply_free[i] = cpu_to_le32(reply_address); 5824 5825 /* initialize reply queues */ 5826 if (ioc->is_driver_loading) 5827 _base_assign_reply_queues(ioc); 5828 5829 /* initialize Reply Post Free Queue */ 5830 index = 0; 5831 reply_post_free_contig = ioc->reply_post[0].reply_post_free; 5832 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 5833 /* 5834 * If RDPQ is enabled, switch to the next allocation. 5835 * Otherwise advance within the contiguous region. 
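 * (Without RDPQ there is one contiguous allocation, so each queue
 * simply begins reply_post_queue_depth descriptors after the
 * previous one.)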
5836 */
5837 if (ioc->rdpq_array_enable) {
5838 reply_q->reply_post_free =
5839 ioc->reply_post[index++].reply_post_free;
5840 } else {
5841 reply_q->reply_post_free = reply_post_free_contig;
5842 reply_post_free_contig += ioc->reply_post_queue_depth;
5843 }
5844
5845 reply_q->reply_post_host_index = 0;
5846 for (i = 0; i < ioc->reply_post_queue_depth; i++)
5847 reply_q->reply_post_free[i].Words =
5848 cpu_to_le64(ULLONG_MAX);
5849 if (!_base_is_controller_msix_enabled(ioc))
5850 goto skip_init_reply_post_free_queue;
5851 }
5852 skip_init_reply_post_free_queue:
5853
5854 r = _base_send_ioc_init(ioc);
5855 if (r)
5856 return r;
5857
5858 /* initialize reply free host index */
5859 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
5860 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
5861
5862 /* initialize reply post host index */
5863 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5864 if (ioc->combined_reply_queue)
5865 writel((reply_q->msix_index & 7) <<
5866 MPI2_RPHI_MSIX_INDEX_SHIFT,
5867 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
5868 else
5869 writel(reply_q->msix_index <<
5870 MPI2_RPHI_MSIX_INDEX_SHIFT,
5871 &ioc->chip->ReplyPostHostIndex);
5872
5873 if (!_base_is_controller_msix_enabled(ioc))
5874 goto skip_init_reply_post_host_index;
5875 }
5876
5877 skip_init_reply_post_host_index:
5878
5879 _base_unmask_interrupts(ioc);
5880 r = _base_event_notification(ioc);
5881 if (r)
5882 return r;
5883
5884 _base_static_config_pages(ioc);
5885
5886 if (ioc->is_driver_loading) {
5887
5888 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
5889 == 0x80) {
5890 hide_flag = (u8) (
5891 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
5892 MFG_PAGE10_HIDE_SSDS_MASK);
5893 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
5894 ioc->mfg_pg10_hide_flag = hide_flag;
5895 }
5896
5897 ioc->wait_for_discovery_to_complete =
5898 _base_determine_wait_on_discovery(ioc);
5899
5900 return r; /* scan_start and scan_finished support */
5901 }
5902
5903 r = _base_send_port_enable(ioc);
5904 if (r)
5905 return r;
5906
5907 return r;
5908 }
5909
5910 /**
5911 * mpt3sas_base_free_resources - free controller resources
5912 * @ioc: per adapter object
5913 *
5914 * Return nothing.
5915 */
5916 void
5917 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5918 {
5919 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5920 __func__));
5921
5922 /* synchronize freeing resources with the pci_access_mutex lock */
5923 mutex_lock(&ioc->pci_access_mutex);
5924 if (ioc->chip_phys && ioc->chip) {
5925 _base_mask_interrupts(ioc);
5926 ioc->shost_recovery = 1;
5927 _base_make_ioc_ready(ioc, SOFT_RESET);
5928 ioc->shost_recovery = 0;
5929 }
5930
5931 mpt3sas_base_unmap_resources(ioc);
5932 mutex_unlock(&ioc->pci_access_mutex);
5933 return;
5934 }
5935
5936 /**
5937 * mpt3sas_base_attach - attach controller instance
5938 * @ioc: per adapter object
5939 *
5940 * Returns 0 for success, non-zero for failure.
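 *
 * Illustrative call-site sketch (not verbatim driver code; the unwind
 * label is hypothetical): the PCI probe path is expected to call this
 * once per controller, after the Scsi_Host and MPT3SAS_ADAPTER have
 * been set up, e.g.
 *
 *	r = mpt3sas_base_attach(ioc);
 *	if (r)
 *		goto out_attach_fail;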
5941 */
5942 int
5943 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5944 {
5945 int r, i;
5946 int cpu_id, last_cpu_id = 0;
5947
5948 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5949 __func__));
5950
5951 /* setup cpu_msix_table */
5952 ioc->cpu_count = num_online_cpus();
5953 for_each_online_cpu(cpu_id)
5954 last_cpu_id = cpu_id;
5955 ioc->cpu_msix_table_sz = last_cpu_id + 1;
5956 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5957 ioc->reply_queue_count = 1;
5958 if (!ioc->cpu_msix_table) {
5959 dfailprintk(ioc, pr_info(MPT3SAS_FMT
5960 "allocation for cpu_msix_table failed!!!\n",
5961 ioc->name));
5962 r = -ENOMEM;
5963 goto out_free_resources;
5964 }
5965
5966 if (ioc->is_warpdrive) {
5967 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5968 sizeof(resource_size_t *), GFP_KERNEL);
5969 if (!ioc->reply_post_host_index) {
5970 dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5971 "for reply_post_host_index failed!!!\n",
5972 ioc->name));
5973 r = -ENOMEM;
5974 goto out_free_resources;
5975 }
5976 }
5977
5978 ioc->rdpq_array_enable_assigned = 0;
5979 ioc->dma_mask = 0;
5980 r = mpt3sas_base_map_resources(ioc);
5981 if (r)
5982 goto out_free_resources;
5983
5984 pci_set_drvdata(ioc->pdev, ioc->shost);
5985 r = _base_get_ioc_facts(ioc);
5986 if (r)
5987 goto out_free_resources;
5988
5989 switch (ioc->hba_mpi_version_belonged) {
5990 case MPI2_VERSION:
5991 ioc->build_sg_scmd = &_base_build_sg_scmd;
5992 ioc->build_sg = &_base_build_sg;
5993 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5994 break;
5995 case MPI25_VERSION:
5996 case MPI26_VERSION:
5997 /*
5998 * In SAS3.0,
5999 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
6000 * Target Status - all require the IEEE formatted scatter gather
6001 * elements.
6002 */
6003 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6004 ioc->build_sg = &_base_build_sg_ieee;
6005 ioc->build_nvme_prp = &_base_build_nvme_prp;
6006 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6007 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
6008
6009 break;
6010 }
6011
6012 if (ioc->atomic_desc_capable) {
6013 ioc->put_smid_default = &_base_put_smid_default_atomic;
6014 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
6015 ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
6016 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
6017 ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
6018 } else {
6019 ioc->put_smid_default = &_base_put_smid_default;
6020 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
6021 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
6022 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
6023 ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
6024 }
6025
6026
6027 /*
6028 * These are the function pointers for requests that don't
6029 * require the IEEE scatter gather elements.
6030 *
6031 * For example, Configuration Pages and SAS IOUNIT Control don't.
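 *
 * So, as a sketch, a Configuration Page request would be built via
 * ioc->build_sg_mpi()/ioc->build_zero_len_sge_mpi() assigned below,
 * while a SCSI IO on SAS3 parts goes through the IEEE variants
 * chosen above.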
6032 */
6033 ioc->build_sg_mpi = &_base_build_sg;
6034 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6035
6036 r = _base_make_ioc_ready(ioc, SOFT_RESET);
6037 if (r)
6038 goto out_free_resources;
6039
6040 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6041 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6042 if (!ioc->pfacts) {
6043 r = -ENOMEM;
6044 goto out_free_resources;
6045 }
6046
6047 for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
6048 r = _base_get_port_facts(ioc, i);
6049 if (r)
6050 goto out_free_resources;
6051 }
6052
6053 r = _base_allocate_memory_pools(ioc);
6054 if (r)
6055 goto out_free_resources;
6056
6057 init_waitqueue_head(&ioc->reset_wq);
6058
6059 /* allocate memory pd handle bitmask list */
6060 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6061 if (ioc->facts.MaxDevHandle % 8)
6062 ioc->pd_handles_sz++;
6063 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6064 GFP_KERNEL);
6065 if (!ioc->pd_handles) {
6066 r = -ENOMEM;
6067 goto out_free_resources;
6068 }
6069 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6070 GFP_KERNEL);
6071 if (!ioc->blocking_handles) {
6072 r = -ENOMEM;
6073 goto out_free_resources;
6074 }
6075
6076 /* allocate memory for pending OS device add list */
6077 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6078 if (ioc->facts.MaxDevHandle % 8)
6079 ioc->pend_os_device_add_sz++;
6080 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6081 GFP_KERNEL);
6082 if (!ioc->pend_os_device_add) { r = -ENOMEM;
6083 goto out_free_resources; }
6084
6085 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6086 ioc->device_remove_in_progress =
6087 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
6088 if (!ioc->device_remove_in_progress) { r = -ENOMEM;
6089 goto out_free_resources; }
6090
6091 ioc->fwfault_debug = mpt3sas_fwfault_debug;
6092
6093 /* base internal command bits */
6094 mutex_init(&ioc->base_cmds.mutex);
6095 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6096 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6097
6098 /* port_enable command bits */
6099 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6100 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6101
6102 /* transport internal command bits */
6103 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6104 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6105 mutex_init(&ioc->transport_cmds.mutex);
6106
6107 /* scsih internal command bits */
6108 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6109 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6110 mutex_init(&ioc->scsih_cmds.mutex);
6111
6112 /* task management internal command bits */
6113 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6114 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6115 mutex_init(&ioc->tm_cmds.mutex);
6116
6117 /* config page internal command bits */
6118 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6119 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6120 mutex_init(&ioc->config_cmds.mutex);
6121
6122 /* ctl module internal command bits */
6123 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6124 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6125 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6126 mutex_init(&ioc->ctl_cmds.mutex);
6127
6128 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6129 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6130 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6131 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
6132 r = -ENOMEM;
6133 goto
out_free_resources; 6134 } 6135 6136 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 6137 ioc->event_masks[i] = -1; 6138 6139 /* here we enable the events we care about */ 6140 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); 6141 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 6142 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 6143 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 6144 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 6145 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 6146 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); 6147 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); 6148 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 6149 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 6150 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); 6151 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 6152 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { 6153 if (ioc->is_gen35_ioc) { 6154 _base_unmask_events(ioc, 6155 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); 6156 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION); 6157 _base_unmask_events(ioc, 6158 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 6159 } 6160 } 6161 r = _base_make_ioc_operational(ioc); 6162 if (r) 6163 goto out_free_resources; 6164 6165 ioc->non_operational_loop = 0; 6166 ioc->got_task_abort_from_ioctl = 0; 6167 return 0; 6168 6169 out_free_resources: 6170 6171 ioc->remove_host = 1; 6172 6173 mpt3sas_base_free_resources(ioc); 6174 _base_release_memory_pools(ioc); 6175 pci_set_drvdata(ioc->pdev, NULL); 6176 kfree(ioc->cpu_msix_table); 6177 if (ioc->is_warpdrive) 6178 kfree(ioc->reply_post_host_index); 6179 kfree(ioc->pd_handles); 6180 kfree(ioc->blocking_handles); 6181 kfree(ioc->device_remove_in_progress); 6182 kfree(ioc->pend_os_device_add); 6183 kfree(ioc->tm_cmds.reply); 6184 kfree(ioc->transport_cmds.reply); 6185 kfree(ioc->scsih_cmds.reply); 6186 kfree(ioc->config_cmds.reply); 6187 kfree(ioc->base_cmds.reply); 6188 kfree(ioc->port_enable_cmds.reply); 6189 kfree(ioc->ctl_cmds.reply); 6190 kfree(ioc->ctl_cmds.sense); 6191 kfree(ioc->pfacts); 6192 ioc->ctl_cmds.reply = NULL; 6193 ioc->base_cmds.reply = NULL; 6194 ioc->tm_cmds.reply = NULL; 6195 ioc->scsih_cmds.reply = NULL; 6196 ioc->transport_cmds.reply = NULL; 6197 ioc->config_cmds.reply = NULL; 6198 ioc->pfacts = NULL; 6199 return r; 6200 } 6201 6202 6203 /** 6204 * mpt3sas_base_detach - remove controller instance 6205 * @ioc: per adapter object 6206 * 6207 * Return nothing. 
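 *
 * Illustrative teardown sketch (not verbatim driver code): the PCI
 * remove path is expected to call this after the Scsi_Host has been
 * removed, e.g.
 *
 *	scsi_remove_host(shost);
 *	mpt3sas_base_detach(ioc);
 *	scsi_host_put(shost);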
6208 */
6209 void
6210 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6211 {
6212 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6213 __func__));
6214
6215 mpt3sas_base_stop_watchdog(ioc);
6216 mpt3sas_base_free_resources(ioc);
6217 _base_release_memory_pools(ioc);
6218 pci_set_drvdata(ioc->pdev, NULL);
6219 kfree(ioc->cpu_msix_table);
6220 if (ioc->is_warpdrive)
6221 kfree(ioc->reply_post_host_index);
6222 kfree(ioc->pd_handles);
6223 kfree(ioc->blocking_handles);
6224 kfree(ioc->device_remove_in_progress);
6225 kfree(ioc->pend_os_device_add);
6226 kfree(ioc->pfacts);
6227 kfree(ioc->ctl_cmds.reply);
6228 kfree(ioc->ctl_cmds.sense);
6229 kfree(ioc->base_cmds.reply);
6230 kfree(ioc->port_enable_cmds.reply);
6231 kfree(ioc->tm_cmds.reply);
6232 kfree(ioc->transport_cmds.reply);
6233 kfree(ioc->scsih_cmds.reply);
6234 kfree(ioc->config_cmds.reply);
6235 }
6236
6237 /**
6238 * _base_reset_handler - reset callback handler (for base)
6239 * @ioc: per adapter object
6240 * @reset_phase: phase
6241 *
6242 * The handler for doing any required cleanup or initialization.
6243 *
6244 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
6245 * or MPT3_IOC_DONE_RESET.
6246 *
6247 * Return nothing.
6248 */
6249 static void
6250 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
6251 {
6252 mpt3sas_scsih_reset_handler(ioc, reset_phase);
6253 mpt3sas_ctl_reset_handler(ioc, reset_phase);
6254 switch (reset_phase) {
6255 case MPT3_IOC_PRE_RESET:
6256 dtmprintk(ioc, pr_info(MPT3SAS_FMT
6257 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
6258 break;
6259 case MPT3_IOC_AFTER_RESET:
6260 dtmprintk(ioc, pr_info(MPT3SAS_FMT
6261 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
6262 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
6263 ioc->transport_cmds.status |= MPT3_CMD_RESET;
6264 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
6265 complete(&ioc->transport_cmds.done);
6266 }
6267 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6268 ioc->base_cmds.status |= MPT3_CMD_RESET;
6269 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
6270 complete(&ioc->base_cmds.done);
6271 }
6272 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6273 ioc->port_enable_failed = 1;
6274 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
6275 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
6276 if (ioc->is_driver_loading) {
6277 ioc->start_scan_failed =
6278 MPI2_IOCSTATUS_INTERNAL_ERROR;
6279 ioc->start_scan = 0;
6280 ioc->port_enable_cmds.status =
6281 MPT3_CMD_NOT_USED;
6282 } else
6283 complete(&ioc->port_enable_cmds.done);
6284 }
6285 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
6286 ioc->config_cmds.status |= MPT3_CMD_RESET;
6287 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
6288 ioc->config_cmds.smid = USHRT_MAX;
6289 complete(&ioc->config_cmds.done);
6290 }
6291 break;
6292 case MPT3_IOC_DONE_RESET:
6293 dtmprintk(ioc, pr_info(MPT3SAS_FMT
6294 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
6295 break;
6296 }
6297 }
6298
6299 /**
6300 * _wait_for_commands_to_complete - wait for pending I/O to complete
6301 * @ioc: Pointer to MPT_ADAPTER structure
6302 *
6303 * This function waits up to 10 seconds for all pending commands to complete
6304 * prior to putting the controller in reset.
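 *
 * The wait sleeps on ioc->reset_wq (initialized in
 * mpt3sas_base_attach()); the I/O completion path is expected to wake
 * it as ioc->pending_io_count drops back to zero.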
6305 */
6306 static void
6307 _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6308 {
6309 u32 ioc_state;
6310
6311 ioc->pending_io_count = 0;
6312
6313 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6314 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
6315 return;
6316
6317 /* pending command count */
6318 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
6319
6320 if (!ioc->pending_io_count)
6321 return;
6322
6323 /* wait for pending commands to complete */
6324 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
6325 }
6326
6327 /**
6328 * mpt3sas_base_hard_reset_handler - reset controller
6329 * @ioc: Pointer to MPT_ADAPTER structure
6330 * @type: FORCE_BIG_HAMMER or SOFT_RESET
6331 *
6332 * Returns 0 for success, non-zero for failure.
6333 */
6334 int
6335 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
6336 enum reset_type type)
6337 {
6338 int r;
6339 unsigned long flags;
6340 u32 ioc_state;
6341 u8 is_fault = 0, is_trigger = 0;
6342
6343 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
6344 __func__));
6345
6346 if (ioc->pci_error_recovery) {
6347 pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
6348 ioc->name, __func__);
6349 r = 0;
6350 goto out_unlocked;
6351 }
6352
6353 if (mpt3sas_fwfault_debug)
6354 mpt3sas_halt_firmware(ioc);
6355
6356 /* wait for an active reset in progress to complete */
6357 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
6358 do {
6359 ssleep(1);
6360 } while (ioc->shost_recovery == 1);
6361 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
6362 __func__));
6363 return ioc->ioc_reset_in_progress_status;
6364 }
6365
6366 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6367 ioc->shost_recovery = 1;
6368 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6369
6370 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6371 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
6372 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6373 MPT3_DIAG_BUFFER_IS_RELEASED))) {
6374 is_trigger = 1;
6375 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6376 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
6377 is_fault = 1;
6378 }
6379 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
6380 _wait_for_commands_to_complete(ioc);
6381 _base_mask_interrupts(ioc);
6382 r = _base_make_ioc_ready(ioc, type);
6383 if (r)
6384 goto out;
6385 _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
6386
6387 /* If this hard reset is called while port enable is active, then
6388 * there is no reason to call make_ioc_operational
6389 */
6390 if (ioc->is_driver_loading && ioc->port_enable_failed) {
6391 ioc->remove_host = 1;
6392 r = -EFAULT;
6393 goto out;
6394 }
6395 r = _base_get_ioc_facts(ioc);
6396 if (r)
6397 goto out;
6398
6399 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
6400 panic("%s: Issue occurred with flashing controller firmware. "
6401 "Please reboot the system and ensure that the correct"
6402 " firmware version is running\n", ioc->name);
6403
6404 r = _base_make_ioc_operational(ioc);
6405 if (!r)
6406 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
6407
6408 out:
6409 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
6410 ioc->name, __func__, ((r == 0) ?
"SUCCESS" : "FAILED"))); 6411 6412 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 6413 ioc->ioc_reset_in_progress_status = r; 6414 ioc->shost_recovery = 0; 6415 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 6416 ioc->ioc_reset_count++; 6417 mutex_unlock(&ioc->reset_in_progress_mutex); 6418 6419 out_unlocked: 6420 if ((r == 0) && is_trigger) { 6421 if (is_fault) 6422 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); 6423 else 6424 mpt3sas_trigger_master(ioc, 6425 MASTER_TRIGGER_ADAPTER_RESET); 6426 } 6427 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, 6428 __func__)); 6429 return r; 6430 } 6431