/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: value to set
 * @kp: kernel parameter descriptor
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 *
 * @Returns - Nothing
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * The system register region occupies the first 256 bytes of BAR0;
	 * MPI frames start at offset 256. At most 32 MPI frames are
	 * supported, so 32 * 128 = 4K. The clone of the reply free pool for
	 * the mCPU starts right after the MPI frames.
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}
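
/*
 * Worked example (illustrative, using the BAR0 layout map found later in
 * _clone_sg_entries()): with RequestCredit = 31 (so cmd_credit = 32) and
 * request_sz = 128, the cloned reply free pool begins at
 * 256 + 32 * 128 = 4352 from the start of BAR0, and entry @index lands at
 * 4352 + index * 4. This matches the "4352 - 4864 Reply_free pool" range
 * of that layout map.
 */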

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i], dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i], dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * @Return: chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * @Return - Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid can have 64K, starting from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * @Returns - Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	/* Added extra 1 to reach end of chain. */
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}
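
/*
 * Worked example for _base_get_chain()/_base_get_chain_phys() above
 * (illustrative, same assumptions as the layout map in _clone_sg_entries():
 * cmd_credit = 32, request_sz = 128, REPLY_FREE_POOL_SIZE = 512,
 * MaxChainDepth = 3): the chain region begins at 256 + 32 * 128 + 512 =
 * 4864 from the start of BAR0, and the frame for (smid, sge_chain_count)
 * sits at 4864 + (smid * 3 + sge_chain_count) * 128, matching the
 * "4864 - 17152 SGE chain element" range of that layout map.
 */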

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the provided smid.
 *			(Each smid can have 64K, starting from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * @Returns - Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
 *			lookup list and provides the chain_buffer
 *			address for the matching dma address.
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * @Returns - Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
		dma_addr_t chain_buffer_dma)
{
	u16 index;

	for (index = 0; index < ioc->chain_depth; index++) {
		if (ioc->chain_lookup[index].chain_buffer_dma ==
				chain_buffer_dma)
			return ioc->chain_lookup[index].chain_buffer;
	}
	pr_info(MPT3SAS_FMT
	    "Provided chain_buffer_dma address is not in the lookup list\n",
	    ioc->name);
	return NULL;
}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 *
 * @Returns: Nothing.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
		void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32 sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get scsi_cmd; once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get the virtual
	 * address associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Replies need extra
	 *		room; for the mCPU case this is kept at four
	 *		times maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K	MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (sgel->FlagsLength &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which, on passing
			 * chain_buffer_dma, returns chain_buffer. Get
			 * the virtual address for sgel->Address.
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						sgel->Address);
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying a 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address = (u32)dst_addr_phys;
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (sgel->FlagsLength & 0x00ffffff));
					/*
					 * FIXME: this relies on a zero
					 * PCI mem_offset.
					 */
					sgel->Address = (u32)buff_ptr_phys;
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (sgel->FlagsLength & 0x00ffffff));
					sgel->Address = (u32)buff_ptr_phys;
				}
			}
			buff_ptr += (sgel->FlagsLength & 0x00ffffff);
			buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff);
			if ((sgel->FlagsLength &
					(MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have
				 * an associated sg_next. Sanity-check that
				 * sg_next is not NULL; it would be a bug
				 * if it were.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}
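
/*
 * In short (summary of the walk above, not from the original source):
 * _clone_sg_entries() walks the request's 32-bit SGL once. Chain elements
 * have their source/destination recorded for double buffering into the
 * per-smid chain region of BAR0 and their Address fields rewritten to the
 * BAR0 physical address; simple elements of a write have their payload
 * copied into the per-smid host buffer region before their Address fields
 * are rewritten likewise. The deferred 128-byte chain frame copies happen
 * at eob_clone_chain, once all Address fixups are complete.
 */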

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function by considering the controller to be in a
		 * non-operational state. So here priority is given to EEH
		 * recovery. If it does not resolve the issue, the mpt3sas
		 * driver will consider this controller non-operational and
		 * remove the dead ioc function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call the _scsih_flush_pending_cmds callback so that we
		 * flush all pending commands back to the OS. This call is
		 * required to avoid a deadlock at the block layer. A dead
		 * IOC will fail to do a diag reset, and this call is safe
		 * since a dead ioc will never return any command back from
		 * HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ?
"success" : "failed"); 581 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 582 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) 583 mpt3sas_base_fault_info(ioc, doorbell & 584 MPI2_DOORBELL_DATA_MASK); 585 if (rc && (doorbell & MPI2_IOC_STATE_MASK) != 586 MPI2_IOC_STATE_OPERATIONAL) 587 return; /* don't rearm timer */ 588 } 589 590 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 591 rearm_timer: 592 if (ioc->fault_reset_work_q) 593 queue_delayed_work(ioc->fault_reset_work_q, 594 &ioc->fault_reset_work, 595 msecs_to_jiffies(FAULT_POLLING_INTERVAL)); 596 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 597 } 598 599 /** 600 * mpt3sas_base_start_watchdog - start the fault_reset_work_q 601 * @ioc: per adapter object 602 * Context: sleep. 603 * 604 * Return nothing. 605 */ 606 void 607 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) 608 { 609 unsigned long flags; 610 611 if (ioc->fault_reset_work_q) 612 return; 613 614 /* initialize fault polling */ 615 616 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); 617 snprintf(ioc->fault_reset_work_q_name, 618 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", 619 ioc->driver_name, ioc->id); 620 ioc->fault_reset_work_q = 621 create_singlethread_workqueue(ioc->fault_reset_work_q_name); 622 if (!ioc->fault_reset_work_q) { 623 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", 624 ioc->name, __func__, __LINE__); 625 return; 626 } 627 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 628 if (ioc->fault_reset_work_q) 629 queue_delayed_work(ioc->fault_reset_work_q, 630 &ioc->fault_reset_work, 631 msecs_to_jiffies(FAULT_POLLING_INTERVAL)); 632 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 633 } 634 635 /** 636 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q 637 * @ioc: per adapter object 638 * Context: sleep. 639 * 640 * Return nothing. 641 */ 642 void 643 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) 644 { 645 unsigned long flags; 646 struct workqueue_struct *wq; 647 648 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 649 wq = ioc->fault_reset_work_q; 650 ioc->fault_reset_work_q = NULL; 651 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 652 if (wq) { 653 if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) 654 flush_workqueue(wq); 655 destroy_workqueue(wq); 656 } 657 } 658 659 /** 660 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code 661 * @ioc: per adapter object 662 * @fault_code: fault code 663 * 664 * Return nothing. 665 */ 666 void 667 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) 668 { 669 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", 670 ioc->name, fault_code); 671 } 672 673 /** 674 * mpt3sas_halt_firmware - halt's mpt controller firmware 675 * @ioc: per adapter object 676 * 677 * For debugging timeout related issues. Writing 0xCOFFEE00 678 * to the doorbell register will halt controller firmware. With 679 * the purpose to stop both driver and firmware, the enduser can 680 * obtain a ring buffer from controller UART. 

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The intent is
 * to stop both the driver and the firmware so that the end user can
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc, doorbell);
	else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config can't commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
= "ioc_init"; 907 break; 908 case MPI2_FUNCTION_PORT_ENABLE: 909 frame_sz = sizeof(Mpi2PortEnableRequest_t); 910 func_str = "port_enable"; 911 break; 912 case MPI2_FUNCTION_SMP_PASSTHROUGH: 913 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; 914 func_str = "smp_passthru"; 915 break; 916 case MPI2_FUNCTION_NVME_ENCAPSULATED: 917 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) + 918 ioc->sge_size; 919 func_str = "nvme_encapsulated"; 920 break; 921 default: 922 frame_sz = 32; 923 func_str = "unknown"; 924 break; 925 } 926 927 pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", 928 ioc->name, desc, ioc_status, request_hdr, func_str); 929 930 _debug_dump_mf(request_hdr, frame_sz/4); 931 } 932 933 /** 934 * _base_display_event_data - verbose translation of firmware asyn events 935 * @ioc: per adapter object 936 * @mpi_reply: reply mf payload returned from firmware 937 * 938 * Return nothing. 939 */ 940 static void 941 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, 942 Mpi2EventNotificationReply_t *mpi_reply) 943 { 944 char *desc = NULL; 945 u16 event; 946 947 if (!(ioc->logging_level & MPT_DEBUG_EVENTS)) 948 return; 949 950 event = le16_to_cpu(mpi_reply->Event); 951 952 switch (event) { 953 case MPI2_EVENT_LOG_DATA: 954 desc = "Log Data"; 955 break; 956 case MPI2_EVENT_STATE_CHANGE: 957 desc = "Status Change"; 958 break; 959 case MPI2_EVENT_HARD_RESET_RECEIVED: 960 desc = "Hard Reset Received"; 961 break; 962 case MPI2_EVENT_EVENT_CHANGE: 963 desc = "Event Change"; 964 break; 965 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 966 desc = "Device Status Change"; 967 break; 968 case MPI2_EVENT_IR_OPERATION_STATUS: 969 if (!ioc->hide_ir_msg) 970 desc = "IR Operation Status"; 971 break; 972 case MPI2_EVENT_SAS_DISCOVERY: 973 { 974 Mpi2EventDataSasDiscovery_t *event_data = 975 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; 976 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name, 977 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
			   (event_data->ReasonCode ==
				MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
				"start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont(" enumeration_status(0x%08x)",
				   le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	pr_warn(MPT3SAS_FMT
		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		ioc->name, log_info,
		originator_str, sas_loginfo.dw.code,
		sas_loginfo.dw.subcode);
}
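
/*
 * Decoding example (illustrative): the nexus loss loginfo 0x31170000
 * filtered above splits, per the bitfields of union loginfo_type (first
 * member in the low bits on little-endian), into bus_type = 0x3 (SAS),
 * originator = 0x1 (PL), code = 0x17 and sub_code = 0x0000, and would
 * otherwise print as:
 *
 *	log_info(0x31170000): originator(PL), code(0x17), sub_code(0x0000)
 */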

/**
 * _base_display_reply_info - displays ioc status and log info for a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt;
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt;
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
				"DELAYED: EVENT ACK: event (0x%04x)\n",
				ioc->name, le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt3sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}
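
/*
 * smid layout note (summary, not from the original source): smids are
 * partitioned into ranges, which is what _base_get_cb_idx() below decodes:
 * [1 .. ctl_smid - 1] are SCSI IO trackers looked up via scsi_cmd_priv(),
 * ctl_smid itself belongs to the ctl (ioctl) module, then
 * [hi_priority_smid .. internal_smid - 1] index hpr_lookup[], and
 * [internal_smid .. hba_queue_depth] index internal_lookup[].
 */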

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
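
/*
 * Note (summary, not from the original source): each entry in the reply
 * post queue is one of these 64-bit descriptors. A vacant entry reads back
 * as all ones, which is why _base_interrupt() below treats
 * rd.u.low == UINT_MAX || rd.u.high == UINT_MAX as "queue empty" and why
 * consumed entries are overwritten with ULLONG_MAX.
 */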

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
						reply,
						ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors,
		 * so that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds > ioc->hba_queue_depth/3) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			completed_cmds = 1;
		}
		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support the combined reply queue feature:
	 * 1. Get the correct Supplemental Reply Post Host Index Register,
	 *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
	 *    Post Host Index Register address bank, replyPostRegisterIndex[].
	 * 2. Then update this register with the new reply host index value
	 *    in the ReplyPostIndex field, and the MSIxIndex field with the
	 *    msix_index value reduced to a value between 0 and 7 using a
	 *    modulo 8 operation, since each Supplemental Reply Post Host
	 *    Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's, just update the Reply Post Host Index register
	 * with the new reply host index value in the ReplyPostIndex field
	 * and the msix_index value in the MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - does the controller support
 *				      multi-reply queues
 * @ioc: per adapter object
 *
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Return nothing.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
				ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}
}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Returns cb_idx, the allocated callback index.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}
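
/*
 * Illustrative usage (a sketch, not code from this driver; the names
 * example_cb and example_cb_idx are hypothetical): a sub-module registers
 * its completion routine once at load time and keeps the returned index
 * for use when sending requests and at unload time:
 *
 *	static u8 example_cb_idx;
 *
 *	static u8 example_cb(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 *		u8 msix_index, u32 reply)
 *	{
 *		return 1;	// let _base_interrupt free the smid
 *	}
 *
 *	example_cb_idx = mpt3sas_base_register_callback_handler(example_cb);
 *	...
 *	mpt3sas_base_release_callback_handler(example_cb_idx);
 */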

/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}


/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}
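
/*
 * Layout note (illustrative, assuming MPI2_SGE_FLAGS_SHIFT is 24, as the
 * 0x00ffffff length masking elsewhere in this file implies): FlagsLength
 * packs the SGE flags into the top byte and the transfer length into the
 * low 24 bits, so a 512-byte simple element carries
 * (flags << 24) | 512 before the cpu_to_le32() conversion.
 */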

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Returns chain tracker (from ioc->free_chain_list)
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
			       struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_chain_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
			"chain buffers not available\n", ioc->name));
		return NULL;
	}
	chain_req = list_entry(ioc->free_chain_list.next,
	    struct chain_tracker, tracker_list);
	list_del_init(&chain_req->tracker_list);
	list_add_tail(&chain_req->tracker_list, &st->chain_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return chain_req;
}


/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}
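
/*
 * Illustrative call (a sketch with hypothetical buffers, not code from
 * this driver): for an internal request that writes a 24-byte parameter
 * block and reads back a 512-byte response,
 *
 *	_base_build_sg(ioc, psge, param_dma, 24, resp_dma, 512);
 *
 * emits a HOST_TO_IOC simple element for the write followed by a read
 * element flagged LAST_ELEMENT | END_OF_BUFFER | END_OF_LIST.
 */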

/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as an SGL does.
 * Note however, that this function is only used by the IOCTL call, so the
 * memory given will be guaranteed to be contiguous. There is no need to
 * translate non-contiguous SGLs into a PRP in this case. All PRPs will
 * describe contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
 * a PRP list pointer or a PRP element, depending upon the command. PRP2
 * contains the second PRP element if the memory being described fits within 2
 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries. Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a 4KB physical memory page, and the offset
 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list. If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the region
 * begins within the 4KB page. The last memory segment may end before the end
 * of the 4KB segment, depending upon the overall size of the memory being
 * described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer
 * length is used to determine where the end of the data memory buffer is
 * located, and how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Returns nothing.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int		prp_size = NVME_PRP_SIZE;
	__le64		*prp_entry, *prp1_entry, *prp2_entry;
	__le64		*prp_page;
	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
	u32		offset, entry_len;
	u32		page_mask_result, page_mask;
	size_t		length;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}
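
	/*
	 * Worked example (illustrative, assuming ioc->page_size = 4096 and a
	 * hypothetical data buffer at DMA address 0x10000030 of length 8192):
	 * page_mask = 0xfff, so the first entry gets offset = 0x030 and
	 * entry_len = 4096 - 0x30 = 4048, making PRP1 = 0x10000030. Since the
	 * remaining 4144 bytes exceed one page, PRP2 becomes a pointer to the
	 * PRP list, whose entries are 0x10001000 and 0x10002000 (the last
	 * covering only the 48-byte tail).
	 */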
1868 */ 1869 prp_entry_dma = prp_page_dma; 1870 1871 /* Get physical address and length of the data buffer. */ 1872 if (data_in_sz) { 1873 dma_addr = data_in_dma; 1874 length = data_in_sz; 1875 } else { 1876 dma_addr = data_out_dma; 1877 length = data_out_sz; 1878 } 1879 1880 /* Loop while the length is not zero. */ 1881 while (length) { 1882 /* 1883 * Check if we need to put a list pointer here if we are at 1884 * page boundary - prp_size (8 bytes). 1885 */ 1886 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 1887 if (!page_mask_result) { 1888 /* 1889 * This is the last entry in a PRP List, so we need to 1890 * put a PRP list pointer here. What this does is: 1891 * - bump the current memory pointer to the next 1892 * address, which will be the next full page. 1893 * - set the PRP Entry to point to that page. This 1894 * is now the PRP List pointer. 1895 * - bump the PRP Entry pointer the start of the 1896 * next page. Since all of this PRP memory is 1897 * contiguous, no need to get a new page - it's 1898 * just the next address. 1899 */ 1900 prp_entry_dma++; 1901 *prp_entry = cpu_to_le64(prp_entry_dma); 1902 prp_entry++; 1903 } 1904 1905 /* Need to handle if entry will be part of a page. */ 1906 offset = dma_addr & page_mask; 1907 entry_len = ioc->page_size - offset; 1908 1909 if (prp_entry == prp1_entry) { 1910 /* 1911 * Must fill in the first PRP pointer (PRP1) before 1912 * moving on. 1913 */ 1914 *prp1_entry = cpu_to_le64(dma_addr); 1915 1916 /* 1917 * Now point to the second PRP entry within the 1918 * command (PRP2). 1919 */ 1920 prp_entry = prp2_entry; 1921 } else if (prp_entry == prp2_entry) { 1922 /* 1923 * Should the PRP2 entry be a PRP List pointer or just 1924 * a regular PRP pointer? If there is more than one 1925 * more page of data, must use a PRP List pointer. 1926 */ 1927 if (length > ioc->page_size) { 1928 /* 1929 * PRP2 will contain a PRP List pointer because 1930 * more PRP's are needed with this command. The 1931 * list will start at the beginning of the 1932 * contiguous buffer. 1933 */ 1934 *prp2_entry = cpu_to_le64(prp_entry_dma); 1935 1936 /* 1937 * The next PRP Entry will be the start of the 1938 * first PRP List. 1939 */ 1940 prp_entry = prp_page; 1941 } else { 1942 /* 1943 * After this, the PRP Entries are complete. 1944 * This command uses 2 PRP's and no PRP list. 1945 */ 1946 *prp2_entry = cpu_to_le64(dma_addr); 1947 } 1948 } else { 1949 /* 1950 * Put entry in list and bump the addresses. 1951 * 1952 * After PRP1 and PRP2 are filled in, this will fill in 1953 * all remaining PRP entries in a PRP List, one per 1954 * each time through the loop. 1955 */ 1956 *prp_entry = cpu_to_le64(dma_addr); 1957 prp_entry++; 1958 prp_entry_dma++; 1959 } 1960 1961 /* 1962 * Bump the phys address of the command's data buffer by the 1963 * entry_len. 1964 */ 1965 dma_addr += entry_len; 1966 1967 /* Decrement length accounting for last partial page. */ 1968 if (entry_len > length) 1969 length = 0; 1970 else 1971 length -= entry_len; 1972 } 1973 } 1974 1975 /** 1976 * base_make_prp_nvme - 1977 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only 1978 * 1979 * @ioc: per adapter object 1980 * @scmd: SCSI command from the mid-layer 1981 * @mpi_request: mpi request 1982 * @smid: msg Index 1983 * @sge_count: scatter gather element count. 
1984 *
1985 * Returns nothing; the choice between building PRPs here and building
1986 * IEEE SGLs instead is made beforehand by base_is_prp_possible().
1987 */
1988 static void
1989 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
1990 struct scsi_cmnd *scmd,
1991 Mpi25SCSIIORequest_t *mpi_request,
1992 u16 smid, int sge_count)
1993 {
1994 int sge_len, num_prp_in_chain = 0;
1995 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
1996 __le64 *curr_buff;
1997 dma_addr_t msg_dma, sge_addr, offset;
1998 u32 page_mask, page_mask_result;
1999 struct scatterlist *sg_scmd;
2000 u32 first_prp_len;
2001 int data_len = scsi_bufflen(scmd);
2002 u32 nvme_pg_size;
2003
2004 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2005 /*
2006 * NVMe has a very convoluted PRP format: one PRP is required
2007 * for each page or partial page. The driver needs to split up OS
2008 * sg_list entries if they are longer than one page or cross a page
2009 * boundary. The driver also has to insert a PRP list pointer entry as
2010 * the last entry in each physical page of the PRP list.
2011 *
2012 * NOTE: The first PRP "entry" is actually placed in the first
2013 * SGL entry in the main message as IEEE 64 format. The 2nd
2014 * entry in the main message is the chain element, and the rest
2015 * of the PRP entries are built in the contiguous pcie buffer.
2016 */
2017 page_mask = nvme_pg_size - 1;
2018
2019 /*
2020 * Native SGL is needed.
2021 * Put a chain element in main message frame that points to the first
2022 * chain buffer.
2023 *
2024 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2025 * a native SGL.
2026 */
2027
2028 /* Set main message chain element pointer */
2029 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2030 /*
2031 * For NVMe the chain element needs to be the 2nd SG entry in the main
2032 * message.
2033 */
2034 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2035 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2036
2037 /*
2038 * For the PRP entries, use the specially allocated buffer of
2039 * contiguous memory. Normal chain buffers can't be used
2040 * because each chain buffer would need to be the size of an OS
2041 * page (4k).
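 *
 * Rough layout sketch of what this routine produces (illustrative only,
 * not a normative register map):
 *   main frame SGL[0]: IEEE simple element = first PRP (may be unaligned)
 *   main frame SGL[1]: IEEE chain element -> contiguous PCIe buffer
 *   PCIe buffer      : PRP 2, PRP 3, ... with a PRP list pointer written
 *                      as the last entry of each page of the buffer itself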
2042 */
2043 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2044 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2045
2046 main_chain_element->Address = cpu_to_le64(msg_dma);
2047 main_chain_element->NextChainOffset = 0;
2048 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2049 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2050 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2051
2052 /* Build first prp; the sge need not be page aligned */
2053 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2054 sg_scmd = scsi_sglist(scmd);
2055 sge_addr = sg_dma_address(sg_scmd);
2056 sge_len = sg_dma_len(sg_scmd);
2057
2058 offset = sge_addr & page_mask;
2059 first_prp_len = nvme_pg_size - offset;
2060
2061 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2062 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2063
2064 data_len -= first_prp_len;
2065
2066 if (sge_len > first_prp_len) {
2067 sge_addr += first_prp_len;
2068 sge_len -= first_prp_len;
2069 } else if (data_len && (sge_len == first_prp_len)) {
2070 sg_scmd = sg_next(sg_scmd);
2071 sge_addr = sg_dma_address(sg_scmd);
2072 sge_len = sg_dma_len(sg_scmd);
2073 }
2074
2075 for (;;) {
2076 offset = sge_addr & page_mask;
2077
2078 /* Put PRP pointer due to page boundary */
2079 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2080 if (unlikely(!page_mask_result)) {
2081 scmd_printk(KERN_NOTICE,
2082 scmd, "page boundary curr_buff: 0x%p\n",
2083 curr_buff);
2084 msg_dma += 8;
2085 *curr_buff = cpu_to_le64(msg_dma);
2086 curr_buff++;
2087 num_prp_in_chain++;
2088 }
2089
2090 *curr_buff = cpu_to_le64(sge_addr);
2091 curr_buff++;
2092 msg_dma += 8;
2093 num_prp_in_chain++;
2094
2095 sge_addr += nvme_pg_size;
2096 sge_len -= nvme_pg_size;
2097 data_len -= nvme_pg_size;
2098
2099 if (data_len <= 0)
2100 break;
2101
2102 if (sge_len > 0)
2103 continue;
2104
2105 sg_scmd = sg_next(sg_scmd);
2106 sge_addr = sg_dma_address(sg_scmd);
2107 sge_len = sg_dma_len(sg_scmd);
2108 }
2109
2110 main_chain_element->Length =
2111 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2112 return;
2113 }
2114
2115 static bool
2116 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2117 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2118 {
2119 u32 data_length = 0;
2120 struct scatterlist *sg_scmd;
2121 bool build_prp = true;
2122
2123 data_length = scsi_bufflen(scmd);
2124 sg_scmd = scsi_sglist(scmd);
2125
2126 /* If the data length is <= 16K and the number of SGE entries is <= 2,
2127 * we build an IEEE SGL instead.
2128 */
2129 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2130 build_prp = false;
2131
2132 return build_prp;
2133 }
2134
2135 /**
2136 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2137 * determine if the driver needs to build a native SGL. If so, that native
2138 * SGL is built in the special contiguous buffers allocated especially for
2139 * PCIe SGL creation. If the driver will not build a native SGL, it returns
2140 * 1 and a normal IEEE SGL will be built. Currently this routine
2141 * supports NVMe.
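 *
 * For example, mirroring base_is_prp_possible() above: an 8KB transfer in
 * 2 SGEs stays on the IEEE SGL path, while a 64KB transfer spread over 16
 * SGEs is routed to the PRP builder.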
2142 * @ioc: per adapter object
2143 * @mpi_request: mf request pointer
2144 * @smid: system request message index
2145 * @scmd: scsi command
2146 * @pcie_device: points to the PCIe device's info
2147 *
2148 * Returns 0 if native SGL was built, 1 if no SGL was built
2149 */
2150 static int
2151 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2152 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2153 struct _pcie_device *pcie_device)
2154 {
2155 struct scatterlist *sg_scmd;
2156 int sges_left;
2157
2158 /* Get the SG list pointer and info. */
2159 sg_scmd = scsi_sglist(scmd);
2160 sges_left = scsi_dma_map(scmd);
2161 if (sges_left < 0) {
2162 sdev_printk(KERN_ERR, scmd->device,
2163 "scsi_dma_map failed: request for %d bytes!\n",
2164 scsi_bufflen(scmd));
2165 return 1;
2166 }
2167
2168 /* Check if we need to build a native SG list. */
2169 if (base_is_prp_possible(ioc, pcie_device,
2170 scmd, sges_left) == 0) {
2171 /* PRP is not possible for this I/O; fall back to an IEEE SGL. */
2172 goto out;
2173 }
2174
2175 /*
2176 * Build native NVMe PRP.
2177 */
2178 base_make_prp_nvme(ioc, scmd, mpi_request,
2179 smid, sges_left);
2180
2181 return 0;
2182 out:
2183 scsi_dma_unmap(scmd);
2184 return 1;
2185 }
2186
2187 /**
2188 * _base_add_sg_single_ieee - add sg element for IEEE format
2189 * @paddr: virtual address for SGE
2190 * @flags: SGE flags
2191 * @chain_offset: number of 128 byte elements from start of segment
2192 * @length: data transfer length
2193 * @dma_addr: Physical address
2194 *
2195 * Return nothing.
2196 */
2197 static void
2198 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2199 dma_addr_t dma_addr)
2200 {
2201 Mpi25IeeeSgeChain64_t *sgel = paddr;
2202
2203 sgel->Flags = flags;
2204 sgel->NextChainOffset = chain_offset;
2205 sgel->Length = cpu_to_le32(length);
2206 sgel->Address = cpu_to_le64(dma_addr);
2207 }
2208
2209 /**
2210 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2211 * @ioc: per adapter object
2212 * @paddr: virtual address for SGE
2213 *
2214 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2215 * something to use if the target device goes brain dead and tries
2216 * to send data even when none is asked for.
2217 *
2218 * Return nothing.
2219 */
2220 static void
2221 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2222 {
2223 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2224 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2225 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2226
2227 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2228 }
2229
2230 /**
2231 * _base_build_sg_scmd - main sg creation routine
2232 * (the pcie_device argument is unused here)
2233 * @ioc: per adapter object
2234 * @scmd: scsi command
2235 * @smid: system request message index
2236 * @unused: unused pcie_device pointer
2237 * Context: none.
2238 *
2239 * The main routine that builds scatter gather table from a given
2240 * scsi request sent via the .queuecommand main handler.
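 *
 * Sketch of the frame layout this produces when a request carries more
 * SGEs than fit in the main frame (counts are illustrative only):
 *   main frame : N-1 simple SGEs + 1 chain SGE -> chain buffer 0
 *   chain 0    : simple SGEs + trailing chain SGE -> chain buffer 1
 *   last chain : remaining simple SGEs, the final one flagged
 *                LAST_ELEMENT | END_OF_BUFFER | END_OF_LIST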
2241 * 2242 * Returns 0 success, anything else error 2243 */ 2244 static int 2245 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, 2246 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) 2247 { 2248 Mpi2SCSIIORequest_t *mpi_request; 2249 dma_addr_t chain_dma; 2250 struct scatterlist *sg_scmd; 2251 void *sg_local, *chain; 2252 u32 chain_offset; 2253 u32 chain_length; 2254 u32 chain_flags; 2255 int sges_left; 2256 u32 sges_in_segment; 2257 u32 sgl_flags; 2258 u32 sgl_flags_last_element; 2259 u32 sgl_flags_end_buffer; 2260 struct chain_tracker *chain_req; 2261 2262 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2263 2264 /* init scatter gather flags */ 2265 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; 2266 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2267 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 2268 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) 2269 << MPI2_SGE_FLAGS_SHIFT; 2270 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | 2271 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) 2272 << MPI2_SGE_FLAGS_SHIFT; 2273 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2274 2275 sg_scmd = scsi_sglist(scmd); 2276 sges_left = scsi_dma_map(scmd); 2277 if (sges_left < 0) { 2278 sdev_printk(KERN_ERR, scmd->device, 2279 "pci_map_sg failed: request for %d bytes!\n", 2280 scsi_bufflen(scmd)); 2281 return -ENOMEM; 2282 } 2283 2284 sg_local = &mpi_request->SGL; 2285 sges_in_segment = ioc->max_sges_in_main_message; 2286 if (sges_left <= sges_in_segment) 2287 goto fill_in_last_segment; 2288 2289 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + 2290 (sges_in_segment * ioc->sge_size))/4; 2291 2292 /* fill in main message segment when there is a chain following */ 2293 while (sges_in_segment) { 2294 if (sges_in_segment == 1) 2295 ioc->base_add_sg_single(sg_local, 2296 sgl_flags_last_element | sg_dma_len(sg_scmd), 2297 sg_dma_address(sg_scmd)); 2298 else 2299 ioc->base_add_sg_single(sg_local, sgl_flags | 2300 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2301 sg_scmd = sg_next(sg_scmd); 2302 sg_local += ioc->sge_size; 2303 sges_left--; 2304 sges_in_segment--; 2305 } 2306 2307 /* initializing the chain flags and pointers */ 2308 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 2309 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2310 if (!chain_req) 2311 return -1; 2312 chain = chain_req->chain_buffer; 2313 chain_dma = chain_req->chain_buffer_dma; 2314 do { 2315 sges_in_segment = (sges_left <= 2316 ioc->max_sges_in_chain_message) ? sges_left : 2317 ioc->max_sges_in_chain_message; 2318 chain_offset = (sges_left == sges_in_segment) ? 
2319 0 : (sges_in_segment * ioc->sge_size)/4; 2320 chain_length = sges_in_segment * ioc->sge_size; 2321 if (chain_offset) { 2322 chain_offset = chain_offset << 2323 MPI2_SGE_CHAIN_OFFSET_SHIFT; 2324 chain_length += ioc->sge_size; 2325 } 2326 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | 2327 chain_length, chain_dma); 2328 sg_local = chain; 2329 if (!chain_offset) 2330 goto fill_in_last_segment; 2331 2332 /* fill in chain segments */ 2333 while (sges_in_segment) { 2334 if (sges_in_segment == 1) 2335 ioc->base_add_sg_single(sg_local, 2336 sgl_flags_last_element | 2337 sg_dma_len(sg_scmd), 2338 sg_dma_address(sg_scmd)); 2339 else 2340 ioc->base_add_sg_single(sg_local, sgl_flags | 2341 sg_dma_len(sg_scmd), 2342 sg_dma_address(sg_scmd)); 2343 sg_scmd = sg_next(sg_scmd); 2344 sg_local += ioc->sge_size; 2345 sges_left--; 2346 sges_in_segment--; 2347 } 2348 2349 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2350 if (!chain_req) 2351 return -1; 2352 chain = chain_req->chain_buffer; 2353 chain_dma = chain_req->chain_buffer_dma; 2354 } while (1); 2355 2356 2357 fill_in_last_segment: 2358 2359 /* fill the last segment */ 2360 while (sges_left) { 2361 if (sges_left == 1) 2362 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | 2363 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2364 else 2365 ioc->base_add_sg_single(sg_local, sgl_flags | 2366 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2367 sg_scmd = sg_next(sg_scmd); 2368 sg_local += ioc->sge_size; 2369 sges_left--; 2370 } 2371 2372 return 0; 2373 } 2374 2375 /** 2376 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format 2377 * @ioc: per adapter object 2378 * @scmd: scsi command 2379 * @smid: system request message index 2380 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be 2381 * constructed on need. 2382 * Context: none. 2383 * 2384 * The main routine that builds scatter gather table from a given 2385 * scsi request sent via the .queuecommand main handler. 2386 * 2387 * Returns 0 success, anything else error 2388 */ 2389 static int 2390 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 2391 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) 2392 { 2393 Mpi25SCSIIORequest_t *mpi_request; 2394 dma_addr_t chain_dma; 2395 struct scatterlist *sg_scmd; 2396 void *sg_local, *chain; 2397 u32 chain_offset; 2398 u32 chain_length; 2399 int sges_left; 2400 u32 sges_in_segment; 2401 u8 simple_sgl_flags; 2402 u8 simple_sgl_flags_last; 2403 u8 chain_sgl_flags; 2404 struct chain_tracker *chain_req; 2405 2406 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2407 2408 /* init scatter gather flags */ 2409 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2410 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2411 simple_sgl_flags_last = simple_sgl_flags | 2412 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 2413 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2414 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2415 2416 /* Check if we need to build a native SG list. */ 2417 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, 2418 smid, scmd, pcie_device) == 0)) { 2419 /* We built a native SG list, just return. 
*/ 2420 return 0; 2421 } 2422 2423 sg_scmd = scsi_sglist(scmd); 2424 sges_left = scsi_dma_map(scmd); 2425 if (sges_left < 0) { 2426 sdev_printk(KERN_ERR, scmd->device, 2427 "pci_map_sg failed: request for %d bytes!\n", 2428 scsi_bufflen(scmd)); 2429 return -ENOMEM; 2430 } 2431 2432 sg_local = &mpi_request->SGL; 2433 sges_in_segment = (ioc->request_sz - 2434 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 2435 if (sges_left <= sges_in_segment) 2436 goto fill_in_last_segment; 2437 2438 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 2439 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 2440 2441 /* fill in main message segment when there is a chain following */ 2442 while (sges_in_segment > 1) { 2443 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2444 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2445 sg_scmd = sg_next(sg_scmd); 2446 sg_local += ioc->sge_size_ieee; 2447 sges_left--; 2448 sges_in_segment--; 2449 } 2450 2451 /* initializing the pointers */ 2452 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2453 if (!chain_req) 2454 return -1; 2455 chain = chain_req->chain_buffer; 2456 chain_dma = chain_req->chain_buffer_dma; 2457 do { 2458 sges_in_segment = (sges_left <= 2459 ioc->max_sges_in_chain_message) ? sges_left : 2460 ioc->max_sges_in_chain_message; 2461 chain_offset = (sges_left == sges_in_segment) ? 2462 0 : sges_in_segment; 2463 chain_length = sges_in_segment * ioc->sge_size_ieee; 2464 if (chain_offset) 2465 chain_length += ioc->sge_size_ieee; 2466 _base_add_sg_single_ieee(sg_local, chain_sgl_flags, 2467 chain_offset, chain_length, chain_dma); 2468 2469 sg_local = chain; 2470 if (!chain_offset) 2471 goto fill_in_last_segment; 2472 2473 /* fill in chain segments */ 2474 while (sges_in_segment) { 2475 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2476 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2477 sg_scmd = sg_next(sg_scmd); 2478 sg_local += ioc->sge_size_ieee; 2479 sges_left--; 2480 sges_in_segment--; 2481 } 2482 2483 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2484 if (!chain_req) 2485 return -1; 2486 chain = chain_req->chain_buffer; 2487 chain_dma = chain_req->chain_buffer_dma; 2488 } while (1); 2489 2490 2491 fill_in_last_segment: 2492 2493 /* fill the last segment */ 2494 while (sges_left > 0) { 2495 if (sges_left == 1) 2496 _base_add_sg_single_ieee(sg_local, 2497 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 2498 sg_dma_address(sg_scmd)); 2499 else 2500 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2501 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2502 sg_scmd = sg_next(sg_scmd); 2503 sg_local += ioc->sge_size_ieee; 2504 sges_left--; 2505 } 2506 2507 return 0; 2508 } 2509 2510 /** 2511 * _base_build_sg_ieee - build generic sg for IEEE format 2512 * @ioc: per adapter object 2513 * @psge: virtual address for SGE 2514 * @data_out_dma: physical address for WRITES 2515 * @data_out_sz: data xfer size for WRITES 2516 * @data_in_dma: physical address for READS 2517 * @data_in_sz: data xfer size for READS 2518 * 2519 * Return nothing. 
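 *
 * E.g. a bidirectional pass-through ends up as exactly two IEEE simple
 * elements below: a WRITE element for the data_out buffer, then a READ
 * element for the data_in buffer carrying the END_OF_LIST flag.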
2520 */
2521 static void
2522 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2523 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2524 size_t data_in_sz)
2525 {
2526 u8 sgl_flags;
2527
2528 if (!data_out_sz && !data_in_sz) {
2529 _base_build_zero_len_sge_ieee(ioc, psge);
2530 return;
2531 }
2532
2533 if (data_out_sz && data_in_sz) {
2534 /* WRITE sgel first */
2535 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2536 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2537 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2538 data_out_dma);
2539
2540 /* incr sgel */
2541 psge += ioc->sge_size_ieee;
2542
2543 /* READ sgel last */
2544 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2545 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2546 data_in_dma);
2547 } else if (data_out_sz) /* WRITE */ {
2548 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2549 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2550 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2551 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2552 data_out_dma);
2553 } else if (data_in_sz) /* READ */ {
2554 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2555 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2556 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2557 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2558 data_in_dma);
2559 }
2560 }
2561
2562 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2563
2564 /**
2565 * _base_config_dma_addressing - set dma addressing
2566 * @ioc: per adapter object
2567 * @pdev: PCI device struct
2568 *
2569 * Returns 0 for success, non-zero for failure.
2570 */
2571 static int
2572 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2573 {
2574 struct sysinfo s;
2575 u64 consistent_dma_mask;
2576
2577 if (ioc->is_mcpu_endpoint)
2578 goto try_32bit;
2579
2580 if (ioc->dma_mask)
2581 consistent_dma_mask = DMA_BIT_MASK(64);
2582 else
2583 consistent_dma_mask = DMA_BIT_MASK(32);
2584
2585 if (sizeof(dma_addr_t) > 4) {
2586 const uint64_t required_mask =
2587 dma_get_required_mask(&pdev->dev);
2588 if ((required_mask > DMA_BIT_MASK(32)) &&
2589 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
2590 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
2591 ioc->base_add_sg_single = &_base_add_sg_single_64;
2592 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2593 ioc->dma_mask = 64;
2594 goto out;
2595 }
2596 }
2597
2598 try_32bit:
2599 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2600 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2601 ioc->base_add_sg_single = &_base_add_sg_single_32;
2602 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2603 ioc->dma_mask = 32;
2604 } else
2605 return -ENODEV;
2606
2607 out:
2608 si_meminfo(&s);
2609 pr_info(MPT3SAS_FMT
2610 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2611 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
2612
2613 return 0;
2614 }
2615
2616 static int
2617 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2618 struct pci_dev *pdev)
2619 {
2620 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2621 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2622 return -ENODEV;
2623 }
2624 return 0;
2625 }
2626
2627 /**
2628 * _base_check_enable_msix - check whether the controller is MSI-X capable.
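 *
 * For example, reading a Message Control value of 0x001F from config
 * space below yields (0x001F & 0x3FF) + 1 = 32 supported MSI-X vectors.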
2629 * @ioc: per adapter object
2630 *
2631 * Check to see if the card is capable of MSI-X, and set the number
2632 * of available msix vectors
2633 */
2634 static int
2635 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2636 {
2637 int base;
2638 u16 message_control;
2639
2640 /* Check whether the controller is a SAS2008 B0; if it is,
2641 * use IO-APIC instead of MSI-X.
2642 */
2643 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2644 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2645 return -EINVAL;
2646 }
2647
2648 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2649 if (!base) {
2650 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
2651 ioc->name));
2652 return -EINVAL;
2653 }
2654
2655 /* get msix vector count */
2656 /* NUMA_IO not supported for older controllers */
2657 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2658 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2659 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2660 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2661 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2662 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2663 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2664 ioc->msix_vector_count = 1;
2665 else {
2666 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2667 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2668 }
2669 dinitprintk(ioc, pr_info(MPT3SAS_FMT
2670 "msix is supported, vector_count(%d)\n",
2671 ioc->name, ioc->msix_vector_count));
2672 return 0;
2673 }
2674
2675 /**
2676 * _base_free_irq - free irq
2677 * @ioc: per adapter object
2678 *
2679 * Freeing respective reply_queue from the list.
2680 */
2681 static void
2682 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2683 {
2684 struct adapter_reply_queue *reply_q, *next;
2685
2686 if (list_empty(&ioc->reply_queue_list))
2687 return;
2688
2689 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2690 list_del(&reply_q->list);
2691 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2692 reply_q);
2693 kfree(reply_q);
2694 }
2695 }
2696
2697 /**
2698 * _base_request_irq - request irq
2699 * @ioc: per adapter object
2700 * @index: msix index into vector table
2701 *
2702 * Inserting respective reply_queue into the list.
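 *
 * For example (assuming driver_name is "mpt3sas"), the handler registered
 * below for adapter id 0, vector 2 shows up in /proc/interrupts as
 * "mpt3sas0-msix2"; without MSI-X it is simply "mpt3sas0".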
2703 */ 2704 static int 2705 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) 2706 { 2707 struct pci_dev *pdev = ioc->pdev; 2708 struct adapter_reply_queue *reply_q; 2709 int r; 2710 2711 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); 2712 if (!reply_q) { 2713 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n", 2714 ioc->name, (int)sizeof(struct adapter_reply_queue)); 2715 return -ENOMEM; 2716 } 2717 reply_q->ioc = ioc; 2718 reply_q->msix_index = index; 2719 2720 atomic_set(&reply_q->busy, 0); 2721 if (ioc->msix_enable) 2722 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", 2723 ioc->driver_name, ioc->id, index); 2724 else 2725 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", 2726 ioc->driver_name, ioc->id); 2727 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, 2728 IRQF_SHARED, reply_q->name, reply_q); 2729 if (r) { 2730 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", 2731 reply_q->name, pci_irq_vector(pdev, index)); 2732 kfree(reply_q); 2733 return -EBUSY; 2734 } 2735 2736 INIT_LIST_HEAD(&reply_q->list); 2737 list_add_tail(&reply_q->list, &ioc->reply_queue_list); 2738 return 0; 2739 } 2740 2741 /** 2742 * _base_assign_reply_queues - assigning msix index for each cpu 2743 * @ioc: per adapter object 2744 * 2745 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity 2746 * 2747 * It would nice if we could call irq_set_affinity, however it is not 2748 * an exported symbol 2749 */ 2750 static void 2751 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) 2752 { 2753 unsigned int cpu, nr_cpus, nr_msix, index = 0; 2754 struct adapter_reply_queue *reply_q; 2755 2756 if (!_base_is_controller_msix_enabled(ioc)) 2757 return; 2758 2759 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); 2760 2761 nr_cpus = num_online_cpus(); 2762 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, 2763 ioc->facts.MaxMSIxVectors); 2764 if (!nr_msix) 2765 return; 2766 2767 if (smp_affinity_enable) { 2768 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 2769 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev, 2770 reply_q->msix_index); 2771 if (!mask) { 2772 pr_warn(MPT3SAS_FMT "no affinity for msi %x\n", 2773 ioc->name, reply_q->msix_index); 2774 continue; 2775 } 2776 2777 for_each_cpu_and(cpu, mask, cpu_online_mask) { 2778 if (cpu >= ioc->cpu_msix_table_sz) 2779 break; 2780 ioc->cpu_msix_table[cpu] = reply_q->msix_index; 2781 } 2782 } 2783 return; 2784 } 2785 cpu = cpumask_first(cpu_online_mask); 2786 2787 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 2788 2789 unsigned int i, group = nr_cpus / nr_msix; 2790 2791 if (cpu >= nr_cpus) 2792 break; 2793 2794 if (index < nr_cpus % nr_msix) 2795 group++; 2796 2797 for (i = 0 ; i < group ; i++) { 2798 ioc->cpu_msix_table[cpu] = reply_q->msix_index; 2799 cpu = cpumask_next(cpu, cpu_online_mask); 2800 } 2801 index++; 2802 } 2803 } 2804 2805 /** 2806 * _base_disable_msix - disables msix 2807 * @ioc: per adapter object 2808 * 2809 */ 2810 static void 2811 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc) 2812 { 2813 if (!ioc->msix_enable) 2814 return; 2815 pci_disable_msix(ioc->pdev); 2816 ioc->msix_enable = 0; 2817 } 2818 2819 /** 2820 * _base_enable_msix - enables msix, failback to io_apic 2821 * @ioc: per adapter object 2822 * 2823 */ 2824 static int 2825 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) 2826 { 2827 int r; 2828 int i, local_max_msix_vectors; 2829 u8 try_msix = 0; 2830 unsigned int irq_flags = PCI_IRQ_MSIX; 2831 2832 if (msix_disable == -1 
|| msix_disable == 0) 2833 try_msix = 1; 2834 2835 if (!try_msix) 2836 goto try_ioapic; 2837 2838 if (_base_check_enable_msix(ioc) != 0) 2839 goto try_ioapic; 2840 2841 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 2842 ioc->msix_vector_count); 2843 2844 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" 2845 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, 2846 ioc->cpu_count, max_msix_vectors); 2847 2848 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 2849 local_max_msix_vectors = (reset_devices) ? 1 : 8; 2850 else 2851 local_max_msix_vectors = max_msix_vectors; 2852 2853 if (local_max_msix_vectors > 0) 2854 ioc->reply_queue_count = min_t(int, local_max_msix_vectors, 2855 ioc->reply_queue_count); 2856 else if (local_max_msix_vectors == 0) 2857 goto try_ioapic; 2858 2859 if (ioc->msix_vector_count < ioc->cpu_count) 2860 smp_affinity_enable = 0; 2861 2862 if (smp_affinity_enable) 2863 irq_flags |= PCI_IRQ_AFFINITY; 2864 2865 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count, 2866 irq_flags); 2867 if (r < 0) { 2868 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2869 "pci_alloc_irq_vectors failed (r=%d) !!!\n", 2870 ioc->name, r)); 2871 goto try_ioapic; 2872 } 2873 2874 ioc->msix_enable = 1; 2875 ioc->reply_queue_count = r; 2876 for (i = 0; i < ioc->reply_queue_count; i++) { 2877 r = _base_request_irq(ioc, i); 2878 if (r) { 2879 _base_free_irq(ioc); 2880 _base_disable_msix(ioc); 2881 goto try_ioapic; 2882 } 2883 } 2884 2885 return 0; 2886 2887 /* failback to io_apic interrupt routing */ 2888 try_ioapic: 2889 2890 ioc->reply_queue_count = 1; 2891 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); 2892 if (r < 0) { 2893 dfailprintk(ioc, pr_info(MPT3SAS_FMT 2894 "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", 2895 ioc->name, r)); 2896 } else 2897 r = _base_request_irq(ioc, 0); 2898 2899 return r; 2900 } 2901 2902 /** 2903 * mpt3sas_base_unmap_resources - free controller resources 2904 * @ioc: per adapter object 2905 */ 2906 static void 2907 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 2908 { 2909 struct pci_dev *pdev = ioc->pdev; 2910 2911 dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n", 2912 ioc->name, __func__)); 2913 2914 _base_free_irq(ioc); 2915 _base_disable_msix(ioc); 2916 2917 if (ioc->combined_reply_queue) { 2918 kfree(ioc->replyPostRegisterIndex); 2919 ioc->replyPostRegisterIndex = NULL; 2920 } 2921 2922 if (ioc->chip_phys) { 2923 iounmap(ioc->chip); 2924 ioc->chip_phys = 0; 2925 } 2926 2927 if (pci_is_enabled(pdev)) { 2928 pci_release_selected_regions(ioc->pdev, ioc->bars); 2929 pci_disable_pcie_error_reporting(pdev); 2930 pci_disable_device(pdev); 2931 } 2932 } 2933 2934 /** 2935 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 2936 * @ioc: per adapter object 2937 * 2938 * Returns 0 for success, non-zero for failure. 
2939 */
2940 int
2941 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2942 {
2943 struct pci_dev *pdev = ioc->pdev;
2944 u32 memap_sz;
2945 u32 pio_sz;
2946 int i, r = 0;
2947 u64 pio_chip = 0;
2948 phys_addr_t chip_phys = 0;
2949 struct adapter_reply_queue *reply_q;
2950
2951 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2952 ioc->name, __func__));
2953
2954 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2955 if (pci_enable_device_mem(pdev)) {
2956 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2957 ioc->name);
2958 ioc->bars = 0;
2959 return -ENODEV;
2960 }
2961
2962
2963 if (pci_request_selected_regions(pdev, ioc->bars,
2964 ioc->driver_name)) {
2965 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2966 ioc->name);
2967 ioc->bars = 0;
2968 r = -ENODEV;
2969 goto out_fail;
2970 }
2971
2972 /* AER (Advanced Error Reporting) hooks */
2973 pci_enable_pcie_error_reporting(pdev);
2974
2975 pci_set_master(pdev);
2976
2977
2978 if (_base_config_dma_addressing(ioc, pdev) != 0) {
2979 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2980 ioc->name, pci_name(pdev));
2981 r = -ENODEV;
2982 goto out_fail;
2983 }
2984
2985 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2986 (!memap_sz || !pio_sz); i++) {
2987 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2988 if (pio_sz)
2989 continue;
2990 pio_chip = (u64)pci_resource_start(pdev, i);
2991 pio_sz = pci_resource_len(pdev, i);
2992 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2993 if (memap_sz)
2994 continue;
2995 ioc->chip_phys = pci_resource_start(pdev, i);
2996 chip_phys = ioc->chip_phys;
2997 memap_sz = pci_resource_len(pdev, i);
2998 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
2999 }
3000 }
3001
3002 if (ioc->chip == NULL) {
3003 pr_err(MPT3SAS_FMT "unable to map adapter memory "
3004 "or resource not found\n", ioc->name);
3005 r = -EINVAL;
3006 goto out_fail;
3007 }
3008
3009 _base_mask_interrupts(ioc);
3010
3011 r = _base_get_ioc_facts(ioc);
3012 if (r)
3013 goto out_fail;
3014
3015 if (!ioc->rdpq_array_enable_assigned) {
3016 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3017 ioc->rdpq_array_enable_assigned = 1;
3018 }
3019
3020 r = _base_enable_msix(ioc);
3021 if (r)
3022 goto out_fail;
3023
3024 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3025 * revision HBAs and also only when reply queue count is greater than 8
3026 */
3027 if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
3028 /* Determine the Supplemental Reply Post Host Index Register
3029 * addresses. These registers start at offset
3030 * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and each subsequent
3031 * register lies MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
3032 * bytes beyond the previous one.
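 * (Illustration with made-up numbers: if the base offset were 0x6C and
 * the per-register stride 0x10, index register 2 would live at
 * chip + 0x6C + 0x20. The loop below computes exactly this, using the
 * real constants from the MPI headers.)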
3033 */ 3034 ioc->replyPostRegisterIndex = kcalloc( 3035 ioc->combined_reply_index_count, 3036 sizeof(resource_size_t *), GFP_KERNEL); 3037 if (!ioc->replyPostRegisterIndex) { 3038 dfailprintk(ioc, printk(MPT3SAS_FMT 3039 "allocation for reply Post Register Index failed!!!\n", 3040 ioc->name)); 3041 r = -ENOMEM; 3042 goto out_fail; 3043 } 3044 3045 for (i = 0; i < ioc->combined_reply_index_count; i++) { 3046 ioc->replyPostRegisterIndex[i] = (resource_size_t *) 3047 ((u8 *)&ioc->chip->Doorbell + 3048 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 3049 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 3050 } 3051 } else 3052 ioc->combined_reply_queue = 0; 3053 3054 if (ioc->is_warpdrive) { 3055 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) 3056 &ioc->chip->ReplyPostHostIndex; 3057 3058 for (i = 1; i < ioc->cpu_msix_table_sz; i++) 3059 ioc->reply_post_host_index[i] = 3060 (resource_size_t __iomem *) 3061 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) 3062 * 4))); 3063 } 3064 3065 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) 3066 pr_info(MPT3SAS_FMT "%s: IRQ %d\n", 3067 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 3068 "IO-APIC enabled"), 3069 pci_irq_vector(ioc->pdev, reply_q->msix_index)); 3070 3071 pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n", 3072 ioc->name, &chip_phys, ioc->chip, memap_sz); 3073 pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n", 3074 ioc->name, (unsigned long long)pio_chip, pio_sz); 3075 3076 /* Save PCI configuration state for recovery from PCI AER/EEH errors */ 3077 pci_save_state(pdev); 3078 return 0; 3079 3080 out_fail: 3081 mpt3sas_base_unmap_resources(ioc); 3082 return r; 3083 } 3084 3085 /** 3086 * mpt3sas_base_get_msg_frame - obtain request mf pointer 3087 * @ioc: per adapter object 3088 * @smid: system request message index(smid zero is invalid) 3089 * 3090 * Returns virt pointer to message frame. 3091 */ 3092 void * 3093 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3094 { 3095 return (void *)(ioc->request + (smid * ioc->request_sz)); 3096 } 3097 3098 /** 3099 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr 3100 * @ioc: per adapter object 3101 * @smid: system request message index 3102 * 3103 * Returns virt pointer to sense buffer. 3104 */ 3105 void * 3106 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3107 { 3108 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 3109 } 3110 3111 /** 3112 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr 3113 * @ioc: per adapter object 3114 * @smid: system request message index 3115 * 3116 * Returns phys pointer to the low 32bit address of the sense buffer. 3117 */ 3118 __le32 3119 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3120 { 3121 return cpu_to_le32(ioc->sense_dma + ((smid - 1) * 3122 SCSI_SENSE_BUFFERSIZE)); 3123 } 3124 3125 /** 3126 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr 3127 * @ioc: per adapter object 3128 * @smid: system request message index 3129 * 3130 * Returns virt pointer to a PCIe SGL. 3131 */ 3132 void * 3133 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3134 { 3135 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); 3136 } 3137 3138 /** 3139 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr 3140 * @ioc: per adapter object 3141 * @smid: system request message index 3142 * 3143 * Returns phys pointer to the address of the PCIe buffer. 
3144 */ 3145 dma_addr_t 3146 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3147 { 3148 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; 3149 } 3150 3151 /** 3152 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address 3153 * @ioc: per adapter object 3154 * @phys_addr: lower 32 physical addr of the reply 3155 * 3156 * Converts 32bit lower physical addr into a virt address. 3157 */ 3158 void * 3159 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) 3160 { 3161 if (!phys_addr) 3162 return NULL; 3163 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); 3164 } 3165 3166 static inline u8 3167 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) 3168 { 3169 return ioc->cpu_msix_table[raw_smp_processor_id()]; 3170 } 3171 3172 /** 3173 * mpt3sas_base_get_smid - obtain a free smid from internal queue 3174 * @ioc: per adapter object 3175 * @cb_idx: callback index 3176 * 3177 * Returns smid (zero is invalid) 3178 */ 3179 u16 3180 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 3181 { 3182 unsigned long flags; 3183 struct request_tracker *request; 3184 u16 smid; 3185 3186 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3187 if (list_empty(&ioc->internal_free_list)) { 3188 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3189 pr_err(MPT3SAS_FMT "%s: smid not available\n", 3190 ioc->name, __func__); 3191 return 0; 3192 } 3193 3194 request = list_entry(ioc->internal_free_list.next, 3195 struct request_tracker, tracker_list); 3196 request->cb_idx = cb_idx; 3197 smid = request->smid; 3198 list_del(&request->tracker_list); 3199 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3200 return smid; 3201 } 3202 3203 /** 3204 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue 3205 * @ioc: per adapter object 3206 * @cb_idx: callback index 3207 * @scmd: pointer to scsi command object 3208 * 3209 * Returns smid (zero is invalid) 3210 */ 3211 u16 3212 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 3213 struct scsi_cmnd *scmd) 3214 { 3215 struct scsiio_tracker *request = scsi_cmd_priv(scmd); 3216 unsigned int tag = scmd->request->tag; 3217 u16 smid; 3218 3219 smid = tag + 1; 3220 request->cb_idx = cb_idx; 3221 request->msix_io = _base_get_msix_index(ioc); 3222 request->smid = smid; 3223 INIT_LIST_HEAD(&request->chain_list); 3224 return smid; 3225 } 3226 3227 /** 3228 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue 3229 * @ioc: per adapter object 3230 * @cb_idx: callback index 3231 * 3232 * Returns smid (zero is invalid) 3233 */ 3234 u16 3235 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 3236 { 3237 unsigned long flags; 3238 struct request_tracker *request; 3239 u16 smid; 3240 3241 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3242 if (list_empty(&ioc->hpr_free_list)) { 3243 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3244 return 0; 3245 } 3246 3247 request = list_entry(ioc->hpr_free_list.next, 3248 struct request_tracker, tracker_list); 3249 request->cb_idx = cb_idx; 3250 smid = request->smid; 3251 list_del(&request->tracker_list); 3252 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3253 return smid; 3254 } 3255 3256 static void 3257 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) 3258 { 3259 /* 3260 * See _wait_for_commands_to_complete() call with regards to this code. 
3261 */
3262 if (ioc->shost_recovery && ioc->pending_io_count) {
3263 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
3264 if (ioc->pending_io_count == 0)
3265 wake_up(&ioc->reset_wq);
3266 }
3267 }
3268
3269 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3270 struct scsiio_tracker *st)
3271 {
3272 if (WARN_ON(st->smid == 0))
3273 return;
3274 st->cb_idx = 0xFF;
3275 st->direct_io = 0;
3276 if (!list_empty(&st->chain_list)) {
3277 unsigned long flags;
3278
3279 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3280 list_splice_init(&st->chain_list, &ioc->free_chain_list);
3281 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3282 }
3283 }
3284
3285 /**
3286 * mpt3sas_base_free_smid - put smid back on free_list
3287 * @ioc: per adapter object
3288 * @smid: system request message index
3289 *
3290 * Return nothing.
3291 */
3292 void
3293 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3294 {
3295 unsigned long flags;
3296 int i;
3297
3298 if (smid < ioc->hi_priority_smid) {
3299 struct scsiio_tracker *st;
3300
3301 st = _get_st_from_smid(ioc, smid);
3302 if (!st) {
3303 _base_recovery_check(ioc);
3304 return;
3305 }
3306 mpt3sas_base_clear_st(ioc, st);
3307 _base_recovery_check(ioc);
3308 return;
3309 }
3310
3311 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3312 if (smid < ioc->internal_smid) {
3313 /* hi-priority */
3314 i = smid - ioc->hi_priority_smid;
3315 ioc->hpr_lookup[i].cb_idx = 0xFF;
3316 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3317 } else if (smid <= ioc->hba_queue_depth) {
3318 /* internal queue */
3319 i = smid - ioc->internal_smid;
3320 ioc->internal_lookup[i].cb_idx = 0xFF;
3321 list_add(&ioc->internal_lookup[i].tracker_list,
3322 &ioc->internal_free_list);
3323 }
3324 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3325 }
3326
3327 /**
3328 * _base_mpi_ep_writeq - 32 bit write to MMIO
3329 * @b: data payload
3330 * @addr: address in MMIO space
3331 * @writeq_lock: spin lock
3332 *
3333 * This is special handling for the MPI endpoint to take care of 32-bit
3334 * environments, where it is not guaranteed that the entire word is sent
3335 * in one transfer.
3336 */
3337 static inline void
3338 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3339 spinlock_t *writeq_lock)
3340 {
3341 unsigned long flags;
3342 __u64 data_out = cpu_to_le64(b);
3343
3344 spin_lock_irqsave(writeq_lock, flags);
3345 writel((u32)(data_out), addr);
3346 writel((u32)(data_out >> 32), (addr + 4));
3347 spin_unlock_irqrestore(writeq_lock, flags);
3348 }
3349
3350 /**
3351 * _base_writeq - 64 bit write to MMIO
3352 * @b: data payload
3353 * @addr: address in MMIO space
3354 * @writeq_lock: spin lock
3355 *
3356 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3357 * care of 32-bit environments, where it is not guaranteed that the entire word
3358 * is sent in one transfer.
3359 */
3360 #if defined(writeq) && defined(CONFIG_64BIT)
3361 static inline void
3362 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3363 {
3364 writeq(cpu_to_le64(b), addr);
3365 }
3366 #else
3367 static inline void
3368 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3369 {
3370 _base_mpi_ep_writeq(b, addr, writeq_lock);
3371 }
3372 #endif
3373
3374 /**
3375 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3376 * @ioc: per adapter object
3377 * @smid: system request message index
3378 * @handle: device handle
3379 *
3380 * Return nothing.
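 *
 * A hedged illustration of the descriptor post done below: on a
 * little-endian 32-bit build without writeq(), _base_mpi_ep_writeq()
 * splits a descriptor such as 0x1122334455667788 into two locked 32-bit
 * writes, 0x55667788 to RequestDescriptorPostLow followed by 0x11223344
 * to the next dword.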
3382 */ 3383 static void 3384 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 3385 { 3386 Mpi2RequestDescriptorUnion_t descriptor; 3387 u64 *request = (u64 *)&descriptor; 3388 void *mpi_req_iomem; 3389 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 3390 3391 _clone_sg_entries(ioc, (void *) mfp, smid); 3392 mpi_req_iomem = (void *)ioc->chip + 3393 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 3394 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3395 ioc->request_sz); 3396 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3397 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3398 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3399 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3400 descriptor.SCSIIO.LMID = 0; 3401 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3402 &ioc->scsi_lookup_lock); 3403 } 3404 3405 /** 3406 * _base_put_smid_scsi_io - send SCSI_IO request to firmware 3407 * @ioc: per adapter object 3408 * @smid: system request message index 3409 * @handle: device handle 3410 * 3411 * Return nothing. 3412 */ 3413 static void 3414 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 3415 { 3416 Mpi2RequestDescriptorUnion_t descriptor; 3417 u64 *request = (u64 *)&descriptor; 3418 3419 3420 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 3421 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3422 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3423 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3424 descriptor.SCSIIO.LMID = 0; 3425 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3426 &ioc->scsi_lookup_lock); 3427 } 3428 3429 /** 3430 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware 3431 * @ioc: per adapter object 3432 * @smid: system request message index 3433 * @handle: device handle 3434 * 3435 * Return nothing. 3436 */ 3437 void 3438 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3439 u16 handle) 3440 { 3441 Mpi2RequestDescriptorUnion_t descriptor; 3442 u64 *request = (u64 *)&descriptor; 3443 3444 descriptor.SCSIIO.RequestFlags = 3445 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 3446 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); 3447 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 3448 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 3449 descriptor.SCSIIO.LMID = 0; 3450 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3451 &ioc->scsi_lookup_lock); 3452 } 3453 3454 /** 3455 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware 3456 * @ioc: per adapter object 3457 * @smid: system request message index 3458 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. 3459 * Return nothing. 3460 */ 3461 void 3462 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3463 u16 msix_task) 3464 { 3465 Mpi2RequestDescriptorUnion_t descriptor; 3466 void *mpi_req_iomem; 3467 u64 *request; 3468 3469 if (ioc->is_mcpu_endpoint) { 3470 MPI2RequestHeader_t *request_hdr; 3471 3472 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 3473 3474 request_hdr = (MPI2RequestHeader_t *)mfp; 3475 /* TBD 256 is offset within sys register. 
*/ 3476 mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET 3477 + (smid * ioc->request_sz); 3478 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3479 ioc->request_sz); 3480 } 3481 3482 request = (u64 *)&descriptor; 3483 3484 descriptor.HighPriority.RequestFlags = 3485 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3486 descriptor.HighPriority.MSIxIndex = msix_task; 3487 descriptor.HighPriority.SMID = cpu_to_le16(smid); 3488 descriptor.HighPriority.LMID = 0; 3489 descriptor.HighPriority.Reserved1 = 0; 3490 if (ioc->is_mcpu_endpoint) 3491 _base_mpi_ep_writeq(*request, 3492 &ioc->chip->RequestDescriptorPostLow, 3493 &ioc->scsi_lookup_lock); 3494 else 3495 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3496 &ioc->scsi_lookup_lock); 3497 } 3498 3499 /** 3500 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to 3501 * firmware 3502 * @ioc: per adapter object 3503 * @smid: system request message index 3504 * 3505 * Return nothing. 3506 */ 3507 void 3508 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3509 { 3510 Mpi2RequestDescriptorUnion_t descriptor; 3511 u64 *request = (u64 *)&descriptor; 3512 3513 descriptor.Default.RequestFlags = 3514 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; 3515 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3516 descriptor.Default.SMID = cpu_to_le16(smid); 3517 descriptor.Default.LMID = 0; 3518 descriptor.Default.DescriptorTypeDependent = 0; 3519 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3520 &ioc->scsi_lookup_lock); 3521 } 3522 3523 /** 3524 * mpt3sas_base_put_smid_default - Default, primarily used for config pages 3525 * @ioc: per adapter object 3526 * @smid: system request message index 3527 * 3528 * Return nothing. 3529 */ 3530 void 3531 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3532 { 3533 Mpi2RequestDescriptorUnion_t descriptor; 3534 void *mpi_req_iomem; 3535 u64 *request; 3536 MPI2RequestHeader_t *request_hdr; 3537 3538 if (ioc->is_mcpu_endpoint) { 3539 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 3540 3541 request_hdr = (MPI2RequestHeader_t *)mfp; 3542 3543 _clone_sg_entries(ioc, (void *) mfp, smid); 3544 /* TBD 256 is offset within sys register */ 3545 mpi_req_iomem = (void *)ioc->chip + 3546 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 3547 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3548 ioc->request_sz); 3549 } 3550 request = (u64 *)&descriptor; 3551 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3552 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); 3553 descriptor.Default.SMID = cpu_to_le16(smid); 3554 descriptor.Default.LMID = 0; 3555 descriptor.Default.DescriptorTypeDependent = 0; 3556 if (ioc->is_mcpu_endpoint) 3557 _base_mpi_ep_writeq(*request, 3558 &ioc->chip->RequestDescriptorPostLow, 3559 &ioc->scsi_lookup_lock); 3560 else 3561 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 3562 &ioc->scsi_lookup_lock); 3563 } 3564 3565 /** 3566 * _base_display_OEMs_branding - Display branding string 3567 * @ioc: per adapter object 3568 * 3569 * Return nothing. 
3570 */
3571 static void
3572 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3573 {
3577 switch (ioc->pdev->subsystem_vendor) {
3578 case PCI_VENDOR_ID_INTEL:
3579 switch (ioc->pdev->device) {
3580 case MPI2_MFGPAGE_DEVID_SAS2008:
3581 switch (ioc->pdev->subsystem_device) {
3582 case MPT2SAS_INTEL_RMS2LL080_SSDID:
3583 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3584 MPT2SAS_INTEL_RMS2LL080_BRANDING);
3585 break;
3586 case MPT2SAS_INTEL_RMS2LL040_SSDID:
3587 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3588 MPT2SAS_INTEL_RMS2LL040_BRANDING);
3589 break;
3590 case MPT2SAS_INTEL_SSD910_SSDID:
3591 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3592 MPT2SAS_INTEL_SSD910_BRANDING);
3593 break;
3594 default:
3595 pr_info(MPT3SAS_FMT
3596 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3597 ioc->name, ioc->pdev->subsystem_device);
3598 break;
3599 }
break;
3600 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3601 switch (ioc->pdev->subsystem_device) {
3602 case MPT2SAS_INTEL_RS25GB008_SSDID:
3603 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3604 MPT2SAS_INTEL_RS25GB008_BRANDING);
3605 break;
3606 case MPT2SAS_INTEL_RMS25JB080_SSDID:
3607 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3608 MPT2SAS_INTEL_RMS25JB080_BRANDING);
3609 break;
3610 case MPT2SAS_INTEL_RMS25JB040_SSDID:
3611 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3612 MPT2SAS_INTEL_RMS25JB040_BRANDING);
3613 break;
3614 case MPT2SAS_INTEL_RMS25KB080_SSDID:
3615 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3616 MPT2SAS_INTEL_RMS25KB080_BRANDING);
3617 break;
3618 case MPT2SAS_INTEL_RMS25KB040_SSDID:
3619 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3620 MPT2SAS_INTEL_RMS25KB040_BRANDING);
3621 break;
3622 case MPT2SAS_INTEL_RMS25LB040_SSDID:
3623 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3624 MPT2SAS_INTEL_RMS25LB040_BRANDING);
3625 break;
3626 case MPT2SAS_INTEL_RMS25LB080_SSDID:
3627 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3628 MPT2SAS_INTEL_RMS25LB080_BRANDING);
3629 break;
3630 default:
3631 pr_info(MPT3SAS_FMT
3632 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3633 ioc->name, ioc->pdev->subsystem_device);
3634 break;
3635 }
break;
3636 case MPI25_MFGPAGE_DEVID_SAS3008:
3637 switch (ioc->pdev->subsystem_device) {
3638 case MPT3SAS_INTEL_RMS3JC080_SSDID:
3639 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3640 MPT3SAS_INTEL_RMS3JC080_BRANDING);
3641 break;
3642
3643 case MPT3SAS_INTEL_RS3GC008_SSDID:
3644 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3645 MPT3SAS_INTEL_RS3GC008_BRANDING);
3646 break;
3647 case MPT3SAS_INTEL_RS3FC044_SSDID:
3648 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3649 MPT3SAS_INTEL_RS3FC044_BRANDING);
3650 break;
3651 case MPT3SAS_INTEL_RS3UC080_SSDID:
3652 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3653 MPT3SAS_INTEL_RS3UC080_BRANDING);
3654 break;
3655 default:
3656 pr_info(MPT3SAS_FMT
3657 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3658 ioc->name, ioc->pdev->subsystem_device);
3659 break;
3660 }
3661 break;
3662 default:
3663 pr_info(MPT3SAS_FMT
3664 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3665 ioc->name, ioc->pdev->subsystem_device);
3666 break;
3667 }
3668 break;
3669 case PCI_VENDOR_ID_DELL:
3670 switch (ioc->pdev->device) {
3671 case MPI2_MFGPAGE_DEVID_SAS2008:
3672 switch (ioc->pdev->subsystem_device) {
3673 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
3674 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3675 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
3676 break;
3677 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
3678 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3679 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
3680 break;
3681 case
MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
3682 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3683 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
3684 break;
3685 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
3686 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3687 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
3688 break;
3689 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
3690 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3691 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
3692 break;
3693 case MPT2SAS_DELL_PERC_H200_SSDID:
3694 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3695 MPT2SAS_DELL_PERC_H200_BRANDING);
3696 break;
3697 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
3698 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3699 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
3700 break;
3701 default:
3702 pr_info(MPT3SAS_FMT
3703 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3704 ioc->name, ioc->pdev->subsystem_device);
3705 break;
3706 }
3707 break;
3708 case MPI25_MFGPAGE_DEVID_SAS3008:
3709 switch (ioc->pdev->subsystem_device) {
3710 case MPT3SAS_DELL_12G_HBA_SSDID:
3711 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3712 MPT3SAS_DELL_12G_HBA_BRANDING);
3713 break;
3714 default:
3715 pr_info(MPT3SAS_FMT
3716 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3717 ioc->name, ioc->pdev->subsystem_device);
3718 break;
3719 }
3720 break;
3721 default:
3722 pr_info(MPT3SAS_FMT
3723 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
3724 ioc->pdev->subsystem_device);
3725 break;
3726 }
3727 break;
3728 case PCI_VENDOR_ID_CISCO:
3729 switch (ioc->pdev->device) {
3730 case MPI25_MFGPAGE_DEVID_SAS3008:
3731 switch (ioc->pdev->subsystem_device) {
3732 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
3733 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3734 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
3735 break;
3736 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
3737 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3738 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
3739 break;
3740 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3741 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3742 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3743 break;
3744 default:
3745 pr_info(MPT3SAS_FMT
3746 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3747 ioc->name, ioc->pdev->subsystem_device);
3748 break;
3749 }
3750 break;
3751 case MPI25_MFGPAGE_DEVID_SAS3108_1:
3752 switch (ioc->pdev->subsystem_device) {
3753 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3754 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3755 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3756 break;
3757 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
3758 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3759 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
3760 );
3761 break;
3762 default:
3763 pr_info(MPT3SAS_FMT
3764 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3765 ioc->name, ioc->pdev->subsystem_device);
3766 break;
3767 }
3768 break;
3769 default:
3770 pr_info(MPT3SAS_FMT
3771 "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3772 ioc->name, ioc->pdev->subsystem_device);
3773 break;
3774 }
3775 break;
3776 case MPT2SAS_HP_3PAR_SSVID:
3777 switch (ioc->pdev->device) {
3778 case MPI2_MFGPAGE_DEVID_SAS2004:
3779 switch (ioc->pdev->subsystem_device) {
3780 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
3781 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3782 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
3783 break;
3784 default:
3785 pr_info(MPT3SAS_FMT
3786 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3787 ioc->name, ioc->pdev->subsystem_device);
3788 break;
3789 }
break;
3790 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3791 switch (ioc->pdev->subsystem_device) {
3792 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
3793 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3794 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
3795 break;
break; 3796 case MPT2SAS_HP_2_4_EXTERNAL_SSDID: 3797 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3798 MPT2SAS_HP_2_4_EXTERNAL_BRANDING); 3799 break; 3800 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: 3801 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3802 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); 3803 break; 3804 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: 3805 pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3806 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); 3807 break; 3808 default: 3809 pr_info(MPT3SAS_FMT 3810 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", 3811 ioc->name, ioc->pdev->subsystem_device); 3812 break; 3813 } 3814 default: 3815 pr_info(MPT3SAS_FMT 3816 "HP SAS HBA: Subsystem ID: 0x%X\n", 3817 ioc->name, ioc->pdev->subsystem_device); 3818 break; 3819 } 3820 default: 3821 break; 3822 } 3823 } 3824 3825 /** 3826 * _base_display_ioc_capabilities - Disply IOC's capabilities. 3827 * @ioc: per adapter object 3828 * 3829 * Return nothing. 3830 */ 3831 static void 3832 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) 3833 { 3834 int i = 0; 3835 char desc[16]; 3836 u32 iounit_pg1_flags; 3837 u32 bios_version; 3838 3839 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 3840 strncpy(desc, ioc->manu_pg0.ChipName, 16); 3841 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\ 3842 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", 3843 ioc->name, desc, 3844 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 3845 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 3846 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 3847 ioc->facts.FWVersion.Word & 0x000000FF, 3848 ioc->pdev->revision, 3849 (bios_version & 0xFF000000) >> 24, 3850 (bios_version & 0x00FF0000) >> 16, 3851 (bios_version & 0x0000FF00) >> 8, 3852 bios_version & 0x000000FF); 3853 3854 _base_display_OEMs_branding(ioc); 3855 3856 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 3857 pr_info("%sNVMe", i ? "," : ""); 3858 i++; 3859 } 3860 3861 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); 3862 3863 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 3864 pr_info("Initiator"); 3865 i++; 3866 } 3867 3868 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { 3869 pr_info("%sTarget", i ? "," : ""); 3870 i++; 3871 } 3872 3873 i = 0; 3874 pr_info("), "); 3875 pr_info("Capabilities=("); 3876 3877 if (!ioc->hide_ir_msg) { 3878 if (ioc->facts.IOCCapabilities & 3879 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 3880 pr_info("Raid"); 3881 i++; 3882 } 3883 } 3884 3885 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 3886 pr_info("%sTLR", i ? "," : ""); 3887 i++; 3888 } 3889 3890 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { 3891 pr_info("%sMulticast", i ? "," : ""); 3892 i++; 3893 } 3894 3895 if (ioc->facts.IOCCapabilities & 3896 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { 3897 pr_info("%sBIDI Target", i ? "," : ""); 3898 i++; 3899 } 3900 3901 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { 3902 pr_info("%sEEDP", i ? "," : ""); 3903 i++; 3904 } 3905 3906 if (ioc->facts.IOCCapabilities & 3907 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { 3908 pr_info("%sSnapshot Buffer", i ? "," : ""); 3909 i++; 3910 } 3911 3912 if (ioc->facts.IOCCapabilities & 3913 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { 3914 pr_info("%sDiag Trace Buffer", i ? "," : ""); 3915 i++; 3916 } 3917 3918 if (ioc->facts.IOCCapabilities & 3919 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { 3920 pr_info("%sDiag Extended Buffer", i ? 
"," : ""); 3921 i++; 3922 } 3923 3924 if (ioc->facts.IOCCapabilities & 3925 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 3926 pr_info("%sTask Set Full", i ? "," : ""); 3927 i++; 3928 } 3929 3930 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 3931 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { 3932 pr_info("%sNCQ", i ? "," : ""); 3933 i++; 3934 } 3935 3936 pr_info(")\n"); 3937 } 3938 3939 /** 3940 * mpt3sas_base_update_missing_delay - change the missing delay timers 3941 * @ioc: per adapter object 3942 * @device_missing_delay: amount of time till device is reported missing 3943 * @io_missing_delay: interval IO is returned when there is a missing device 3944 * 3945 * Return nothing. 3946 * 3947 * Passed on the command line, this function will modify the device missing 3948 * delay, as well as the io missing delay. This should be called at driver 3949 * load time. 3950 */ 3951 void 3952 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, 3953 u16 device_missing_delay, u8 io_missing_delay) 3954 { 3955 u16 dmd, dmd_new, dmd_orignal; 3956 u8 io_missing_delay_original; 3957 u16 sz; 3958 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 3959 Mpi2ConfigReply_t mpi_reply; 3960 u8 num_phys = 0; 3961 u16 ioc_status; 3962 3963 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 3964 if (!num_phys) 3965 return; 3966 3967 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * 3968 sizeof(Mpi2SasIOUnit1PhyData_t)); 3969 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 3970 if (!sas_iounit_pg1) { 3971 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3972 ioc->name, __FILE__, __LINE__, __func__); 3973 goto out; 3974 } 3975 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 3976 sas_iounit_pg1, sz))) { 3977 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3978 ioc->name, __FILE__, __LINE__, __func__); 3979 goto out; 3980 } 3981 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 3982 MPI2_IOCSTATUS_MASK; 3983 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 3984 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 3985 ioc->name, __FILE__, __LINE__, __func__); 3986 goto out; 3987 } 3988 3989 /* device missing delay */ 3990 dmd = sas_iounit_pg1->ReportDeviceMissingDelay; 3991 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 3992 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 3993 else 3994 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 3995 dmd_orignal = dmd; 3996 if (device_missing_delay > 0x7F) { 3997 dmd = (device_missing_delay > 0x7F0) ? 
    /*
     * Delays up to 0x7F are stored directly in seconds; anything larger
     * is clamped to 0x7F0, converted to 16-second units and flagged with
     * REPORT_MISSING_UNIT_16. For example (illustrative value), a
     * requested delay of 300 seconds is stored as 300 / 16 = 18 units
     * and decodes back to 288 seconds.
     */
    if (device_missing_delay > 0x7F) {
        dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
            device_missing_delay;
        dmd = dmd / 16;
        dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
    } else
        dmd = device_missing_delay;
    sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

    /* io missing delay */
    io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
    sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

    if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
        sz)) {
        if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
            dmd_new = (dmd &
                MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
        else
            dmd_new =
                dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
        pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
            ioc->name, dmd_original, dmd_new);
        pr_info(MPT3SAS_FMT "io_missing_delay: old(%d), new(%d)\n",
            ioc->name, io_missing_delay_original,
            io_missing_delay);
        ioc->device_missing_delay = dmd_new;
        ioc->io_missing_delay = io_missing_delay;
    }

 out:
    kfree(sas_iounit_pg1);
}

/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
    Mpi2ConfigReply_t mpi_reply;
    u32 iounit_pg1_flags;

    mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
    if (ioc->ir_firmware)
        mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
            &ioc->manu_pg10);

    /*
     * Ensure correct T10 PI operation if vendor left EEDPTagMode
     * flag unset in NVDATA.
     */
    mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
    if (ioc->manu_pg11.EEDPTagMode == 0) {
        pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
            ioc->name);
        ioc->manu_pg11.EEDPTagMode &= ~0x3;
        ioc->manu_pg11.EEDPTagMode |= 0x1;
        mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
            &ioc->manu_pg11);
    }

    mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
    mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
    mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
    mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
    mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
    mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
    _base_display_ioc_capabilities(ioc);

    /*
     * Enable task_set_full handling in iounit_pg1 when the
     * facts capabilities indicate that it's supported.
     */
    iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
    if ((ioc->facts.IOCCapabilities &
        MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
        iounit_pg1_flags &=
            ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
    else
        iounit_pg1_flags |=
            MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
    ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
    mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

    if (ioc->iounit_pg8.NumSensors)
        ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}

/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
    int i = 0;
    struct reply_post_struct *rps;

    dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->request) {
        pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
            ioc->request, ioc->request_dma);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "request_pool(0x%p): free\n",
            ioc->name, ioc->request));
        ioc->request = NULL;
    }

    if (ioc->sense) {
        dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
        dma_pool_destroy(ioc->sense_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "sense_pool(0x%p): free\n",
            ioc->name, ioc->sense));
        ioc->sense = NULL;
    }

    if (ioc->reply) {
        dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
        dma_pool_destroy(ioc->reply_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_pool(0x%p): free\n",
            ioc->name, ioc->reply));
        ioc->reply = NULL;
    }

    if (ioc->reply_free) {
        dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
            ioc->reply_free_dma);
        dma_pool_destroy(ioc->reply_free_dma_pool);
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_free_pool(0x%p): free\n",
            ioc->name, ioc->reply_free));
        ioc->reply_free = NULL;
    }

    if (ioc->reply_post) {
        do {
            rps = &ioc->reply_post[i];
            if (rps->reply_post_free) {
                dma_pool_free(
                    ioc->reply_post_free_dma_pool,
                    rps->reply_post_free,
                    rps->reply_post_free_dma);
                dexitprintk(ioc, pr_info(MPT3SAS_FMT
                    "reply_post_free_pool(0x%p): free\n",
                    ioc->name, rps->reply_post_free));
                rps->reply_post_free = NULL;
            }
        } while (ioc->rdpq_array_enable &&
            (++i < ioc->reply_queue_count));

        dma_pool_destroy(ioc->reply_post_free_dma_pool);
        kfree(ioc->reply_post);
    }

    if (ioc->pcie_sgl_dma_pool) {
        for (i = 0; i < ioc->scsiio_depth; i++) {
            dma_pool_free(ioc->pcie_sgl_dma_pool,
                ioc->pcie_sg_lookup[i].pcie_sgl,
                ioc->pcie_sg_lookup[i].pcie_sgl_dma);
        }
        dma_pool_destroy(ioc->pcie_sgl_dma_pool);
        /* also release the lookup table allocated alongside the pool */
        kfree(ioc->pcie_sg_lookup);
        ioc->pcie_sg_lookup = NULL;
    }

    if (ioc->config_page) {
        dexitprintk(ioc, pr_info(MPT3SAS_FMT
            "config_page(0x%p): free\n", ioc->name,
            ioc->config_page));
        pci_free_consistent(ioc->pdev, ioc->config_page_sz,
            ioc->config_page, ioc->config_page_dma);
    }

    kfree(ioc->hpr_lookup);
    kfree(ioc->internal_lookup);
    if (ioc->chain_lookup) {
        for (i = 0; i < ioc->chain_depth; i++) {
            if (ioc->chain_lookup[i].chain_buffer)
                dma_pool_free(ioc->chain_dma_pool,
                    ioc->chain_lookup[i].chain_buffer,
                    ioc->chain_lookup[i].chain_buffer_dma);
        }
        dma_pool_destroy(ioc->chain_dma_pool);
        free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
        ioc->chain_lookup = NULL;
    }
}

/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Returns 0 on success, anything else is an error.
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
    struct mpt3sas_facts *facts;
    u16 max_sge_elements;
    u16 chains_needed_per_io;
    u32 sz, total_sz, reply_post_free_sz;
    u32 retry_sz;
    u16 max_request_credit, nvme_blocks_needed;
    unsigned short sg_tablesize;
    u16 sge_size;
    int i;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    retry_sz = 0;
    facts = &ioc->facts;

    /* command line tunables for max sgl entries */
    if (max_sgl_entries != -1)
        sg_tablesize = max_sgl_entries;
    else {
        if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
            sg_tablesize = MPT2SAS_SG_DEPTH;
        else
            sg_tablesize = MPT3SAS_SG_DEPTH;
    }

    /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
    if (reset_devices)
        sg_tablesize = min_t(unsigned short, sg_tablesize,
            MPT_KDUMP_MIN_PHYS_SEGMENTS);

    if (ioc->is_mcpu_endpoint)
        ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
    else {
        if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
            sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
        else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
            sg_tablesize = min_t(unsigned short, sg_tablesize,
                SG_MAX_SEGMENTS);
            pr_warn(MPT3SAS_FMT
                "sg_tablesize(%u) is bigger than kernel "
                "defined MPT_MAX_PHYS_SEGMENTS(%u)\n", ioc->name,
                sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
        }
        ioc->shost->sg_tablesize = sg_tablesize;
    }

    ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + 5),
        (facts->RequestCredit / 4));
    if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
        if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
            INTERNAL_SCSIIO_CMDS_COUNT)) {
            pr_err(MPT3SAS_FMT "IOC doesn't have enough Request "
                "Credits, it has just %d credits\n",
                ioc->name, facts->RequestCredit);
            return -ENOMEM;
        }
        ioc->internal_depth = 10;
    }

    ioc->hi_priority_depth = ioc->internal_depth - 5;
    /* command line tunables for max controller queue depth */
    if (max_queue_depth != -1 && max_queue_depth != 0) {
        max_request_credit = min_t(u16, max_queue_depth +
            ioc->internal_depth, facts->RequestCredit);
        if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
            max_request_credit = MAX_HBA_QUEUE_DEPTH;
    } else if (reset_devices)
        max_request_credit = min_t(u16, facts->RequestCredit,
            (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
    else
        max_request_credit = min_t(u16, facts->RequestCredit,
            MAX_HBA_QUEUE_DEPTH);

    /* Firmware maintains an additional facts->HighPriorityCredit number of
     * credits for HiPriority Request messages, so the hba queue depth is
     * the sum of max_request_credit and the high priority queue depth.
     */
    ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;

    /* request frame size */
    ioc->request_sz = facts->IOCRequestFrameSize * 4;

    /* reply frame size */
    ioc->reply_sz = facts->ReplyFrameSize * 4;

    /* chain segment size */
    if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
        if (facts->IOCMaxChainSegmentSize)
            ioc->chain_segment_sz =
                facts->IOCMaxChainSegmentSize *
                MAX_CHAIN_ELEMT_SZ;
        else
            /* default to a 128 byte segment if IOCMaxChainSegmentSize is zero */
            ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
                MAX_CHAIN_ELEMT_SZ;
    } else
        ioc->chain_segment_sz = ioc->request_sz;

    /* calculate the max scatter element size */
    sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

 retry_allocation:
    total_sz = 0;
    /* calculate number of sg elements left over in the 1st frame */
    max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
        sizeof(Mpi2SGEIOUnion_t)) + sge_size);
    ioc->max_sges_in_main_message = max_sge_elements/sge_size;

    /* now do the same for a chain buffer */
    max_sge_elements = ioc->chain_segment_sz - sge_size;
    ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

    /*
     * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
     */
    chains_needed_per_io = ((ioc->shost->sg_tablesize -
        ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
        + 1;
    if (chains_needed_per_io > facts->MaxChainDepth) {
        chains_needed_per_io = facts->MaxChainDepth;
        ioc->shost->sg_tablesize = min_t(u16,
            ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
            * chains_needed_per_io), ioc->shost->sg_tablesize);
    }
    ioc->chains_needed_per_io = chains_needed_per_io;

    /* reply free queue sizing - accounting for 64 FW events */
    ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

    /* mCPU endpoints manage a single counter for simplicity */
    if (ioc->is_mcpu_endpoint)
        ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
    else {
        /* calculate reply descriptor post queue depth */
        ioc->reply_post_queue_depth = ioc->hba_queue_depth +
            ioc->reply_free_queue_depth + 1;
        /* align the reply post queue on the next 16 count boundary */
        if (ioc->reply_post_queue_depth % 16)
            ioc->reply_post_queue_depth += 16 -
                (ioc->reply_post_queue_depth % 16);
    }

    if (ioc->reply_post_queue_depth >
        facts->MaxReplyDescriptorPostQueueDepth) {
        ioc->reply_post_queue_depth =
            facts->MaxReplyDescriptorPostQueueDepth -
            (facts->MaxReplyDescriptorPostQueueDepth % 16);
        ioc->hba_queue_depth =
            ((ioc->reply_post_queue_depth - 64) / 2) - 1;
        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
    }

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: "
        "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
        "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
        ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
        ioc->chains_needed_per_io));

    /* reply post queue, 16 byte align */
    reply_post_free_sz = ioc->reply_post_queue_depth *
        sizeof(Mpi2DefaultReplyDescriptor_t);

    sz = reply_post_free_sz;
    if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
        sz *= ioc->reply_queue_count;
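    /*
     * Illustrative sizing, not taken from any particular adapter: with
     * hba_queue_depth = 900, reply_free_queue_depth = 900 + 64 = 964 and
     * reply_post_queue_depth = 900 + 964 + 1 = 1865, rounded up to the
     * next 16-count boundary = 1872. Each reply descriptor is 8 bytes
     * (sizeof(Mpi2DefaultReplyDescriptor_t), matching the element_size(8)
     * in the debug print below), so one descriptor post queue costs
     * 1872 * 8 = 14976 bytes; without RDPQ and with MSI-X enabled, that
     * is multiplied by the number of reply queues.
     */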
    ioc->reply_post = kcalloc(ioc->rdpq_array_enable ?
        ioc->reply_queue_count : 1,
        sizeof(struct reply_post_struct), GFP_KERNEL);

    if (!ioc->reply_post) {
        pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
            ioc->name);
        goto out;
    }
    ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
        &ioc->pdev->dev, sz, 16, 0);
    if (!ioc->reply_post_free_dma_pool) {
        pr_err(MPT3SAS_FMT
            "reply_post_free pool: dma_pool_create failed\n",
            ioc->name);
        goto out;
    }
    i = 0;
    do {
        ioc->reply_post[i].reply_post_free =
            dma_pool_alloc(ioc->reply_post_free_dma_pool,
            GFP_KERNEL,
            &ioc->reply_post[i].reply_post_free_dma);
        if (!ioc->reply_post[i].reply_post_free) {
            pr_err(MPT3SAS_FMT
                "reply_post_free pool: dma_pool_alloc failed\n",
                ioc->name);
            goto out;
        }
        memset(ioc->reply_post[i].reply_post_free, 0, sz);
        dinitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply post free pool (0x%p): depth(%d), "
            "element_size(%d), pool_size(%d kB)\n", ioc->name,
            ioc->reply_post[i].reply_post_free,
            ioc->reply_post_queue_depth, 8, sz/1024));
        dinitprintk(ioc, pr_info(MPT3SAS_FMT
            "reply_post_free_dma = (0x%llx)\n", ioc->name,
            (unsigned long long)
            ioc->reply_post[i].reply_post_free_dma));
        total_sz += sz;
    } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

    if (ioc->dma_mask == 64) {
        if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
            pr_warn(MPT3SAS_FMT
                "no suitable consistent DMA mask for %s\n",
                ioc->name, pci_name(ioc->pdev));
            goto out;
        }
    }

    ioc->scsiio_depth = ioc->hba_queue_depth -
        ioc->hi_priority_depth - ioc->internal_depth;

    /* set the scsi host can_queue depth, leaving room for internal
     * commands that could be outstanding
     */
    ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
    dinitprintk(ioc, pr_info(MPT3SAS_FMT
        "scsi host: can_queue depth (%d)\n",
        ioc->name, ioc->shost->can_queue));

    /* contiguous pool for request and chains, 16 byte align, one extra
     * frame for smid=0
     */
    ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
    sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

    /* hi-priority queue */
    sz += (ioc->hi_priority_depth * ioc->request_sz);

    /* internal queue */
    sz += (ioc->internal_depth * ioc->request_sz);

    ioc->request_dma_sz = sz;
    ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
    if (!ioc->request) {
        pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent "
            "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
            "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
            ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
        if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
            goto out;
        retry_sz = 64;
        ioc->hba_queue_depth -= retry_sz;
        _base_release_memory_pools(ioc);
        goto retry_allocation;
    }

    if (retry_sz)
        pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent "
            "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
            "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
            ioc->chains_needed_per_io, ioc->request_sz, sz/1024);

    /* hi-priority queue */
    ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
        ioc->request_sz);
    ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
        ioc->request_sz);
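    /*
     * At this point the contiguous request pool is carved up as follows
     * (an illustrative map; the internal queue pointers are derived just
     * below). Offsets are in units of request_sz frames:
     *
     *   frame 0                        reserved (smid 0 is invalid)
     *   frames 1 .. scsiio_depth       SCSI IO requests
     *   next hi_priority_depth frames  hi-priority requests, starting at
     *                                  smid scsiio_depth + 1
     *   next internal_depth frames     internal requests, starting at
     *                                  smid hi_priority_smid +
     *                                  hi_priority_depth
     */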
/* internal queue */ 4474 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * 4475 ioc->request_sz); 4476 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * 4477 ioc->request_sz); 4478 4479 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4480 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4481 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, 4482 (ioc->hba_queue_depth * ioc->request_sz)/1024)); 4483 4484 dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", 4485 ioc->name, (unsigned long long) ioc->request_dma)); 4486 total_sz += sz; 4487 4488 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", 4489 ioc->name, ioc->request, ioc->scsiio_depth)); 4490 4491 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); 4492 sz = ioc->chain_depth * sizeof(struct chain_tracker); 4493 ioc->chain_pages = get_order(sz); 4494 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( 4495 GFP_KERNEL, ioc->chain_pages); 4496 if (!ioc->chain_lookup) { 4497 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", 4498 ioc->name); 4499 goto out; 4500 } 4501 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, 4502 ioc->chain_segment_sz, 16, 0); 4503 if (!ioc->chain_dma_pool) { 4504 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n", 4505 ioc->name); 4506 goto out; 4507 } 4508 for (i = 0; i < ioc->chain_depth; i++) { 4509 ioc->chain_lookup[i].chain_buffer = dma_pool_alloc( 4510 ioc->chain_dma_pool , GFP_KERNEL, 4511 &ioc->chain_lookup[i].chain_buffer_dma); 4512 if (!ioc->chain_lookup[i].chain_buffer) { 4513 ioc->chain_depth = i; 4514 goto chain_done; 4515 } 4516 total_sz += ioc->chain_segment_sz; 4517 } 4518 chain_done: 4519 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4520 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", 4521 ioc->name, ioc->chain_depth, ioc->chain_segment_sz, 4522 ((ioc->chain_depth * ioc->chain_segment_sz))/1024)); 4523 4524 /* initialize hi-priority queue smid's */ 4525 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 4526 sizeof(struct request_tracker), GFP_KERNEL); 4527 if (!ioc->hpr_lookup) { 4528 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", 4529 ioc->name); 4530 goto out; 4531 } 4532 ioc->hi_priority_smid = ioc->scsiio_depth + 1; 4533 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4534 "hi_priority(0x%p): depth(%d), start smid(%d)\n", 4535 ioc->name, ioc->hi_priority, 4536 ioc->hi_priority_depth, ioc->hi_priority_smid)); 4537 4538 /* initialize internal queue smid's */ 4539 ioc->internal_lookup = kcalloc(ioc->internal_depth, 4540 sizeof(struct request_tracker), GFP_KERNEL); 4541 if (!ioc->internal_lookup) { 4542 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", 4543 ioc->name); 4544 goto out; 4545 } 4546 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; 4547 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4548 "internal(0x%p): depth(%d), start smid(%d)\n", 4549 ioc->name, ioc->internal, 4550 ioc->internal_depth, ioc->internal_smid)); 4551 /* 4552 * The number of NVMe page sized blocks needed is: 4553 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 4554 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry 4555 * that is placed in the main message frame. 8 is the size of each PRP 4556 * entry or PRP list pointer entry. 8 is subtracted from page_size 4557 * because of the PRP list pointer entry at the end of a page, so this 4558 * is not counted as a PRP entry. The 1 added page is a round up. 
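 * Worked example (illustrative values only): with sg_tablesize = 128,
 * an 8 byte PRP entry and a 4096 byte page,
 * ((128 * 8) - 1) / (4096 - 8) + 1 = 1023 / 4088 + 1 = 0 + 1 = 1,
 * so a single NVMe page sized block is needed per I/O.
 *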
4559 * 4560 * To avoid allocation failures due to the amount of memory that could 4561 * be required for NVMe PRP's, only each set of NVMe blocks will be 4562 * contiguous, so a new set is allocated for each possible I/O. 4563 */ 4564 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 4565 nvme_blocks_needed = 4566 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; 4567 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); 4568 nvme_blocks_needed++; 4569 4570 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; 4571 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); 4572 if (!ioc->pcie_sg_lookup) { 4573 pr_info(MPT3SAS_FMT 4574 "PCIe SGL lookup: kzalloc failed\n", ioc->name); 4575 goto out; 4576 } 4577 sz = nvme_blocks_needed * ioc->page_size; 4578 ioc->pcie_sgl_dma_pool = 4579 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); 4580 if (!ioc->pcie_sgl_dma_pool) { 4581 pr_info(MPT3SAS_FMT 4582 "PCIe SGL pool: dma_pool_create failed\n", 4583 ioc->name); 4584 goto out; 4585 } 4586 for (i = 0; i < ioc->scsiio_depth; i++) { 4587 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( 4588 ioc->pcie_sgl_dma_pool, GFP_KERNEL, 4589 &ioc->pcie_sg_lookup[i].pcie_sgl_dma); 4590 if (!ioc->pcie_sg_lookup[i].pcie_sgl) { 4591 pr_info(MPT3SAS_FMT 4592 "PCIe SGL pool: dma_pool_alloc failed\n", 4593 ioc->name); 4594 goto out; 4595 } 4596 } 4597 4598 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), " 4599 "element_size(%d), pool_size(%d kB)\n", ioc->name, 4600 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); 4601 total_sz += sz * ioc->scsiio_depth; 4602 } 4603 /* sense buffers, 4 byte align */ 4604 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 4605 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4606 4, 0); 4607 if (!ioc->sense_dma_pool) { 4608 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n", 4609 ioc->name); 4610 goto out; 4611 } 4612 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, 4613 &ioc->sense_dma); 4614 if (!ioc->sense) { 4615 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n", 4616 ioc->name); 4617 goto out; 4618 } 4619 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4620 "sense pool(0x%p): depth(%d), element_size(%d), pool_size" 4621 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, 4622 SCSI_SENSE_BUFFERSIZE, sz/1024)); 4623 dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", 4624 ioc->name, (unsigned long long)ioc->sense_dma)); 4625 total_sz += sz; 4626 4627 /* reply pool, 4 byte align */ 4628 sz = ioc->reply_free_queue_depth * ioc->reply_sz; 4629 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz, 4630 4, 0); 4631 if (!ioc->reply_dma_pool) { 4632 pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n", 4633 ioc->name); 4634 goto out; 4635 } 4636 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, 4637 &ioc->reply_dma); 4638 if (!ioc->reply) { 4639 pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n", 4640 ioc->name); 4641 goto out; 4642 } 4643 ioc->reply_dma_min_address = (u32)(ioc->reply_dma); 4644 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; 4645 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4646 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", 4647 ioc->name, ioc->reply, 4648 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); 4649 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n", 4650 ioc->name, (unsigned long long)ioc->reply_dma)); 4651 total_sz += sz; 4652 4653 /* reply free queue, 16 byte align */ 4654 sz = 
ioc->reply_free_queue_depth * 4; 4655 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool", 4656 &ioc->pdev->dev, sz, 16, 0); 4657 if (!ioc->reply_free_dma_pool) { 4658 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n", 4659 ioc->name); 4660 goto out; 4661 } 4662 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL, 4663 &ioc->reply_free_dma); 4664 if (!ioc->reply_free) { 4665 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n", 4666 ioc->name); 4667 goto out; 4668 } 4669 memset(ioc->reply_free, 0, sz); 4670 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ 4671 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, 4672 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); 4673 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4674 "reply_free_dma (0x%llx)\n", 4675 ioc->name, (unsigned long long)ioc->reply_free_dma)); 4676 total_sz += sz; 4677 4678 ioc->config_page_sz = 512; 4679 ioc->config_page = pci_alloc_consistent(ioc->pdev, 4680 ioc->config_page_sz, &ioc->config_page_dma); 4681 if (!ioc->config_page) { 4682 pr_err(MPT3SAS_FMT 4683 "config page: dma_pool_alloc failed\n", 4684 ioc->name); 4685 goto out; 4686 } 4687 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4688 "config page(0x%p): size(%d)\n", 4689 ioc->name, ioc->config_page, ioc->config_page_sz)); 4690 dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n", 4691 ioc->name, (unsigned long long)ioc->config_page_dma)); 4692 total_sz += ioc->config_page_sz; 4693 4694 pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n", 4695 ioc->name, total_sz/1024); 4696 pr_info(MPT3SAS_FMT 4697 "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", 4698 ioc->name, ioc->shost->can_queue, facts->RequestCredit); 4699 pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n", 4700 ioc->name, ioc->shost->sg_tablesize); 4701 return 0; 4702 4703 out: 4704 return -ENOMEM; 4705 } 4706 4707 /** 4708 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. 4709 * @ioc: Pointer to MPT_ADAPTER structure 4710 * @cooked: Request raw or cooked IOC state 4711 * 4712 * Returns all IOC Doorbell register bits if cooked==0, else just the 4713 * Doorbell bits in MPI_IOC_STATE_MASK. 4714 */ 4715 u32 4716 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) 4717 { 4718 u32 s, sc; 4719 4720 s = readl(&ioc->chip->Doorbell); 4721 sc = s & MPI2_IOC_STATE_MASK; 4722 return cooked ? sc : s; 4723 } 4724 4725 /** 4726 * _base_wait_on_iocstate - waiting on a particular ioc state 4727 * @ioc_state: controller state { READY, OPERATIONAL, or RESET } 4728 * @timeout: timeout in second 4729 * 4730 * Returns 0 for success, non-zero for failure. 4731 */ 4732 static int 4733 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) 4734 { 4735 u32 count, cntdn; 4736 u32 current_state; 4737 4738 count = 0; 4739 cntdn = 1000 * timeout; 4740 do { 4741 current_state = mpt3sas_base_get_iocstate(ioc, 1); 4742 if (current_state == ioc_state) 4743 return 0; 4744 if (count && current_state == MPI2_IOC_STATE_FAULT) 4745 break; 4746 4747 usleep_range(1000, 1500); 4748 count++; 4749 } while (--cntdn); 4750 4751 return current_state; 4752 } 4753 4754 /** 4755 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by 4756 * a write to the doorbell) 4757 * @ioc: per adapter object 4758 * @timeout: timeout in second 4759 * 4760 * Returns 0 for success, non-zero for failure. 
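 *
 * The wait loop below polls HostInterruptStatus roughly once per
 * millisecond (usleep_range(1000, 1500)) with cntdn = 1000 * timeout,
 * so @timeout is expressed in seconds. _base_spin_on_doorbell_int()
 * below is a variant that polls in 500 microsecond udelay() steps
 * instead of sleeping.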
4761 * 4762 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 4763 */ 4764 static int 4765 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc); 4766 4767 static int 4768 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 4769 { 4770 u32 cntdn, count; 4771 u32 int_status; 4772 4773 count = 0; 4774 cntdn = 1000 * timeout; 4775 do { 4776 int_status = readl(&ioc->chip->HostInterruptStatus); 4777 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4778 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4779 "%s: successful count(%d), timeout(%d)\n", 4780 ioc->name, __func__, count, timeout)); 4781 return 0; 4782 } 4783 4784 usleep_range(1000, 1500); 4785 count++; 4786 } while (--cntdn); 4787 4788 pr_err(MPT3SAS_FMT 4789 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4790 ioc->name, __func__, count, int_status); 4791 return -EFAULT; 4792 } 4793 4794 static int 4795 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 4796 { 4797 u32 cntdn, count; 4798 u32 int_status; 4799 4800 count = 0; 4801 cntdn = 2000 * timeout; 4802 do { 4803 int_status = readl(&ioc->chip->HostInterruptStatus); 4804 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4805 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4806 "%s: successful count(%d), timeout(%d)\n", 4807 ioc->name, __func__, count, timeout)); 4808 return 0; 4809 } 4810 4811 udelay(500); 4812 count++; 4813 } while (--cntdn); 4814 4815 pr_err(MPT3SAS_FMT 4816 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4817 ioc->name, __func__, count, int_status); 4818 return -EFAULT; 4819 4820 } 4821 4822 /** 4823 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. 4824 * @ioc: per adapter object 4825 * @timeout: timeout in second 4826 * 4827 * Returns 0 for success, non-zero for failure. 4828 * 4829 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to 4830 * doorbell. 4831 */ 4832 static int 4833 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) 4834 { 4835 u32 cntdn, count; 4836 u32 int_status; 4837 u32 doorbell; 4838 4839 count = 0; 4840 cntdn = 1000 * timeout; 4841 do { 4842 int_status = readl(&ioc->chip->HostInterruptStatus); 4843 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 4844 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4845 "%s: successful count(%d), timeout(%d)\n", 4846 ioc->name, __func__, count, timeout)); 4847 return 0; 4848 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4849 doorbell = readl(&ioc->chip->Doorbell); 4850 if ((doorbell & MPI2_IOC_STATE_MASK) == 4851 MPI2_IOC_STATE_FAULT) { 4852 mpt3sas_base_fault_info(ioc , doorbell); 4853 return -EFAULT; 4854 } 4855 } else if (int_status == 0xFFFFFFFF) 4856 goto out; 4857 4858 usleep_range(1000, 1500); 4859 count++; 4860 } while (--cntdn); 4861 4862 out: 4863 pr_err(MPT3SAS_FMT 4864 "%s: failed due to timeout count(%d), int_status(%x)!\n", 4865 ioc->name, __func__, count, int_status); 4866 return -EFAULT; 4867 } 4868 4869 /** 4870 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use 4871 * @ioc: per adapter object 4872 * @timeout: timeout in second 4873 * 4874 * Returns 0 for success, non-zero for failure. 
4875 * 4876 */ 4877 static int 4878 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) 4879 { 4880 u32 cntdn, count; 4881 u32 doorbell_reg; 4882 4883 count = 0; 4884 cntdn = 1000 * timeout; 4885 do { 4886 doorbell_reg = readl(&ioc->chip->Doorbell); 4887 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 4888 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4889 "%s: successful count(%d), timeout(%d)\n", 4890 ioc->name, __func__, count, timeout)); 4891 return 0; 4892 } 4893 4894 usleep_range(1000, 1500); 4895 count++; 4896 } while (--cntdn); 4897 4898 pr_err(MPT3SAS_FMT 4899 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", 4900 ioc->name, __func__, count, doorbell_reg); 4901 return -EFAULT; 4902 } 4903 4904 /** 4905 * _base_send_ioc_reset - send doorbell reset 4906 * @ioc: per adapter object 4907 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET 4908 * @timeout: timeout in second 4909 * 4910 * Returns 0 for success, non-zero for failure. 4911 */ 4912 static int 4913 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) 4914 { 4915 u32 ioc_state; 4916 int r = 0; 4917 4918 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { 4919 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n", 4920 ioc->name, __func__); 4921 return -EFAULT; 4922 } 4923 4924 if (!(ioc->facts.IOCCapabilities & 4925 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) 4926 return -EFAULT; 4927 4928 pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name); 4929 4930 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, 4931 &ioc->chip->Doorbell); 4932 if ((_base_wait_for_doorbell_ack(ioc, 15))) { 4933 r = -EFAULT; 4934 goto out; 4935 } 4936 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); 4937 if (ioc_state) { 4938 pr_err(MPT3SAS_FMT 4939 "%s: failed going to ready state (ioc_state=0x%x)\n", 4940 ioc->name, __func__, ioc_state); 4941 r = -EFAULT; 4942 goto out; 4943 } 4944 out: 4945 pr_info(MPT3SAS_FMT "message unit reset: %s\n", 4946 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); 4947 return r; 4948 } 4949 4950 /** 4951 * _base_handshake_req_reply_wait - send request thru doorbell interface 4952 * @ioc: per adapter object 4953 * @request_bytes: request length 4954 * @request: pointer having request payload 4955 * @reply_bytes: reply length 4956 * @reply: pointer to reply payload 4957 * @timeout: timeout in second 4958 * 4959 * Returns 0 for success, non-zero for failure. 
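 *
 * For reference, the handshake below proceeds in these steps (a summary
 * of the code that follows, not an external specification):
 * (1) verify the doorbell is not in use and clear any pending doorbell
 *     interrupt,
 * (2) write the handshake function and dword count to the Doorbell
 *     register,
 * (3) wait for the IOC interrupt and its ack,
 * (4) write the request payload one dword at a time, waiting for an ack
 *     after each write,
 * (5) read the reply back through the Doorbell 16 bits at a time,
 *     clearing HostInterruptStatus between reads.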
4960 */ 4961 static int 4962 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 4963 u32 *request, int reply_bytes, u16 *reply, int timeout) 4964 { 4965 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 4966 int i; 4967 u8 failed; 4968 __le32 *mfp; 4969 4970 /* make sure doorbell is not in use */ 4971 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 4972 pr_err(MPT3SAS_FMT 4973 "doorbell is in use (line=%d)\n", 4974 ioc->name, __LINE__); 4975 return -EFAULT; 4976 } 4977 4978 /* clear pending doorbell interrupts from previous state changes */ 4979 if (readl(&ioc->chip->HostInterruptStatus) & 4980 MPI2_HIS_IOC2SYS_DB_STATUS) 4981 writel(0, &ioc->chip->HostInterruptStatus); 4982 4983 /* send message to ioc */ 4984 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 4985 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 4986 &ioc->chip->Doorbell); 4987 4988 if ((_base_spin_on_doorbell_int(ioc, 5))) { 4989 pr_err(MPT3SAS_FMT 4990 "doorbell handshake int failed (line=%d)\n", 4991 ioc->name, __LINE__); 4992 return -EFAULT; 4993 } 4994 writel(0, &ioc->chip->HostInterruptStatus); 4995 4996 if ((_base_wait_for_doorbell_ack(ioc, 5))) { 4997 pr_err(MPT3SAS_FMT 4998 "doorbell handshake ack failed (line=%d)\n", 4999 ioc->name, __LINE__); 5000 return -EFAULT; 5001 } 5002 5003 /* send message 32-bits at a time */ 5004 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 5005 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 5006 if ((_base_wait_for_doorbell_ack(ioc, 5))) 5007 failed = 1; 5008 } 5009 5010 if (failed) { 5011 pr_err(MPT3SAS_FMT 5012 "doorbell handshake sending request failed (line=%d)\n", 5013 ioc->name, __LINE__); 5014 return -EFAULT; 5015 } 5016 5017 /* now wait for the reply */ 5018 if ((_base_wait_for_doorbell_int(ioc, timeout))) { 5019 pr_err(MPT3SAS_FMT 5020 "doorbell handshake int failed (line=%d)\n", 5021 ioc->name, __LINE__); 5022 return -EFAULT; 5023 } 5024 5025 /* read the first two 16-bits, it gives the total length of the reply */ 5026 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5027 & MPI2_DOORBELL_DATA_MASK); 5028 writel(0, &ioc->chip->HostInterruptStatus); 5029 if ((_base_wait_for_doorbell_int(ioc, 5))) { 5030 pr_err(MPT3SAS_FMT 5031 "doorbell handshake int failed (line=%d)\n", 5032 ioc->name, __LINE__); 5033 return -EFAULT; 5034 } 5035 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5036 & MPI2_DOORBELL_DATA_MASK); 5037 writel(0, &ioc->chip->HostInterruptStatus); 5038 5039 for (i = 2; i < default_reply->MsgLength * 2; i++) { 5040 if ((_base_wait_for_doorbell_int(ioc, 5))) { 5041 pr_err(MPT3SAS_FMT 5042 "doorbell handshake int failed (line=%d)\n", 5043 ioc->name, __LINE__); 5044 return -EFAULT; 5045 } 5046 if (i >= reply_bytes/2) /* overflow case */ 5047 readl(&ioc->chip->Doorbell); 5048 else 5049 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5050 & MPI2_DOORBELL_DATA_MASK); 5051 writel(0, &ioc->chip->HostInterruptStatus); 5052 } 5053 5054 _base_wait_for_doorbell_int(ioc, 5); 5055 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { 5056 dhsprintk(ioc, pr_info(MPT3SAS_FMT 5057 "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); 5058 } 5059 writel(0, &ioc->chip->HostInterruptStatus); 5060 5061 if (ioc->logging_level & MPT_DEBUG_INIT) { 5062 mfp = (__le32 *)reply; 5063 pr_info("\toffset:data\n"); 5064 for (i = 0; i < reply_bytes/4; i++) 5065 pr_info("\t[0x%02x]:%08x\n", i*4, 5066 le32_to_cpu(mfp[i])); 5067 } 5068 return 0; 5069 } 5070 5071 /** 5072 * 
mpt3sas_base_sas_iounit_control - send sas iounit control to FW 5073 * @ioc: per adapter object 5074 * @mpi_reply: the reply payload from FW 5075 * @mpi_request: the request payload sent to FW 5076 * 5077 * The SAS IO Unit Control Request message allows the host to perform low-level 5078 * operations, such as resets on the PHYs of the IO Unit, also allows the host 5079 * to obtain the IOC assigned device handles for a device if it has other 5080 * identifying information about the device, in addition allows the host to 5081 * remove IOC resources associated with the device. 5082 * 5083 * Returns 0 for success, non-zero for failure. 5084 */ 5085 int 5086 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, 5087 Mpi2SasIoUnitControlReply_t *mpi_reply, 5088 Mpi2SasIoUnitControlRequest_t *mpi_request) 5089 { 5090 u16 smid; 5091 u32 ioc_state; 5092 bool issue_reset = false; 5093 int rc; 5094 void *request; 5095 u16 wait_state_count; 5096 5097 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5098 __func__)); 5099 5100 mutex_lock(&ioc->base_cmds.mutex); 5101 5102 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 5103 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", 5104 ioc->name, __func__); 5105 rc = -EAGAIN; 5106 goto out; 5107 } 5108 5109 wait_state_count = 0; 5110 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 5111 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 5112 if (wait_state_count++ == 10) { 5113 pr_err(MPT3SAS_FMT 5114 "%s: failed due to ioc not operational\n", 5115 ioc->name, __func__); 5116 rc = -EFAULT; 5117 goto out; 5118 } 5119 ssleep(1); 5120 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 5121 pr_info(MPT3SAS_FMT 5122 "%s: waiting for operational state(count=%d)\n", 5123 ioc->name, __func__, wait_state_count); 5124 } 5125 5126 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 5127 if (!smid) { 5128 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 5129 ioc->name, __func__); 5130 rc = -EAGAIN; 5131 goto out; 5132 } 5133 5134 rc = 0; 5135 ioc->base_cmds.status = MPT3_CMD_PENDING; 5136 request = mpt3sas_base_get_msg_frame(ioc, smid); 5137 ioc->base_cmds.smid = smid; 5138 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); 5139 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 5140 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 5141 ioc->ioc_link_reset_in_progress = 1; 5142 init_completion(&ioc->base_cmds.done); 5143 mpt3sas_base_put_smid_default(ioc, smid); 5144 wait_for_completion_timeout(&ioc->base_cmds.done, 5145 msecs_to_jiffies(10000)); 5146 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 5147 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 5148 ioc->ioc_link_reset_in_progress) 5149 ioc->ioc_link_reset_in_progress = 0; 5150 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 5151 pr_err(MPT3SAS_FMT "%s: timeout\n", 5152 ioc->name, __func__); 5153 _debug_dump_mf(mpi_request, 5154 sizeof(Mpi2SasIoUnitControlRequest_t)/4); 5155 if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) 5156 issue_reset = true; 5157 goto issue_host_reset; 5158 } 5159 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 5160 memcpy(mpi_reply, ioc->base_cmds.reply, 5161 sizeof(Mpi2SasIoUnitControlReply_t)); 5162 else 5163 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 5164 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 5165 goto out; 5166 5167 issue_host_reset: 5168 if (issue_reset) 5169 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 5170 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 5171 rc = -EFAULT; 5172 out: 5173 
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

/**
 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SCSI Enclosure Processor request message causes the IOC to
 * communicate with SES devices to control LED status signals.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
{
    u16 smid;
    u32 ioc_state;
    bool issue_reset = false;
    int rc;
    void *request;
    u16 wait_state_count;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mutex_lock(&ioc->base_cmds.mutex);

    if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
        pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    wait_state_count = 0;
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        if (wait_state_count++ == 10) {
            pr_err(MPT3SAS_FMT
                "%s: failed due to ioc not operational\n",
                ioc->name, __func__);
            rc = -EFAULT;
            goto out;
        }
        ssleep(1);
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        pr_info(MPT3SAS_FMT
            "%s: waiting for operational state(count=%d)\n",
            ioc->name,
            __func__, wait_state_count);
    }

    smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
    if (!smid) {
        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = -EAGAIN;
        goto out;
    }

    rc = 0;
    ioc->base_cmds.status = MPT3_CMD_PENDING;
    request = mpt3sas_base_get_msg_frame(ioc, smid);
    ioc->base_cmds.smid = smid;
    /* copy the full request frame, not the (smaller) reply layout */
    memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
    init_completion(&ioc->base_cmds.done);
    mpt3sas_base_put_smid_default(ioc, smid);
    wait_for_completion_timeout(&ioc->base_cmds.done,
        msecs_to_jiffies(10000));
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
        pr_err(MPT3SAS_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2SepRequest_t)/4);
        if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
            issue_reset = true;
        goto issue_host_reset;
    }
    if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
        memcpy(mpi_reply, ioc->base_cmds.reply,
            sizeof(Mpi2SepReply_t));
    else
        memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    goto out;

 issue_host_reset:
    if (issue_reset)
        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
    ioc->base_cmds.status = MPT3_CMD_NOT_USED;
    rc = -EFAULT;
 out:
    mutex_unlock(&ioc->base_cmds.mutex);
    return rc;
}

/**
 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
{
    Mpi2PortFactsRequest_t mpi_request;
    Mpi2PortFactsReply_t mpi_reply;
    struct mpt3sas_port_facts *pfacts;
    int mpi_reply_sz, mpi_request_sz, r;

    dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
    mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
    memset(&mpi_request, 0, mpi_request_sz);
    mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
    mpi_request.PortNumber = port;
    r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
        (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

    if (r != 0) {
        pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
            ioc->name, __func__, r);
        return r;
    }

    pfacts = &ioc->pfacts[port];
    memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
    pfacts->PortNumber = mpi_reply.PortNumber;
    pfacts->VP_ID = mpi_reply.VP_ID;
    pfacts->VF_ID = mpi_reply.VF_ID;
    pfacts->MaxPostedCmdBuffers =
        le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);

    return 0;
}

/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
    u32 ioc_state;
    int rc;

    dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
        __func__));

    if (ioc->pci_error_recovery) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: host in pci error recovery\n", ioc->name, __func__));
        return -EFAULT;
    }

    ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
    dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
        ioc->name, __func__, ioc_state));

    if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
        (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
        return 0;

    if (ioc_state & MPI2_DOORBELL_USED) {
        dhsprintk(ioc, printk(MPT3SAS_FMT
            "unexpected doorbell active!\n", ioc->name));
        goto issue_diag_reset;
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt3sas_base_fault_info(ioc, ioc_state &
            MPI2_DOORBELL_DATA_MASK);
        goto issue_diag_reset;
    }

    ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
    if (ioc_state) {
        dfailprintk(ioc, printk(MPT3SAS_FMT
            "%s: failed going to ready state (ioc_state=0x%x)\n",
            ioc->name, __func__, ioc_state));
        return -EFAULT;
    }
    /* the wait succeeded; don't fall through into the diag reset */
    return 0;

 issue_diag_reset:
    rc = _base_diag_reset(ioc);
    return rc;
}

/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
5375 */ 5376 static int 5377 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) 5378 { 5379 Mpi2IOCFactsRequest_t mpi_request; 5380 Mpi2IOCFactsReply_t mpi_reply; 5381 struct mpt3sas_facts *facts; 5382 int mpi_reply_sz, mpi_request_sz, r; 5383 5384 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5385 __func__)); 5386 5387 r = _base_wait_for_iocstate(ioc, 10); 5388 if (r) { 5389 dfailprintk(ioc, printk(MPT3SAS_FMT 5390 "%s: failed getting to correct state\n", 5391 ioc->name, __func__)); 5392 return r; 5393 } 5394 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 5395 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 5396 memset(&mpi_request, 0, mpi_request_sz); 5397 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; 5398 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 5399 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 5400 5401 if (r != 0) { 5402 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5403 ioc->name, __func__, r); 5404 return r; 5405 } 5406 5407 facts = &ioc->facts; 5408 memset(facts, 0, sizeof(struct mpt3sas_facts)); 5409 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); 5410 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); 5411 facts->VP_ID = mpi_reply.VP_ID; 5412 facts->VF_ID = mpi_reply.VF_ID; 5413 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); 5414 facts->MaxChainDepth = mpi_reply.MaxChainDepth; 5415 facts->WhoInit = mpi_reply.WhoInit; 5416 facts->NumberOfPorts = mpi_reply.NumberOfPorts; 5417 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; 5418 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); 5419 facts->MaxReplyDescriptorPostQueueDepth = 5420 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); 5421 facts->ProductID = le16_to_cpu(mpi_reply.ProductID); 5422 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); 5423 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 5424 ioc->ir_firmware = 1; 5425 if ((facts->IOCCapabilities & 5426 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) 5427 ioc->rdpq_array_capable = 1; 5428 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 5429 facts->IOCRequestFrameSize = 5430 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 5431 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 5432 facts->IOCMaxChainSegmentSize = 5433 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); 5434 } 5435 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); 5436 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); 5437 ioc->shost->max_id = -1; 5438 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); 5439 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); 5440 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); 5441 facts->HighPriorityCredit = 5442 le16_to_cpu(mpi_reply.HighPriorityCredit); 5443 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; 5444 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); 5445 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; 5446 5447 /* 5448 * Get the Page Size from IOC Facts. If it's 0, default to 4k. 
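 *
 * CurrentHostPageSize is a power-of-two exponent: page_size =
 * 1 << CurrentHostPageSize, so e.g. a reported value of 12 yields a
 * 4096 byte page. A reported 0 yields the nonsensical value 1, which
 * is what the check below catches before falling back to
 * MPT3SAS_HOST_PAGE_SIZE_4K (the exponent for 4 KiB, per the log
 * message below).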
5449 */ 5450 ioc->page_size = 1 << facts->CurrentHostPageSize; 5451 if (ioc->page_size == 1) { 5452 pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting " 5453 "default host page size to 4k\n", ioc->name); 5454 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; 5455 } 5456 dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n", 5457 ioc->name, facts->CurrentHostPageSize)); 5458 5459 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5460 "hba queue depth(%d), max chains per io(%d)\n", 5461 ioc->name, facts->RequestCredit, 5462 facts->MaxChainDepth)); 5463 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5464 "request frame size(%d), reply frame size(%d)\n", ioc->name, 5465 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); 5466 return 0; 5467 } 5468 5469 /** 5470 * _base_send_ioc_init - send ioc_init to firmware 5471 * @ioc: per adapter object 5472 * 5473 * Returns 0 for success, non-zero for failure. 5474 */ 5475 static int 5476 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) 5477 { 5478 Mpi2IOCInitRequest_t mpi_request; 5479 Mpi2IOCInitReply_t mpi_reply; 5480 int i, r = 0; 5481 ktime_t current_time; 5482 u16 ioc_status; 5483 u32 reply_post_free_array_sz = 0; 5484 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; 5485 dma_addr_t reply_post_free_array_dma; 5486 5487 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5488 __func__)); 5489 5490 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 5491 mpi_request.Function = MPI2_FUNCTION_IOC_INIT; 5492 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 5493 mpi_request.VF_ID = 0; /* TODO */ 5494 mpi_request.VP_ID = 0; 5495 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); 5496 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 5497 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; 5498 5499 if (_base_is_controller_msix_enabled(ioc)) 5500 mpi_request.HostMSIxVectors = ioc->reply_queue_count; 5501 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 5502 mpi_request.ReplyDescriptorPostQueueDepth = 5503 cpu_to_le16(ioc->reply_post_queue_depth); 5504 mpi_request.ReplyFreeQueueDepth = 5505 cpu_to_le16(ioc->reply_free_queue_depth); 5506 5507 mpi_request.SenseBufferAddressHigh = 5508 cpu_to_le32((u64)ioc->sense_dma >> 32); 5509 mpi_request.SystemReplyAddressHigh = 5510 cpu_to_le32((u64)ioc->reply_dma >> 32); 5511 mpi_request.SystemRequestFrameBaseAddress = 5512 cpu_to_le64((u64)ioc->request_dma); 5513 mpi_request.ReplyFreeQueueAddress = 5514 cpu_to_le64((u64)ioc->reply_free_dma); 5515 5516 if (ioc->rdpq_array_enable) { 5517 reply_post_free_array_sz = ioc->reply_queue_count * 5518 sizeof(Mpi2IOCInitRDPQArrayEntry); 5519 reply_post_free_array = pci_alloc_consistent(ioc->pdev, 5520 reply_post_free_array_sz, &reply_post_free_array_dma); 5521 if (!reply_post_free_array) { 5522 pr_err(MPT3SAS_FMT 5523 "reply_post_free_array: pci_alloc_consistent failed\n", 5524 ioc->name); 5525 r = -ENOMEM; 5526 goto out; 5527 } 5528 memset(reply_post_free_array, 0, reply_post_free_array_sz); 5529 for (i = 0; i < ioc->reply_queue_count; i++) 5530 reply_post_free_array[i].RDPQBaseAddress = 5531 cpu_to_le64( 5532 (u64)ioc->reply_post[i].reply_post_free_dma); 5533 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; 5534 mpi_request.ReplyDescriptorPostQueueAddress = 5535 cpu_to_le64((u64)reply_post_free_array_dma); 5536 } else { 5537 mpi_request.ReplyDescriptorPostQueueAddress = 5538 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); 5539 } 5540 5541 /* This time stamp specifies number of milliseconds 
	 * since the epoch (midnight, January 1, 1970).
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		pr_info("\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
		r = -EIO;
	}

 out:
	if (reply_post_free_array)
		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
		    reply_post_free_array,
		    reply_post_free_array_dma);
	return r;
}

/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;

	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
		return 1;

	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		ioc->port_enable_failed = 1;

	if (ioc->is_driver_loading) {
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
			mpt3sas_port_enable_complete(ioc);
			return 1;
		} else {
			ioc->start_scan_failed = ioc_status;
			ioc->start_scan = 0;
			return 1;
		}
	}
	complete(&ioc->port_enable_cmds.done);
	return 1;
}

/**
 * _base_send_port_enable - send port_enable (start device discovery) to firmware
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
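 *
 * This is the synchronous variant: it issues the request and then blocks
 * (for up to 300 seconds) on port_enable_cmds.done until
 * mpt3sas_port_enable_done() completes it.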
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
		    ioc->name, __func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
	    "SUCCESS" : "FAILED"));
	return r;
}

/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	u16 smid;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	mpt3sas_base_put_smid_default(ioc, smid);
	return 0;
}

/**
 * _base_determine_wait_on_discovery - disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Returns 1 for wait, 0 for don't wait
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time to
	 * turn on the bit in ioc->pd_handles to indicate a PD.
	 * Also, it may be required to report volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no BIOS, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* The BIOS is present, so we drop down here.
	 *
	 * If there are any entries in BIOS Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	    /* Request Boot Device */
	    (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	    /* Alternate Request Boot Device */
	    (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}

/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}

/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
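 *
 * The request carries ioc->event_masks, where a bit set to 1 masks
 * (disables) the corresponding firmware event; events enabled via
 * _base_unmask_events() have already had their bit cleared by the time
 * the masks are copied into the EventMasks words of the request.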
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
		    ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}

/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event type bitmask words requested by the application
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}

/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
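 *
 * Roughly: unlock the diagnostic register by writing the WriteSequence
 * magic keys, set MPI2_DIAG_RESET_ADAPTER, poll HostDiagnostic until the
 * reset bit clears (bounded at roughly 300 seconds), then wait for the
 * firmware to reach the READY state.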
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	    &ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}

/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
	    ioc->name, __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				pr_err(MPT3SAS_FMT
				    "%s: failed going to ready state (ioc_state=0x%x)\n",
				    ioc->name, __func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
		    "unexpected doorbell active!\n",
		    ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
		}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}

/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
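 *
 * This rebuilds the internal request/chain free lists, re-initializes
 * the reply free and reply post queues, sends IOC init and event
 * notification, and then sends port enable (deferred to the initial
 * device scan when the driver is still loading).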
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed volume target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	/* chain pool */
	INIT_LIST_HEAD(&ioc->free_chain_list);
	for (i = 0; i < ioc->chain_depth; i++)
		list_add_tail(&ioc->chain_lookup[i].tracker_list,
		    &ioc->free_chain_list);

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
			    reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
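		 * With RDPQ every reply queue has its own DMA allocation in
		 * ioc->reply_post[], so step through that array; without it
		 * all queues share one contiguous allocation, each queue
		 * starting reply_post_queue_depth descriptors after the last.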
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
			    ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r)
		return r;

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	_base_static_config_pages(ioc);

	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}

/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}

/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
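 *
 * Attach order, roughly: map the PCI resources, fetch IOC facts, select
 * the SG-construction callbacks for the MPI generation, bring the IOC to
 * the READY state, read the per-port facts, allocate the memory pools
 * and internal command buffers, unmask the events of interest, and
 * finally make the IOC operational.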
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
		    "allocation for cpu_msix_table failed!!!\n",
		    ioc->name));
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
			    "for reply_post_host_index failed!!!\n",
			    ioc->name));
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_free_resources;

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);

		break;
	}

	if (ioc->is_mcpu_endpoint)
		ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
	else
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;

	/*
	 * These function pointers are for other requests that don't
	 * require the IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
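	 * Such requests are built with the classic MPI SGE format
	 * regardless of the MPI generation of the HBA.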
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r)
			goto out_free_resources;
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
	    kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	ioc->non_operational_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}


/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Return nothing.
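 *
 * Teardown mirrors mpt3sas_base_attach(): stop the watchdog, free the
 * controller resources, release the memory pools, and then free the
 * bookkeeping allocations made at attach time.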
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}

/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}

/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits 10 seconds for all pending commands to complete
 * prior to putting the controller into reset.
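 *
 * The pending count is taken from the SCSI host's host_busy counter and
 * the wait (bounded at 10 * HZ) is satisfied through ioc->reset_wq.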
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. "
		    "Please reboot the system and ensure that the correct "
		    "firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
"SUCCESS" : "FAILED"))); 6754 6755 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 6756 ioc->ioc_reset_in_progress_status = r; 6757 ioc->shost_recovery = 0; 6758 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 6759 ioc->ioc_reset_count++; 6760 mutex_unlock(&ioc->reset_in_progress_mutex); 6761 6762 out_unlocked: 6763 if ((r == 0) && is_trigger) { 6764 if (is_fault) 6765 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); 6766 else 6767 mpt3sas_trigger_master(ioc, 6768 MASTER_TRIGGER_ADAPTER_RESET); 6769 } 6770 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, 6771 __func__)); 6772 return r; 6773 } 6774