1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2021 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/io-64-nonatomic-lo-hi.h> 12 13 #if defined(writeq) && defined(CONFIG_64BIT) 14 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 15 { 16 writeq(b, addr); 17 } 18 #else 19 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 20 { 21 __u64 data_out = b; 22 23 writel((u32)(data_out), addr); 24 writel((u32)(data_out >> 32), (addr + 4)); 25 } 26 #endif 27 28 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc) 29 { 30 u16 i, max_vectors; 31 32 max_vectors = mrioc->intr_info_count; 33 34 for (i = 0; i < max_vectors; i++) 35 synchronize_irq(pci_irq_vector(mrioc->pdev, i)); 36 } 37 38 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc) 39 { 40 mrioc->intr_enabled = 0; 41 mpi3mr_sync_irqs(mrioc); 42 } 43 44 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc) 45 { 46 mrioc->intr_enabled = 1; 47 } 48 49 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) 50 { 51 u16 i; 52 53 mpi3mr_ioc_disable_intr(mrioc); 54 55 if (!mrioc->intr_info) 56 return; 57 58 for (i = 0; i < mrioc->intr_info_count; i++) 59 free_irq(pci_irq_vector(mrioc->pdev, i), 60 (mrioc->intr_info + i)); 61 62 kfree(mrioc->intr_info); 63 mrioc->intr_info = NULL; 64 mrioc->intr_info_count = 0; 65 pci_free_irq_vectors(mrioc->pdev); 66 } 67 68 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, 69 dma_addr_t dma_addr) 70 { 71 struct mpi3_sge_common *sgel = paddr; 72 73 sgel->flags = flags; 74 sgel->length = cpu_to_le32(length); 75 sgel->address = cpu_to_le64(dma_addr); 76 } 77 78 void mpi3mr_build_zero_len_sge(void *paddr) 79 { 80 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 81 82 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1); 83 } 84 85 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, 86 dma_addr_t phys_addr) 87 { 88 if (!phys_addr) 89 return NULL; 90 91 if ((phys_addr < mrioc->reply_buf_dma) || 92 (phys_addr > mrioc->reply_buf_dma_max_address)) 93 return NULL; 94 95 return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma); 96 } 97 98 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, 99 dma_addr_t phys_addr) 100 { 101 if (!phys_addr) 102 return NULL; 103 104 return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma); 105 } 106 107 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, 108 u64 reply_dma) 109 { 110 u32 old_idx = 0; 111 112 spin_lock(&mrioc->reply_free_queue_lock); 113 old_idx = mrioc->reply_free_queue_host_index; 114 mrioc->reply_free_queue_host_index = ( 115 (mrioc->reply_free_queue_host_index == 116 (mrioc->reply_free_qsz - 1)) ? 0 : 117 (mrioc->reply_free_queue_host_index + 1)); 118 mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); 119 writel(mrioc->reply_free_queue_host_index, 120 &mrioc->sysif_regs->reply_free_host_index); 121 spin_unlock(&mrioc->reply_free_queue_lock); 122 } 123 124 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, 125 u64 sense_buf_dma) 126 { 127 u32 old_idx = 0; 128 129 spin_lock(&mrioc->sbq_lock); 130 old_idx = mrioc->sbq_host_index; 131 mrioc->sbq_host_index = ((mrioc->sbq_host_index == 132 (mrioc->sense_buf_q_sz - 1)) ? 0 : 133 (mrioc->sbq_host_index + 1)); 134 mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); 135 writel(mrioc->sbq_host_index, 136 &mrioc->sysif_regs->sense_buffer_free_host_index); 137 spin_unlock(&mrioc->sbq_lock); 138 } 139 140 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc, 141 struct mpi3_default_reply *def_reply) 142 { 143 struct mpi3_event_notification_reply *event_reply = 144 (struct mpi3_event_notification_reply *)def_reply; 145 146 mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count); 147 } 148 149 static struct mpi3mr_drv_cmd * 150 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, 151 struct mpi3_default_reply *def_reply) 152 { 153 switch (host_tag) { 154 case MPI3MR_HOSTTAG_INITCMDS: 155 return &mrioc->init_cmds; 156 case MPI3MR_HOSTTAG_INVALID: 157 if (def_reply && def_reply->function == 158 MPI3_FUNCTION_EVENT_NOTIFICATION) 159 mpi3mr_handle_events(mrioc, def_reply); 160 return NULL; 161 default: 162 break; 163 } 164 165 return NULL; 166 } 167 168 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, 169 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma) 170 { 171 u16 reply_desc_type, host_tag = 0; 172 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 173 u32 ioc_loginfo = 0; 174 struct mpi3_status_reply_descriptor *status_desc; 175 struct mpi3_address_reply_descriptor *addr_desc; 176 struct mpi3_success_reply_descriptor *success_desc; 177 struct mpi3_default_reply *def_reply = NULL; 178 struct mpi3mr_drv_cmd *cmdptr = NULL; 179 struct mpi3_scsi_io_reply *scsi_reply; 180 u8 *sense_buf = NULL; 181 182 *reply_dma = 0; 183 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 184 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 185 switch (reply_desc_type) { 186 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 187 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 188 host_tag = le16_to_cpu(status_desc->host_tag); 189 ioc_status = le16_to_cpu(status_desc->ioc_status); 190 if (ioc_status & 191 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 192 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 193 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 194 break; 195 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 196 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 197 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 198 def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma); 199 if (!def_reply) 200 goto out; 201 host_tag = le16_to_cpu(def_reply->host_tag); 202 ioc_status = le16_to_cpu(def_reply->ioc_status); 203 if (ioc_status & 204 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 205 ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); 206 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 207 if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { 208 scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; 209 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 210 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 211 } 212 break; 213 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 214 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 215 host_tag = le16_to_cpu(success_desc->host_tag); 216 break; 217 default: 218 break; 219 } 220 221 cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply); 222 if (cmdptr) { 223 if (cmdptr->state & MPI3MR_CMD_PENDING) { 224 cmdptr->state |= MPI3MR_CMD_COMPLETE; 225 cmdptr->ioc_loginfo = ioc_loginfo; 226 cmdptr->ioc_status = ioc_status; 227 cmdptr->state &= ~MPI3MR_CMD_PENDING; 228 if (def_reply) { 229 cmdptr->state |= MPI3MR_CMD_REPLY_VALID; 230 memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, 231 mrioc->facts.reply_sz); 232 } 233 if (cmdptr->is_waiting) { 234 complete(&cmdptr->done); 235 cmdptr->is_waiting = 0; 236 } else if (cmdptr->callback) 237 cmdptr->callback(mrioc, cmdptr); 238 } 239 } 240 out: 241 if (sense_buf) 242 mpi3mr_repost_sense_buf(mrioc, 243 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 244 } 245 246 static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) 247 { 248 u32 exp_phase = mrioc->admin_reply_ephase; 249 u32 admin_reply_ci = mrioc->admin_reply_ci; 250 u32 num_admin_replies = 0; 251 u64 reply_dma = 0; 252 struct mpi3_default_reply_descriptor *reply_desc; 253 254 reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + 255 admin_reply_ci; 256 257 if ((le16_to_cpu(reply_desc->reply_flags) & 258 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) 259 return 0; 260 261 do { 262 mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); 263 mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma); 264 if (reply_dma) 265 mpi3mr_repost_reply_buf(mrioc, reply_dma); 266 num_admin_replies++; 267 if (++admin_reply_ci == mrioc->num_admin_replies) { 268 admin_reply_ci = 0; 269 exp_phase ^= 1; 270 } 271 reply_desc = 272 (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + 273 admin_reply_ci; 274 if ((le16_to_cpu(reply_desc->reply_flags) & 275 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) 276 break; 277 } while (1); 278 279 writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); 280 mrioc->admin_reply_ci = admin_reply_ci; 281 mrioc->admin_reply_ephase = exp_phase; 282 283 return num_admin_replies; 284 } 285 286 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) 287 { 288 struct mpi3mr_intr_info *intr_info = privdata; 289 struct mpi3mr_ioc *mrioc; 290 u16 midx; 291 u32 num_admin_replies = 0; 292 293 if (!intr_info) 294 return IRQ_NONE; 295 296 mrioc = intr_info->mrioc; 297 298 if (!mrioc->intr_enabled) 299 return IRQ_NONE; 300 301 midx = intr_info->msix_index; 302 303 if (!midx) 304 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); 305 306 if (num_admin_replies) 307 return IRQ_HANDLED; 308 else 309 return IRQ_NONE; 310 } 311 312 static irqreturn_t mpi3mr_isr(int irq, void *privdata) 313 { 314 struct mpi3mr_intr_info *intr_info = privdata; 315 int ret; 316 317 if (!intr_info) 318 return IRQ_NONE; 319 320 /* Call primary ISR routine */ 321 ret = mpi3mr_isr_primary(irq, privdata); 322 323 return ret; 324 } 325 326 /** 327 * mpi3mr_isr_poll - Reply queue polling routine 328 * @irq: IRQ 329 * @privdata: Interrupt info 330 * 331 * poll for pending I/O completions in a loop until pending I/Os 332 * present or controller queue depth I/Os are processed. 333 * 334 * Return: IRQ_NONE or IRQ_HANDLED 335 */ 336 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) 337 { 338 return IRQ_HANDLED; 339 } 340 341 /** 342 * mpi3mr_request_irq - Request IRQ and register ISR 343 * @mrioc: Adapter instance reference 344 * @index: IRQ vector index 345 * 346 * Request threaded ISR with primary ISR and secondary 347 * 348 * Return: 0 on success and non zero on failures. 349 */ 350 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 351 { 352 struct pci_dev *pdev = mrioc->pdev; 353 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 354 int retval = 0; 355 356 intr_info->mrioc = mrioc; 357 intr_info->msix_index = index; 358 intr_info->op_reply_q = NULL; 359 360 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 361 mrioc->driver_name, mrioc->id, index); 362 363 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 364 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 365 if (retval) { 366 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 367 intr_info->name, pci_irq_vector(pdev, index)); 368 return retval; 369 } 370 371 return retval; 372 } 373 374 /** 375 * mpi3mr_setup_isr - Setup ISR for the controller 376 * @mrioc: Adapter instance reference 377 * @setup_one: Request one IRQ or more 378 * 379 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 380 * 381 * Return: 0 on success and non zero on failures. 382 */ 383 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 384 { 385 unsigned int irq_flags = PCI_IRQ_MSIX; 386 u16 max_vectors = 0, i; 387 int retval = 0; 388 struct irq_affinity desc = { .pre_vectors = 1}; 389 390 mpi3mr_cleanup_isr(mrioc); 391 392 if (setup_one || reset_devices) 393 max_vectors = 1; 394 else { 395 max_vectors = 396 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 397 398 ioc_info(mrioc, 399 "MSI-X vectors supported: %d, no of cores: %d,", 400 mrioc->msix_count, mrioc->cpu_count); 401 ioc_info(mrioc, 402 "MSI-x vectors requested: %d\n", max_vectors); 403 } 404 405 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 406 407 mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0; 408 i = pci_alloc_irq_vectors_affinity(mrioc->pdev, 409 1, max_vectors, irq_flags, &desc); 410 if (i <= 0) { 411 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 412 goto out_failed; 413 } 414 if (i != max_vectors) { 415 ioc_info(mrioc, 416 "allocated vectors (%d) are less than configured (%d)\n", 417 i, max_vectors); 418 /* 419 * If only one MSI-x is allocated, then MSI-x 0 will be shared 420 * between Admin queue and operational queue 421 */ 422 if (i == 1) 423 mrioc->op_reply_q_offset = 0; 424 425 max_vectors = i; 426 } 427 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 428 GFP_KERNEL); 429 if (!mrioc->intr_info) { 430 retval = -1; 431 pci_free_irq_vectors(mrioc->pdev); 432 goto out_failed; 433 } 434 for (i = 0; i < max_vectors; i++) { 435 retval = mpi3mr_request_irq(mrioc, i); 436 if (retval) { 437 mrioc->intr_info_count = i; 438 goto out_failed; 439 } 440 } 441 mrioc->intr_info_count = max_vectors; 442 mpi3mr_ioc_enable_intr(mrioc); 443 return retval; 444 out_failed: 445 mpi3mr_cleanup_isr(mrioc); 446 447 return retval; 448 } 449 450 static const struct { 451 enum mpi3mr_iocstate value; 452 char *name; 453 } mrioc_states[] = { 454 { MRIOC_STATE_READY, "ready" }, 455 { MRIOC_STATE_FAULT, "fault" }, 456 { MRIOC_STATE_RESET, "reset" }, 457 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 458 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 459 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 460 }; 461 462 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 463 { 464 int i; 465 char *name = NULL; 466 467 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 468 if (mrioc_states[i].value == mrioc_state) { 469 name = mrioc_states[i].name; 470 break; 471 } 472 } 473 return name; 474 } 475 476 /** 477 * mpi3mr_print_fault_info - Display fault information 478 * @mrioc: Adapter instance reference 479 * 480 * Display the controller fault information if there is a 481 * controller fault. 482 * 483 * Return: Nothing. 484 */ 485 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 486 { 487 u32 ioc_status, code, code1, code2, code3; 488 489 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 490 491 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 492 code = readl(&mrioc->sysif_regs->fault); 493 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 494 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 495 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 496 497 ioc_info(mrioc, 498 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 499 code, code1, code2, code3); 500 } 501 } 502 503 /** 504 * mpi3mr_get_iocstate - Get IOC State 505 * @mrioc: Adapter instance reference 506 * 507 * Return a proper IOC state enum based on the IOC status and 508 * IOC configuration and unrcoverable state of the controller. 509 * 510 * Return: Current IOC state. 511 */ 512 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 513 { 514 u32 ioc_status, ioc_config; 515 u8 ready, enabled; 516 517 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 518 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 519 520 if (mrioc->unrecoverable) 521 return MRIOC_STATE_UNRECOVERABLE; 522 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 523 return MRIOC_STATE_FAULT; 524 525 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 526 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 527 528 if (ready && enabled) 529 return MRIOC_STATE_READY; 530 if ((!ready) && (!enabled)) 531 return MRIOC_STATE_RESET; 532 if ((!ready) && (enabled)) 533 return MRIOC_STATE_BECOMING_READY; 534 535 return MRIOC_STATE_RESET_REQUESTED; 536 } 537 538 /** 539 * mpi3mr_clear_reset_history - clear reset history 540 * @mrioc: Adapter instance reference 541 * 542 * Write the reset history bit in IOC status to clear the bit, 543 * if it is already set. 544 * 545 * Return: Nothing. 546 */ 547 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) 548 { 549 u32 ioc_status; 550 551 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 552 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 553 writel(ioc_status, &mrioc->sysif_regs->ioc_status); 554 } 555 556 /** 557 * mpi3mr_issue_and_process_mur - Message unit Reset handler 558 * @mrioc: Adapter instance reference 559 * @reset_reason: Reset reason code 560 * 561 * Issue Message unit Reset to the controller and wait for it to 562 * be complete. 563 * 564 * Return: 0 on success, -1 on failure. 565 */ 566 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, 567 u32 reset_reason) 568 { 569 u32 ioc_config, timeout, ioc_status; 570 int retval = -1; 571 572 ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); 573 if (mrioc->unrecoverable) { 574 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n"); 575 return retval; 576 } 577 mpi3mr_clear_reset_history(mrioc); 578 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 579 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 580 ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 581 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 582 583 timeout = mrioc->ready_timeout * 10; 584 do { 585 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 586 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { 587 mpi3mr_clear_reset_history(mrioc); 588 ioc_config = 589 readl(&mrioc->sysif_regs->ioc_configuration); 590 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 591 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || 592 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) { 593 retval = 0; 594 break; 595 } 596 } 597 msleep(100); 598 } while (--timeout); 599 600 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 601 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 602 603 ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", 604 (!retval) ? "successful" : "failed", ioc_status, ioc_config); 605 return retval; 606 } 607 608 /** 609 * mpi3mr_bring_ioc_ready - Bring controller to ready state 610 * @mrioc: Adapter instance reference 611 * 612 * Set Enable IOC bit in IOC configuration register and wait for 613 * the controller to become ready. 614 * 615 * Return: 0 on success, -1 on failure. 616 */ 617 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 618 { 619 u32 ioc_config, timeout; 620 enum mpi3mr_iocstate current_state; 621 622 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 623 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 624 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 625 626 timeout = mrioc->ready_timeout * 10; 627 do { 628 current_state = mpi3mr_get_iocstate(mrioc); 629 if (current_state == MRIOC_STATE_READY) 630 return 0; 631 msleep(100); 632 } while (--timeout); 633 634 return -1; 635 } 636 637 /** 638 * mpi3mr_set_diagsave - Set diag save bit for snapdump 639 * @mrioc: Adapter reference 640 * 641 * Set diag save bit in IOC configuration register to enable 642 * snapdump. 643 * 644 * Return: Nothing. 645 */ 646 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 647 { 648 u32 ioc_config; 649 650 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 651 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 652 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 653 } 654 655 /** 656 * mpi3mr_issue_reset - Issue reset to the controller 657 * @mrioc: Adapter reference 658 * @reset_type: Reset type 659 * @reset_reason: Reset reason code 660 * 661 * TBD 662 * 663 * Return: 0 on success, non-zero on failure. 664 */ 665 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 666 u32 reset_reason) 667 { 668 return 0; 669 } 670 671 /** 672 * mpi3mr_admin_request_post - Post request to admin queue 673 * @mrioc: Adapter reference 674 * @admin_req: MPI3 request 675 * @admin_req_sz: Request size 676 * @ignore_reset: Ignore reset in process 677 * 678 * Post the MPI3 request into admin request queue and 679 * inform the controller, if the queue is full return 680 * appropriate error. 681 * 682 * Return: 0 on success, non-zero on failure. 683 */ 684 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 685 u16 admin_req_sz, u8 ignore_reset) 686 { 687 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 688 int retval = 0; 689 unsigned long flags; 690 u8 *areq_entry; 691 692 if (mrioc->unrecoverable) { 693 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 694 return -EFAULT; 695 } 696 697 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 698 areq_pi = mrioc->admin_req_pi; 699 areq_ci = mrioc->admin_req_ci; 700 max_entries = mrioc->num_admin_req; 701 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 702 (areq_pi == (max_entries - 1)))) { 703 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 704 retval = -EAGAIN; 705 goto out; 706 } 707 if (!ignore_reset && mrioc->reset_in_progress) { 708 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 709 retval = -EAGAIN; 710 goto out; 711 } 712 areq_entry = (u8 *)mrioc->admin_req_base + 713 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 714 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 715 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 716 717 if (++areq_pi == max_entries) 718 areq_pi = 0; 719 mrioc->admin_req_pi = areq_pi; 720 721 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 722 723 out: 724 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 725 726 return retval; 727 } 728 729 /** 730 * mpi3mr_free_op_req_q_segments - free request memory segments 731 * @mrioc: Adapter instance reference 732 * @q_idx: operational request queue index 733 * 734 * Free memory segments allocated for operational request queue 735 * 736 * Return: Nothing. 737 */ 738 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 739 { 740 u16 j; 741 int size; 742 struct segments *segments; 743 744 segments = mrioc->req_qinfo[q_idx].q_segments; 745 if (!segments) 746 return; 747 748 if (mrioc->enable_segqueue) { 749 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 750 if (mrioc->req_qinfo[q_idx].q_segment_list) { 751 dma_free_coherent(&mrioc->pdev->dev, 752 MPI3MR_MAX_SEG_LIST_SIZE, 753 mrioc->req_qinfo[q_idx].q_segment_list, 754 mrioc->req_qinfo[q_idx].q_segment_list_dma); 755 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 756 } 757 } else 758 size = mrioc->req_qinfo[q_idx].num_requests * 759 mrioc->facts.op_req_sz; 760 761 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 762 if (!segments[j].segment) 763 continue; 764 dma_free_coherent(&mrioc->pdev->dev, 765 size, segments[j].segment, segments[j].segment_dma); 766 segments[j].segment = NULL; 767 } 768 kfree(mrioc->req_qinfo[q_idx].q_segments); 769 mrioc->req_qinfo[q_idx].q_segments = NULL; 770 mrioc->req_qinfo[q_idx].qid = 0; 771 } 772 773 /** 774 * mpi3mr_free_op_reply_q_segments - free reply memory segments 775 * @mrioc: Adapter instance reference 776 * @q_idx: operational reply queue index 777 * 778 * Free memory segments allocated for operational reply queue 779 * 780 * Return: Nothing. 781 */ 782 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 783 { 784 u16 j; 785 int size; 786 struct segments *segments; 787 788 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 789 if (!segments) 790 return; 791 792 if (mrioc->enable_segqueue) { 793 size = MPI3MR_OP_REP_Q_SEG_SIZE; 794 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 795 dma_free_coherent(&mrioc->pdev->dev, 796 MPI3MR_MAX_SEG_LIST_SIZE, 797 mrioc->op_reply_qinfo[q_idx].q_segment_list, 798 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 799 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 800 } 801 } else 802 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 803 mrioc->op_reply_desc_sz; 804 805 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 806 if (!segments[j].segment) 807 continue; 808 dma_free_coherent(&mrioc->pdev->dev, 809 size, segments[j].segment, segments[j].segment_dma); 810 segments[j].segment = NULL; 811 } 812 813 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 814 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 815 mrioc->op_reply_qinfo[q_idx].qid = 0; 816 } 817 818 /** 819 * mpi3mr_delete_op_reply_q - delete operational reply queue 820 * @mrioc: Adapter instance reference 821 * @qidx: operational reply queue index 822 * 823 * Delete operatinal reply queue by issuing MPI request 824 * through admin queue. 825 * 826 * Return: 0 on success, non-zero on failure. 827 */ 828 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 829 { 830 struct mpi3_delete_reply_queue_request delq_req; 831 int retval = 0; 832 u16 reply_qid = 0, midx; 833 834 reply_qid = mrioc->op_reply_qinfo[qidx].qid; 835 836 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 837 838 if (!reply_qid) { 839 retval = -1; 840 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 841 goto out; 842 } 843 844 memset(&delq_req, 0, sizeof(delq_req)); 845 mutex_lock(&mrioc->init_cmds.mutex); 846 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 847 retval = -1; 848 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 849 mutex_unlock(&mrioc->init_cmds.mutex); 850 goto out; 851 } 852 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 853 mrioc->init_cmds.is_waiting = 1; 854 mrioc->init_cmds.callback = NULL; 855 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 856 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 857 delq_req.queue_id = cpu_to_le16(reply_qid); 858 859 init_completion(&mrioc->init_cmds.done); 860 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 861 1); 862 if (retval) { 863 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 864 goto out_unlock; 865 } 866 wait_for_completion_timeout(&mrioc->init_cmds.done, 867 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 868 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 869 ioc_err(mrioc, "Issue DelRepQ: command timed out\n"); 870 mpi3mr_set_diagsave(mrioc); 871 mpi3mr_issue_reset(mrioc, 872 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 873 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 874 mrioc->unrecoverable = 1; 875 876 retval = -1; 877 goto out_unlock; 878 } 879 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 880 != MPI3_IOCSTATUS_SUCCESS) { 881 ioc_err(mrioc, 882 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 883 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 884 mrioc->init_cmds.ioc_loginfo); 885 retval = -1; 886 goto out_unlock; 887 } 888 mrioc->intr_info[midx].op_reply_q = NULL; 889 890 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 891 out_unlock: 892 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 893 mutex_unlock(&mrioc->init_cmds.mutex); 894 out: 895 896 return retval; 897 } 898 899 /** 900 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 901 * @mrioc: Adapter instance reference 902 * @qidx: request queue index 903 * 904 * Allocate segmented memory pools for operational reply 905 * queue. 906 * 907 * Return: 0 on success, non-zero on failure. 908 */ 909 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 910 { 911 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 912 int i, size; 913 u64 *q_segment_list_entry = NULL; 914 struct segments *segments; 915 916 if (mrioc->enable_segqueue) { 917 op_reply_q->segment_qd = 918 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 919 920 size = MPI3MR_OP_REP_Q_SEG_SIZE; 921 922 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 923 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 924 GFP_KERNEL); 925 if (!op_reply_q->q_segment_list) 926 return -ENOMEM; 927 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 928 } else { 929 op_reply_q->segment_qd = op_reply_q->num_replies; 930 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 931 } 932 933 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 934 op_reply_q->segment_qd); 935 936 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 937 sizeof(struct segments), GFP_KERNEL); 938 if (!op_reply_q->q_segments) 939 return -ENOMEM; 940 941 segments = op_reply_q->q_segments; 942 for (i = 0; i < op_reply_q->num_segments; i++) { 943 segments[i].segment = 944 dma_alloc_coherent(&mrioc->pdev->dev, 945 size, &segments[i].segment_dma, GFP_KERNEL); 946 if (!segments[i].segment) 947 return -ENOMEM; 948 if (mrioc->enable_segqueue) 949 q_segment_list_entry[i] = 950 (unsigned long)segments[i].segment_dma; 951 } 952 953 return 0; 954 } 955 956 /** 957 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 958 * @mrioc: Adapter instance reference 959 * @qidx: request queue index 960 * 961 * Allocate segmented memory pools for operational request 962 * queue. 963 * 964 * Return: 0 on success, non-zero on failure. 965 */ 966 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 967 { 968 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 969 int i, size; 970 u64 *q_segment_list_entry = NULL; 971 struct segments *segments; 972 973 if (mrioc->enable_segqueue) { 974 op_req_q->segment_qd = 975 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 976 977 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 978 979 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 980 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 981 GFP_KERNEL); 982 if (!op_req_q->q_segment_list) 983 return -ENOMEM; 984 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 985 986 } else { 987 op_req_q->segment_qd = op_req_q->num_requests; 988 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 989 } 990 991 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 992 op_req_q->segment_qd); 993 994 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 995 sizeof(struct segments), GFP_KERNEL); 996 if (!op_req_q->q_segments) 997 return -ENOMEM; 998 999 segments = op_req_q->q_segments; 1000 for (i = 0; i < op_req_q->num_segments; i++) { 1001 segments[i].segment = 1002 dma_alloc_coherent(&mrioc->pdev->dev, 1003 size, &segments[i].segment_dma, GFP_KERNEL); 1004 if (!segments[i].segment) 1005 return -ENOMEM; 1006 if (mrioc->enable_segqueue) 1007 q_segment_list_entry[i] = 1008 (unsigned long)segments[i].segment_dma; 1009 } 1010 1011 return 0; 1012 } 1013 1014 /** 1015 * mpi3mr_create_op_reply_q - create operational reply queue 1016 * @mrioc: Adapter instance reference 1017 * @qidx: operational reply queue index 1018 * 1019 * Create operatinal reply queue by issuing MPI request 1020 * through admin queue. 1021 * 1022 * Return: 0 on success, non-zero on failure. 1023 */ 1024 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1025 { 1026 struct mpi3_create_reply_queue_request create_req; 1027 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1028 int retval = 0; 1029 u16 reply_qid = 0, midx; 1030 1031 reply_qid = op_reply_q->qid; 1032 1033 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1034 1035 if (reply_qid) { 1036 retval = -1; 1037 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1038 reply_qid); 1039 1040 return retval; 1041 } 1042 1043 reply_qid = qidx + 1; 1044 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1045 op_reply_q->ci = 0; 1046 op_reply_q->ephase = 1; 1047 1048 if (!op_reply_q->q_segments) { 1049 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1050 if (retval) { 1051 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1052 goto out; 1053 } 1054 } 1055 1056 memset(&create_req, 0, sizeof(create_req)); 1057 mutex_lock(&mrioc->init_cmds.mutex); 1058 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1059 retval = -1; 1060 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1061 goto out; 1062 } 1063 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1064 mrioc->init_cmds.is_waiting = 1; 1065 mrioc->init_cmds.callback = NULL; 1066 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1067 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1068 create_req.queue_id = cpu_to_le16(reply_qid); 1069 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1070 create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); 1071 if (mrioc->enable_segqueue) { 1072 create_req.flags |= 1073 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1074 create_req.base_address = cpu_to_le64( 1075 op_reply_q->q_segment_list_dma); 1076 } else 1077 create_req.base_address = cpu_to_le64( 1078 op_reply_q->q_segments[0].segment_dma); 1079 1080 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1081 1082 init_completion(&mrioc->init_cmds.done); 1083 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1084 sizeof(create_req), 1); 1085 if (retval) { 1086 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1087 goto out_unlock; 1088 } 1089 wait_for_completion_timeout(&mrioc->init_cmds.done, 1090 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1091 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1092 ioc_err(mrioc, "CreateRepQ: command timed out\n"); 1093 mpi3mr_set_diagsave(mrioc); 1094 mpi3mr_issue_reset(mrioc, 1095 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1096 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1097 mrioc->unrecoverable = 1; 1098 retval = -1; 1099 goto out_unlock; 1100 } 1101 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1102 != MPI3_IOCSTATUS_SUCCESS) { 1103 ioc_err(mrioc, 1104 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1105 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1106 mrioc->init_cmds.ioc_loginfo); 1107 retval = -1; 1108 goto out_unlock; 1109 } 1110 op_reply_q->qid = reply_qid; 1111 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1112 1113 out_unlock: 1114 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1115 mutex_unlock(&mrioc->init_cmds.mutex); 1116 out: 1117 1118 return retval; 1119 } 1120 1121 /** 1122 * mpi3mr_create_op_req_q - create operational request queue 1123 * @mrioc: Adapter instance reference 1124 * @idx: operational request queue index 1125 * @reply_qid: Reply queue ID 1126 * 1127 * Create operatinal request queue by issuing MPI request 1128 * through admin queue. 1129 * 1130 * Return: 0 on success, non-zero on failure. 1131 */ 1132 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1133 u16 reply_qid) 1134 { 1135 struct mpi3_create_request_queue_request create_req; 1136 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1137 int retval = 0; 1138 u16 req_qid = 0; 1139 1140 req_qid = op_req_q->qid; 1141 1142 if (req_qid) { 1143 retval = -1; 1144 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1145 req_qid); 1146 1147 return retval; 1148 } 1149 req_qid = idx + 1; 1150 1151 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1152 op_req_q->ci = 0; 1153 op_req_q->pi = 0; 1154 op_req_q->reply_qid = reply_qid; 1155 spin_lock_init(&op_req_q->q_lock); 1156 1157 if (!op_req_q->q_segments) { 1158 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1159 if (retval) { 1160 mpi3mr_free_op_req_q_segments(mrioc, idx); 1161 goto out; 1162 } 1163 } 1164 1165 memset(&create_req, 0, sizeof(create_req)); 1166 mutex_lock(&mrioc->init_cmds.mutex); 1167 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1168 retval = -1; 1169 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1170 goto out; 1171 } 1172 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1173 mrioc->init_cmds.is_waiting = 1; 1174 mrioc->init_cmds.callback = NULL; 1175 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1176 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1177 create_req.queue_id = cpu_to_le16(req_qid); 1178 if (mrioc->enable_segqueue) { 1179 create_req.flags = 1180 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1181 create_req.base_address = cpu_to_le64( 1182 op_req_q->q_segment_list_dma); 1183 } else 1184 create_req.base_address = cpu_to_le64( 1185 op_req_q->q_segments[0].segment_dma); 1186 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1187 create_req.size = cpu_to_le16(op_req_q->num_requests); 1188 1189 init_completion(&mrioc->init_cmds.done); 1190 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1191 sizeof(create_req), 1); 1192 if (retval) { 1193 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1194 goto out_unlock; 1195 } 1196 wait_for_completion_timeout(&mrioc->init_cmds.done, 1197 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1198 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1199 ioc_err(mrioc, "CreateReqQ: command timed out\n"); 1200 mpi3mr_set_diagsave(mrioc); 1201 if (mpi3mr_issue_reset(mrioc, 1202 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1203 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT)) 1204 mrioc->unrecoverable = 1; 1205 retval = -1; 1206 goto out_unlock; 1207 } 1208 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1209 != MPI3_IOCSTATUS_SUCCESS) { 1210 ioc_err(mrioc, 1211 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1212 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1213 mrioc->init_cmds.ioc_loginfo); 1214 retval = -1; 1215 goto out_unlock; 1216 } 1217 op_req_q->qid = req_qid; 1218 1219 out_unlock: 1220 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1221 mutex_unlock(&mrioc->init_cmds.mutex); 1222 out: 1223 1224 return retval; 1225 } 1226 1227 /** 1228 * mpi3mr_create_op_queues - create operational queue pairs 1229 * @mrioc: Adapter instance reference 1230 * 1231 * Allocate memory for operational queue meta data and call 1232 * create request and reply queue functions. 1233 * 1234 * Return: 0 on success, non-zero on failures. 1235 */ 1236 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1237 { 1238 int retval = 0; 1239 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1240 1241 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1242 mrioc->facts.max_op_req_q); 1243 1244 msix_count_op_q = 1245 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1246 if (!mrioc->num_queues) 1247 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1248 num_queues = mrioc->num_queues; 1249 ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", 1250 num_queues); 1251 1252 if (!mrioc->req_qinfo) { 1253 mrioc->req_qinfo = kcalloc(num_queues, 1254 sizeof(struct op_req_qinfo), GFP_KERNEL); 1255 if (!mrioc->req_qinfo) { 1256 retval = -1; 1257 goto out_failed; 1258 } 1259 1260 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1261 num_queues, GFP_KERNEL); 1262 if (!mrioc->op_reply_qinfo) { 1263 retval = -1; 1264 goto out_failed; 1265 } 1266 } 1267 1268 if (mrioc->enable_segqueue) 1269 ioc_info(mrioc, 1270 "allocating operational queues through segmented queues\n"); 1271 1272 for (i = 0; i < num_queues; i++) { 1273 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1274 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1275 break; 1276 } 1277 if (mpi3mr_create_op_req_q(mrioc, i, 1278 mrioc->op_reply_qinfo[i].qid)) { 1279 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1280 mpi3mr_delete_op_reply_q(mrioc, i); 1281 break; 1282 } 1283 } 1284 1285 if (i == 0) { 1286 /* Not even one queue is created successfully*/ 1287 retval = -1; 1288 goto out_failed; 1289 } 1290 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1291 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1292 mrioc->num_op_reply_q); 1293 1294 return retval; 1295 out_failed: 1296 kfree(mrioc->req_qinfo); 1297 mrioc->req_qinfo = NULL; 1298 1299 kfree(mrioc->op_reply_qinfo); 1300 mrioc->op_reply_qinfo = NULL; 1301 1302 return retval; 1303 } 1304 1305 /** 1306 * mpi3mr_setup_admin_qpair - Setup admin queue pair 1307 * @mrioc: Adapter instance reference 1308 * 1309 * Allocate memory for admin queue pair if required and register 1310 * the admin queue with the controller. 1311 * 1312 * Return: 0 on success, non-zero on failures. 1313 */ 1314 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) 1315 { 1316 int retval = 0; 1317 u32 num_admin_entries = 0; 1318 1319 mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE; 1320 mrioc->num_admin_req = mrioc->admin_req_q_sz / 1321 MPI3MR_ADMIN_REQ_FRAME_SZ; 1322 mrioc->admin_req_ci = mrioc->admin_req_pi = 0; 1323 mrioc->admin_req_base = NULL; 1324 1325 mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE; 1326 mrioc->num_admin_replies = mrioc->admin_reply_q_sz / 1327 MPI3MR_ADMIN_REPLY_FRAME_SZ; 1328 mrioc->admin_reply_ci = 0; 1329 mrioc->admin_reply_ephase = 1; 1330 mrioc->admin_reply_base = NULL; 1331 1332 if (!mrioc->admin_req_base) { 1333 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, 1334 mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL); 1335 1336 if (!mrioc->admin_req_base) { 1337 retval = -1; 1338 goto out_failed; 1339 } 1340 1341 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev, 1342 mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma, 1343 GFP_KERNEL); 1344 1345 if (!mrioc->admin_reply_base) { 1346 retval = -1; 1347 goto out_failed; 1348 } 1349 } 1350 1351 num_admin_entries = (mrioc->num_admin_replies << 16) | 1352 (mrioc->num_admin_req); 1353 writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); 1354 mpi3mr_writeq(mrioc->admin_req_dma, 1355 &mrioc->sysif_regs->admin_request_queue_address); 1356 mpi3mr_writeq(mrioc->admin_reply_dma, 1357 &mrioc->sysif_regs->admin_reply_queue_address); 1358 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1359 writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); 1360 return retval; 1361 1362 out_failed: 1363 1364 if (mrioc->admin_reply_base) { 1365 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 1366 mrioc->admin_reply_base, mrioc->admin_reply_dma); 1367 mrioc->admin_reply_base = NULL; 1368 } 1369 if (mrioc->admin_req_base) { 1370 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 1371 mrioc->admin_req_base, mrioc->admin_req_dma); 1372 mrioc->admin_req_base = NULL; 1373 } 1374 return retval; 1375 } 1376 1377 /** 1378 * mpi3mr_issue_iocfacts - Send IOC Facts 1379 * @mrioc: Adapter instance reference 1380 * @facts_data: Cached IOC facts data 1381 * 1382 * Issue IOC Facts MPI request through admin queue and wait for 1383 * the completion of it or time out. 1384 * 1385 * Return: 0 on success, non-zero on failures. 1386 */ 1387 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, 1388 struct mpi3_ioc_facts_data *facts_data) 1389 { 1390 struct mpi3_ioc_facts_request iocfacts_req; 1391 void *data = NULL; 1392 dma_addr_t data_dma; 1393 u32 data_len = sizeof(*facts_data); 1394 int retval = 0; 1395 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1396 1397 data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 1398 GFP_KERNEL); 1399 1400 if (!data) { 1401 retval = -1; 1402 goto out; 1403 } 1404 1405 memset(&iocfacts_req, 0, sizeof(iocfacts_req)); 1406 mutex_lock(&mrioc->init_cmds.mutex); 1407 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1408 retval = -1; 1409 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n"); 1410 mutex_unlock(&mrioc->init_cmds.mutex); 1411 goto out; 1412 } 1413 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1414 mrioc->init_cmds.is_waiting = 1; 1415 mrioc->init_cmds.callback = NULL; 1416 iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1417 iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS; 1418 1419 mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len, 1420 data_dma); 1421 1422 init_completion(&mrioc->init_cmds.done); 1423 retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req, 1424 sizeof(iocfacts_req), 1); 1425 if (retval) { 1426 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n"); 1427 goto out_unlock; 1428 } 1429 wait_for_completion_timeout(&mrioc->init_cmds.done, 1430 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1431 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1432 ioc_err(mrioc, "Issue IOCFacts: command timed out\n"); 1433 mpi3mr_set_diagsave(mrioc); 1434 mpi3mr_issue_reset(mrioc, 1435 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1436 MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT); 1437 mrioc->unrecoverable = 1; 1438 retval = -1; 1439 goto out_unlock; 1440 } 1441 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1442 != MPI3_IOCSTATUS_SUCCESS) { 1443 ioc_err(mrioc, 1444 "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1445 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1446 mrioc->init_cmds.ioc_loginfo); 1447 retval = -1; 1448 goto out_unlock; 1449 } 1450 memcpy(facts_data, (u8 *)data, data_len); 1451 out_unlock: 1452 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1453 mutex_unlock(&mrioc->init_cmds.mutex); 1454 1455 out: 1456 if (data) 1457 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma); 1458 1459 return retval; 1460 } 1461 1462 /** 1463 * mpi3mr_check_reset_dma_mask - Process IOC facts data 1464 * @mrioc: Adapter instance reference 1465 * 1466 * Check whether the new DMA mask requested through IOCFacts by 1467 * firmware needs to be set, if so set it . 1468 * 1469 * Return: 0 on success, non-zero on failure. 1470 */ 1471 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc) 1472 { 1473 struct pci_dev *pdev = mrioc->pdev; 1474 int r; 1475 u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask); 1476 1477 if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask)) 1478 return 0; 1479 1480 ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n", 1481 mrioc->dma_mask, facts_dma_mask); 1482 1483 r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask); 1484 if (r) { 1485 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n", 1486 facts_dma_mask, r); 1487 return r; 1488 } 1489 mrioc->dma_mask = facts_dma_mask; 1490 return r; 1491 } 1492 1493 /** 1494 * mpi3mr_process_factsdata - Process IOC facts data 1495 * @mrioc: Adapter instance reference 1496 * @facts_data: Cached IOC facts data 1497 * 1498 * Convert IOC facts data into cpu endianness and cache it in 1499 * the driver . 1500 * 1501 * Return: Nothing. 1502 */ 1503 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, 1504 struct mpi3_ioc_facts_data *facts_data) 1505 { 1506 u32 ioc_config, req_sz, facts_flags; 1507 1508 if ((le16_to_cpu(facts_data->ioc_facts_data_length)) != 1509 (sizeof(*facts_data) / 4)) { 1510 ioc_warn(mrioc, 1511 "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n", 1512 sizeof(*facts_data), 1513 le16_to_cpu(facts_data->ioc_facts_data_length) * 4); 1514 } 1515 1516 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1517 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >> 1518 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT); 1519 if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) { 1520 ioc_err(mrioc, 1521 "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n", 1522 req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size)); 1523 } 1524 1525 memset(&mrioc->facts, 0, sizeof(mrioc->facts)); 1526 1527 facts_flags = le32_to_cpu(facts_data->flags); 1528 mrioc->facts.op_req_sz = req_sz; 1529 mrioc->op_reply_desc_sz = 1 << ((ioc_config & 1530 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >> 1531 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT); 1532 1533 mrioc->facts.ioc_num = facts_data->ioc_number; 1534 mrioc->facts.who_init = facts_data->who_init; 1535 mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors); 1536 mrioc->facts.personality = (facts_flags & 1537 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK); 1538 mrioc->facts.dma_mask = (facts_flags & 1539 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> 1540 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; 1541 mrioc->facts.protocol_flags = facts_data->protocol_flags; 1542 mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); 1543 mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request); 1544 mrioc->facts.product_id = le16_to_cpu(facts_data->product_id); 1545 mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4; 1546 mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions); 1547 mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id); 1548 mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds); 1549 mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds); 1550 mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds); 1551 mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds); 1552 mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds); 1553 mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme); 1554 mrioc->facts.max_pcie_switches = 1555 le16_to_cpu(facts_data->max_pc_ie_switches); 1556 mrioc->facts.max_sasexpanders = 1557 le16_to_cpu(facts_data->max_sas_expanders); 1558 mrioc->facts.max_sasinitiators = 1559 le16_to_cpu(facts_data->max_sas_initiators); 1560 mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures); 1561 mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle); 1562 mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle); 1563 mrioc->facts.max_op_req_q = 1564 le16_to_cpu(facts_data->max_operational_request_queues); 1565 mrioc->facts.max_op_reply_q = 1566 le16_to_cpu(facts_data->max_operational_reply_queues); 1567 mrioc->facts.ioc_capabilities = 1568 le32_to_cpu(facts_data->ioc_capabilities); 1569 mrioc->facts.fw_ver.build_num = 1570 le16_to_cpu(facts_data->fw_version.build_num); 1571 mrioc->facts.fw_ver.cust_id = 1572 le16_to_cpu(facts_data->fw_version.customer_id); 1573 mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor; 1574 mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major; 1575 mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor; 1576 mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major; 1577 mrioc->msix_count = min_t(int, mrioc->msix_count, 1578 mrioc->facts.max_msix_vectors); 1579 mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask; 1580 mrioc->facts.sge_mod_value = facts_data->sge_modifier_value; 1581 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; 1582 mrioc->facts.shutdown_timeout = 1583 le16_to_cpu(facts_data->shutdown_timeout); 1584 1585 ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),", 1586 mrioc->facts.ioc_num, mrioc->facts.max_op_req_q, 1587 mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle); 1588 ioc_info(mrioc, 1589 "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n", 1590 mrioc->facts.max_reqs, mrioc->facts.min_devhandle, 1591 mrioc->facts.max_pds, mrioc->facts.max_msix_vectors, 1592 mrioc->facts.max_perids); 1593 ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ", 1594 mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value, 1595 mrioc->facts.sge_mod_shift); 1596 ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n", 1597 mrioc->facts.dma_mask, (facts_flags & 1598 MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK)); 1599 1600 mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; 1601 1602 if (reset_devices) 1603 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, 1604 MPI3MR_HOST_IOS_KDUMP); 1605 } 1606 1607 /** 1608 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init 1609 * @mrioc: Adapter instance reference 1610 * 1611 * Allocate and initialize the reply free buffers, sense 1612 * buffers, reply free queue and sense buffer queue. 1613 * 1614 * Return: 0 on success, non-zero on failures. 1615 */ 1616 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) 1617 { 1618 int retval = 0; 1619 u32 sz, i; 1620 dma_addr_t phy_addr; 1621 1622 if (mrioc->init_cmds.reply) 1623 goto post_reply_sbuf; 1624 1625 mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL); 1626 if (!mrioc->init_cmds.reply) 1627 goto out_failed; 1628 1629 mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; 1630 mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1; 1631 mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; 1632 mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; 1633 1634 /* reply buffer pool, 16 byte align */ 1635 sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; 1636 mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", 1637 &mrioc->pdev->dev, sz, 16, 0); 1638 if (!mrioc->reply_buf_pool) { 1639 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n"); 1640 goto out_failed; 1641 } 1642 1643 mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL, 1644 &mrioc->reply_buf_dma); 1645 if (!mrioc->reply_buf) 1646 goto out_failed; 1647 1648 mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz; 1649 1650 /* reply free queue, 8 byte align */ 1651 sz = mrioc->reply_free_qsz * 8; 1652 mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool", 1653 &mrioc->pdev->dev, sz, 8, 0); 1654 if (!mrioc->reply_free_q_pool) { 1655 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n"); 1656 goto out_failed; 1657 } 1658 mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool, 1659 GFP_KERNEL, &mrioc->reply_free_q_dma); 1660 if (!mrioc->reply_free_q) 1661 goto out_failed; 1662 1663 /* sense buffer pool, 4 byte align */ 1664 sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; 1665 mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", 1666 &mrioc->pdev->dev, sz, 4, 0); 1667 if (!mrioc->sense_buf_pool) { 1668 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n"); 1669 goto out_failed; 1670 } 1671 mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL, 1672 &mrioc->sense_buf_dma); 1673 if (!mrioc->sense_buf) 1674 goto out_failed; 1675 1676 /* sense buffer queue, 8 byte align */ 1677 sz = mrioc->sense_buf_q_sz * 8; 1678 mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool", 1679 &mrioc->pdev->dev, sz, 8, 0); 1680 if (!mrioc->sense_buf_q_pool) { 1681 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n"); 1682 goto out_failed; 1683 } 1684 mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool, 1685 GFP_KERNEL, &mrioc->sense_buf_q_dma); 1686 if (!mrioc->sense_buf_q) 1687 goto out_failed; 1688 1689 post_reply_sbuf: 1690 sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; 1691 ioc_info(mrioc, 1692 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 1693 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz, 1694 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); 1695 sz = mrioc->reply_free_qsz * 8; 1696 ioc_info(mrioc, 1697 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 1698 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), 1699 (unsigned long long)mrioc->reply_free_q_dma); 1700 sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; 1701 ioc_info(mrioc, 1702 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 1703 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ, 1704 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); 1705 sz = mrioc->sense_buf_q_sz * 8; 1706 ioc_info(mrioc, 1707 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 1708 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024), 1709 (unsigned long long)mrioc->sense_buf_q_dma); 1710 1711 /* initialize Reply buffer Queue */ 1712 for (i = 0, phy_addr = mrioc->reply_buf_dma; 1713 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz) 1714 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); 1715 mrioc->reply_free_q[i] = cpu_to_le64(0); 1716 1717 /* initialize Sense Buffer Queue */ 1718 for (i = 0, phy_addr = mrioc->sense_buf_dma; 1719 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ) 1720 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); 1721 mrioc->sense_buf_q[i] = cpu_to_le64(0); 1722 return retval; 1723 1724 out_failed: 1725 retval = -1; 1726 return retval; 1727 } 1728 1729 /** 1730 * mpi3mr_issue_iocinit - Send IOC Init 1731 * @mrioc: Adapter instance reference 1732 * 1733 * Issue IOC Init MPI request through admin queue and wait for 1734 * the completion of it or time out. 1735 * 1736 * Return: 0 on success, non-zero on failures. 1737 */ 1738 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) 1739 { 1740 struct mpi3_ioc_init_request iocinit_req; 1741 struct mpi3_driver_info_layout *drv_info; 1742 dma_addr_t data_dma; 1743 u32 data_len = sizeof(*drv_info); 1744 int retval = 0; 1745 ktime_t current_time; 1746 1747 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 1748 GFP_KERNEL); 1749 if (!drv_info) { 1750 retval = -1; 1751 goto out; 1752 } 1753 drv_info->information_length = cpu_to_le32(data_len); 1754 strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); 1755 strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); 1756 drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; 1757 strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); 1758 drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; 1759 strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); 1760 strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); 1761 strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); 1762 drv_info->driver_capabilities = 0; 1763 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, 1764 sizeof(mrioc->driver_info)); 1765 1766 memset(&iocinit_req, 0, sizeof(iocinit_req)); 1767 mutex_lock(&mrioc->init_cmds.mutex); 1768 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1769 retval = -1; 1770 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); 1771 mutex_unlock(&mrioc->init_cmds.mutex); 1772 goto out; 1773 } 1774 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1775 mrioc->init_cmds.is_waiting = 1; 1776 mrioc->init_cmds.callback = NULL; 1777 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1778 iocinit_req.function = MPI3_FUNCTION_IOC_INIT; 1779 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; 1780 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; 1781 iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; 1782 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; 1783 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; 1784 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); 1785 iocinit_req.reply_free_queue_address = 1786 cpu_to_le64(mrioc->reply_free_q_dma); 1787 iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ); 1788 iocinit_req.sense_buffer_free_queue_depth = 1789 cpu_to_le16(mrioc->sense_buf_q_sz); 1790 iocinit_req.sense_buffer_free_queue_address = 1791 cpu_to_le64(mrioc->sense_buf_q_dma); 1792 iocinit_req.driver_information_address = cpu_to_le64(data_dma); 1793 1794 current_time = ktime_get_real(); 1795 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); 1796 1797 init_completion(&mrioc->init_cmds.done); 1798 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, 1799 sizeof(iocinit_req), 1); 1800 if (retval) { 1801 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); 1802 goto out_unlock; 1803 } 1804 wait_for_completion_timeout(&mrioc->init_cmds.done, 1805 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1806 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1807 mpi3mr_set_diagsave(mrioc); 1808 mpi3mr_issue_reset(mrioc, 1809 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1810 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); 1811 mrioc->unrecoverable = 1; 1812 ioc_err(mrioc, "Issue IOCInit: command timed out\n"); 1813 retval = -1; 1814 goto out_unlock; 1815 } 1816 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1817 != MPI3_IOCSTATUS_SUCCESS) { 1818 ioc_err(mrioc, 1819 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1820 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1821 mrioc->init_cmds.ioc_loginfo); 1822 retval = -1; 1823 goto out_unlock; 1824 } 1825 1826 out_unlock: 1827 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1828 mutex_unlock(&mrioc->init_cmds.mutex); 1829 1830 out: 1831 if (drv_info) 1832 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, 1833 data_dma); 1834 1835 return retval; 1836 } 1837 1838 /** 1839 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 1840 * @mrioc: Adapter instance reference 1841 * 1842 * Allocate chain buffers and set a bitmap to indicate free 1843 * chain buffers. Chain buffers are used to pass the SGE 1844 * information along with MPI3 SCSI IO requests for host I/O. 1845 * 1846 * Return: 0 on success, non-zero on failure 1847 */ 1848 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 1849 { 1850 int retval = 0; 1851 u32 sz, i; 1852 u16 num_chains; 1853 1854 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 1855 1856 mrioc->chain_buf_count = num_chains; 1857 sz = sizeof(struct chain_element) * num_chains; 1858 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 1859 if (!mrioc->chain_sgl_list) 1860 goto out_failed; 1861 1862 sz = MPI3MR_PAGE_SIZE_4K; 1863 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 1864 &mrioc->pdev->dev, sz, 16, 0); 1865 if (!mrioc->chain_buf_pool) { 1866 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 1867 goto out_failed; 1868 } 1869 1870 for (i = 0; i < num_chains; i++) { 1871 mrioc->chain_sgl_list[i].addr = 1872 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 1873 &mrioc->chain_sgl_list[i].dma_addr); 1874 1875 if (!mrioc->chain_sgl_list[i].addr) 1876 goto out_failed; 1877 } 1878 mrioc->chain_bitmap_sz = num_chains / 8; 1879 if (num_chains % 8) 1880 mrioc->chain_bitmap_sz++; 1881 mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 1882 if (!mrioc->chain_bitmap) 1883 goto out_failed; 1884 return retval; 1885 out_failed: 1886 retval = -1; 1887 return retval; 1888 } 1889 1890 /** 1891 * mpi3mr_cleanup_resources - Free PCI resources 1892 * @mrioc: Adapter instance reference 1893 * 1894 * Unmap PCI device memory and disable PCI device. 1895 * 1896 * Return: 0 on success and non-zero on failure. 1897 */ 1898 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 1899 { 1900 struct pci_dev *pdev = mrioc->pdev; 1901 1902 mpi3mr_cleanup_isr(mrioc); 1903 1904 if (mrioc->sysif_regs) { 1905 iounmap((void __iomem *)mrioc->sysif_regs); 1906 mrioc->sysif_regs = NULL; 1907 } 1908 1909 if (pci_is_enabled(pdev)) { 1910 if (mrioc->bars) 1911 pci_release_selected_regions(pdev, mrioc->bars); 1912 pci_disable_device(pdev); 1913 } 1914 } 1915 1916 /** 1917 * mpi3mr_setup_resources - Enable PCI resources 1918 * @mrioc: Adapter instance reference 1919 * 1920 * Enable PCI device memory, MSI-x registers and set DMA mask. 1921 * 1922 * Return: 0 on success and non-zero on failure. 1923 */ 1924 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 1925 { 1926 struct pci_dev *pdev = mrioc->pdev; 1927 u32 memap_sz = 0; 1928 int i, retval = 0, capb = 0; 1929 u16 message_control; 1930 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 1931 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) && 1932 (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 1933 1934 if (pci_enable_device_mem(pdev)) { 1935 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 1936 retval = -ENODEV; 1937 goto out_failed; 1938 } 1939 1940 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 1941 if (!capb) { 1942 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 1943 retval = -ENODEV; 1944 goto out_failed; 1945 } 1946 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1947 1948 if (pci_request_selected_regions(pdev, mrioc->bars, 1949 mrioc->driver_name)) { 1950 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 1951 retval = -ENODEV; 1952 goto out_failed; 1953 } 1954 1955 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 1956 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 1957 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 1958 memap_sz = pci_resource_len(pdev, i); 1959 mrioc->sysif_regs = 1960 ioremap(mrioc->sysif_regs_phys, memap_sz); 1961 break; 1962 } 1963 } 1964 1965 pci_set_master(pdev); 1966 1967 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 1968 if (retval) { 1969 if (dma_mask != DMA_BIT_MASK(32)) { 1970 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 1971 dma_mask = DMA_BIT_MASK(32); 1972 retval = dma_set_mask_and_coherent(&pdev->dev, 1973 dma_mask); 1974 } 1975 if (retval) { 1976 mrioc->dma_mask = 0; 1977 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 1978 goto out_failed; 1979 } 1980 } 1981 mrioc->dma_mask = dma_mask; 1982 1983 if (!mrioc->sysif_regs) { 1984 ioc_err(mrioc, 1985 "Unable to map adapter memory or resource not found\n"); 1986 retval = -EINVAL; 1987 goto out_failed; 1988 } 1989 1990 pci_read_config_word(pdev, capb + 2, &message_control); 1991 mrioc->msix_count = (message_control & 0x3FF) + 1; 1992 1993 pci_save_state(pdev); 1994 1995 pci_set_drvdata(pdev, mrioc->shost); 1996 1997 mpi3mr_ioc_disable_intr(mrioc); 1998 1999 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 2000 (unsigned long long)mrioc->sysif_regs_phys, 2001 mrioc->sysif_regs, memap_sz); 2002 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 2003 mrioc->msix_count); 2004 return retval; 2005 2006 out_failed: 2007 mpi3mr_cleanup_resources(mrioc); 2008 return retval; 2009 } 2010 2011 /** 2012 * mpi3mr_init_ioc - Initialize the controller 2013 * @mrioc: Adapter instance reference 2014 * 2015 * This the controller initialization routine, executed either 2016 * after soft reset or from pci probe callback. 2017 * Setup the required resources, memory map the controller 2018 * registers, create admin and operational reply queue pairs, 2019 * allocate required memory for reply pool, sense buffer pool, 2020 * issue IOC init request to the firmware, unmask the events and 2021 * issue port enable to discover SAS/SATA/NVMe devies and RAID 2022 * volumes. 2023 * 2024 * Return: 0 on success and non-zero on failure. 2025 */ 2026 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 2027 { 2028 int retval = 0; 2029 enum mpi3mr_iocstate ioc_state; 2030 u64 base_info; 2031 u32 timeout; 2032 u32 ioc_status, ioc_config; 2033 struct mpi3_ioc_facts_data facts_data; 2034 2035 mrioc->change_count = 0; 2036 mrioc->cpu_count = num_online_cpus(); 2037 retval = mpi3mr_setup_resources(mrioc); 2038 if (retval) { 2039 ioc_err(mrioc, "Failed to setup resources:error %d\n", 2040 retval); 2041 goto out_nocleanup; 2042 } 2043 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2044 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 2045 2046 ioc_info(mrioc, "SOD status %x configuration %x\n", 2047 ioc_status, ioc_config); 2048 2049 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 2050 ioc_info(mrioc, "SOD base_info %llx\n", base_info); 2051 2052 /*The timeout value is in 2sec unit, changing it to seconds*/ 2053 mrioc->ready_timeout = 2054 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 2055 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 2056 2057 ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout); 2058 2059 ioc_state = mpi3mr_get_iocstate(mrioc); 2060 ioc_info(mrioc, "IOC in %s state during detection\n", 2061 mpi3mr_iocstate_name(ioc_state)); 2062 2063 if (ioc_state == MRIOC_STATE_BECOMING_READY || 2064 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 2065 timeout = mrioc->ready_timeout * 10; 2066 do { 2067 msleep(100); 2068 } while (--timeout); 2069 2070 ioc_state = mpi3mr_get_iocstate(mrioc); 2071 ioc_info(mrioc, 2072 "IOC in %s state after waiting for reset time\n", 2073 mpi3mr_iocstate_name(ioc_state)); 2074 } 2075 2076 if (ioc_state == MRIOC_STATE_READY) { 2077 retval = mpi3mr_issue_and_process_mur(mrioc, 2078 MPI3MR_RESET_FROM_BRINGUP); 2079 if (retval) { 2080 ioc_err(mrioc, "Failed to MU reset IOC error %d\n", 2081 retval); 2082 } 2083 ioc_state = mpi3mr_get_iocstate(mrioc); 2084 } 2085 if (ioc_state != MRIOC_STATE_RESET) { 2086 mpi3mr_print_fault_info(mrioc); 2087 retval = mpi3mr_issue_reset(mrioc, 2088 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 2089 MPI3MR_RESET_FROM_BRINGUP); 2090 if (retval) { 2091 ioc_err(mrioc, 2092 "%s :Failed to soft reset IOC error %d\n", 2093 __func__, retval); 2094 goto out_failed; 2095 } 2096 } 2097 ioc_state = mpi3mr_get_iocstate(mrioc); 2098 if (ioc_state != MRIOC_STATE_RESET) { 2099 ioc_err(mrioc, "Cannot bring IOC to reset state\n"); 2100 goto out_failed; 2101 } 2102 2103 retval = mpi3mr_setup_admin_qpair(mrioc); 2104 if (retval) { 2105 ioc_err(mrioc, "Failed to setup admin Qs: error %d\n", 2106 retval); 2107 goto out_failed; 2108 } 2109 2110 retval = mpi3mr_bring_ioc_ready(mrioc); 2111 if (retval) { 2112 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 2113 retval); 2114 goto out_failed; 2115 } 2116 2117 retval = mpi3mr_setup_isr(mrioc, 1); 2118 if (retval) { 2119 ioc_err(mrioc, "Failed to setup ISR error %d\n", 2120 retval); 2121 goto out_failed; 2122 } 2123 2124 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 2125 if (retval) { 2126 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 2127 retval); 2128 goto out_failed; 2129 } 2130 2131 mpi3mr_process_factsdata(mrioc, &facts_data); 2132 retval = mpi3mr_check_reset_dma_mask(mrioc); 2133 if (retval) { 2134 ioc_err(mrioc, "Resetting dma mask failed %d\n", 2135 retval); 2136 goto out_failed; 2137 } 2138 2139 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 2140 if (retval) { 2141 ioc_err(mrioc, 2142 "%s :Failed to allocated reply sense buffers %d\n", 2143 __func__, retval); 2144 goto out_failed; 2145 } 2146 2147 retval = mpi3mr_alloc_chain_bufs(mrioc); 2148 if (retval) { 2149 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 2150 retval); 2151 goto out_failed; 2152 } 2153 2154 retval = mpi3mr_issue_iocinit(mrioc); 2155 if (retval) { 2156 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 2157 retval); 2158 goto out_failed; 2159 } 2160 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; 2161 writel(mrioc->reply_free_queue_host_index, 2162 &mrioc->sysif_regs->reply_free_host_index); 2163 2164 mrioc->sbq_host_index = mrioc->num_sense_bufs; 2165 writel(mrioc->sbq_host_index, 2166 &mrioc->sysif_regs->sense_buffer_free_host_index); 2167 2168 retval = mpi3mr_setup_isr(mrioc, 0); 2169 if (retval) { 2170 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 2171 retval); 2172 goto out_failed; 2173 } 2174 2175 retval = mpi3mr_create_op_queues(mrioc); 2176 if (retval) { 2177 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 2178 retval); 2179 goto out_failed; 2180 } 2181 2182 return retval; 2183 2184 out_failed: 2185 mpi3mr_cleanup_ioc(mrioc); 2186 out_nocleanup: 2187 return retval; 2188 } 2189 2190 /** 2191 * mpi3mr_free_mem - Free memory allocated for a controller 2192 * @mrioc: Adapter instance reference 2193 * 2194 * Free all the memory allocated for a controller. 2195 * 2196 * Return: Nothing. 2197 */ 2198 static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) 2199 { 2200 u16 i; 2201 struct mpi3mr_intr_info *intr_info; 2202 2203 if (mrioc->sense_buf_pool) { 2204 if (mrioc->sense_buf) 2205 dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, 2206 mrioc->sense_buf_dma); 2207 dma_pool_destroy(mrioc->sense_buf_pool); 2208 mrioc->sense_buf = NULL; 2209 mrioc->sense_buf_pool = NULL; 2210 } 2211 if (mrioc->sense_buf_q_pool) { 2212 if (mrioc->sense_buf_q) 2213 dma_pool_free(mrioc->sense_buf_q_pool, 2214 mrioc->sense_buf_q, mrioc->sense_buf_q_dma); 2215 dma_pool_destroy(mrioc->sense_buf_q_pool); 2216 mrioc->sense_buf_q = NULL; 2217 mrioc->sense_buf_q_pool = NULL; 2218 } 2219 2220 if (mrioc->reply_buf_pool) { 2221 if (mrioc->reply_buf) 2222 dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, 2223 mrioc->reply_buf_dma); 2224 dma_pool_destroy(mrioc->reply_buf_pool); 2225 mrioc->reply_buf = NULL; 2226 mrioc->reply_buf_pool = NULL; 2227 } 2228 if (mrioc->reply_free_q_pool) { 2229 if (mrioc->reply_free_q) 2230 dma_pool_free(mrioc->reply_free_q_pool, 2231 mrioc->reply_free_q, mrioc->reply_free_q_dma); 2232 dma_pool_destroy(mrioc->reply_free_q_pool); 2233 mrioc->reply_free_q = NULL; 2234 mrioc->reply_free_q_pool = NULL; 2235 } 2236 2237 for (i = 0; i < mrioc->num_op_req_q; i++) 2238 mpi3mr_free_op_req_q_segments(mrioc, i); 2239 2240 for (i = 0; i < mrioc->num_op_reply_q; i++) 2241 mpi3mr_free_op_reply_q_segments(mrioc, i); 2242 2243 for (i = 0; i < mrioc->intr_info_count; i++) { 2244 intr_info = mrioc->intr_info + i; 2245 if (intr_info) 2246 intr_info->op_reply_q = NULL; 2247 } 2248 2249 kfree(mrioc->req_qinfo); 2250 mrioc->req_qinfo = NULL; 2251 mrioc->num_op_req_q = 0; 2252 2253 kfree(mrioc->op_reply_qinfo); 2254 mrioc->op_reply_qinfo = NULL; 2255 mrioc->num_op_reply_q = 0; 2256 2257 kfree(mrioc->init_cmds.reply); 2258 mrioc->init_cmds.reply = NULL; 2259 2260 kfree(mrioc->chain_bitmap); 2261 mrioc->chain_bitmap = NULL; 2262 2263 if (mrioc->chain_buf_pool) { 2264 for (i = 0; i < mrioc->chain_buf_count; i++) { 2265 if (mrioc->chain_sgl_list[i].addr) { 2266 dma_pool_free(mrioc->chain_buf_pool, 2267 mrioc->chain_sgl_list[i].addr, 2268 mrioc->chain_sgl_list[i].dma_addr); 2269 mrioc->chain_sgl_list[i].addr = NULL; 2270 } 2271 } 2272 dma_pool_destroy(mrioc->chain_buf_pool); 2273 mrioc->chain_buf_pool = NULL; 2274 } 2275 2276 kfree(mrioc->chain_sgl_list); 2277 mrioc->chain_sgl_list = NULL; 2278 2279 if (mrioc->admin_reply_base) { 2280 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 2281 mrioc->admin_reply_base, mrioc->admin_reply_dma); 2282 mrioc->admin_reply_base = NULL; 2283 } 2284 if (mrioc->admin_req_base) { 2285 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 2286 mrioc->admin_req_base, mrioc->admin_req_dma); 2287 mrioc->admin_req_base = NULL; 2288 } 2289 } 2290 2291 /** 2292 * mpi3mr_issue_ioc_shutdown - shutdown controller 2293 * @mrioc: Adapter instance reference 2294 * 2295 * Send shutodwn notification to the controller and wait for the 2296 * shutdown_timeout for it to be completed. 2297 * 2298 * Return: Nothing. 2299 */ 2300 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) 2301 { 2302 u32 ioc_config, ioc_status; 2303 u8 retval = 1; 2304 u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10; 2305 2306 ioc_info(mrioc, "Issuing shutdown Notification\n"); 2307 if (mrioc->unrecoverable) { 2308 ioc_warn(mrioc, 2309 "IOC is unrecoverable shutdown is not issued\n"); 2310 return; 2311 } 2312 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2313 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 2314 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) { 2315 ioc_info(mrioc, "shutdown already in progress\n"); 2316 return; 2317 } 2318 2319 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 2320 ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; 2321 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN; 2322 2323 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 2324 2325 if (mrioc->facts.shutdown_timeout) 2326 timeout = mrioc->facts.shutdown_timeout * 10; 2327 2328 do { 2329 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2330 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 2331 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) { 2332 retval = 0; 2333 break; 2334 } 2335 msleep(100); 2336 } while (--timeout); 2337 2338 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2339 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 2340 2341 if (retval) { 2342 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 2343 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) 2344 ioc_warn(mrioc, 2345 "shutdown still in progress after timeout\n"); 2346 } 2347 2348 ioc_info(mrioc, 2349 "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n", 2350 (!retval) ? "successful" : "failed", ioc_status, 2351 ioc_config); 2352 } 2353 2354 /** 2355 * mpi3mr_cleanup_ioc - Cleanup controller 2356 * @mrioc: Adapter instance reference 2357 * 2358 * controller cleanup handler, Message unit reset or soft reset 2359 * and shutdown notification is issued to the controller and the 2360 * associated memory resources are freed. 2361 * 2362 * Return: Nothing. 2363 */ 2364 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) 2365 { 2366 enum mpi3mr_iocstate ioc_state; 2367 2368 mpi3mr_ioc_disable_intr(mrioc); 2369 2370 ioc_state = mpi3mr_get_iocstate(mrioc); 2371 2372 if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && 2373 (ioc_state == MRIOC_STATE_READY)) { 2374 if (mpi3mr_issue_and_process_mur(mrioc, 2375 MPI3MR_RESET_FROM_CTLR_CLEANUP)) 2376 mpi3mr_issue_reset(mrioc, 2377 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 2378 MPI3MR_RESET_FROM_MUR_FAILURE); 2379 2380 mpi3mr_issue_ioc_shutdown(mrioc); 2381 } 2382 2383 mpi3mr_free_mem(mrioc); 2384 mpi3mr_cleanup_resources(mrioc); 2385 } 2386 2387 /** 2388 * mpi3mr_soft_reset_handler - Reset the controller 2389 * @mrioc: Adapter instance reference 2390 * @reset_reason: Reset reason code 2391 * @snapdump: Flag to generate snapdump in firmware or not 2392 * 2393 * TBD 2394 * 2395 * Return: 0 on success, non-zero on failure. 2396 */ 2397 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, 2398 u32 reset_reason, u8 snapdump) 2399 { 2400 return 0; 2401 } 2402