1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2021 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/io-64-nonatomic-lo-hi.h> 12 13 #if defined(writeq) && defined(CONFIG_64BIT) 14 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 15 { 16 writeq(b, addr); 17 } 18 #else 19 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 20 { 21 __u64 data_out = b; 22 23 writel((u32)(data_out), addr); 24 writel((u32)(data_out >> 32), (addr + 4)); 25 } 26 #endif 27 28 static inline bool 29 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q) 30 { 31 u16 pi, ci, max_entries; 32 bool is_qfull = false; 33 34 pi = op_req_q->pi; 35 ci = READ_ONCE(op_req_q->ci); 36 max_entries = op_req_q->num_requests; 37 38 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1)))) 39 is_qfull = true; 40 41 return is_qfull; 42 } 43 44 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc) 45 { 46 u16 i, max_vectors; 47 48 max_vectors = mrioc->intr_info_count; 49 50 for (i = 0; i < max_vectors; i++) 51 synchronize_irq(pci_irq_vector(mrioc->pdev, i)); 52 } 53 54 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc) 55 { 56 mrioc->intr_enabled = 0; 57 mpi3mr_sync_irqs(mrioc); 58 } 59 60 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc) 61 { 62 mrioc->intr_enabled = 1; 63 } 64 65 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) 66 { 67 u16 i; 68 69 mpi3mr_ioc_disable_intr(mrioc); 70 71 if (!mrioc->intr_info) 72 return; 73 74 for (i = 0; i < mrioc->intr_info_count; i++) 75 free_irq(pci_irq_vector(mrioc->pdev, i), 76 (mrioc->intr_info + i)); 77 78 kfree(mrioc->intr_info); 79 mrioc->intr_info = NULL; 80 mrioc->intr_info_count = 0; 81 pci_free_irq_vectors(mrioc->pdev); 82 } 83 84 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, 85 dma_addr_t dma_addr) 86 { 87 struct mpi3_sge_common *sgel = paddr; 88 89 sgel->flags = flags; 90 sgel->length = cpu_to_le32(length); 91 sgel->address = cpu_to_le64(dma_addr); 92 } 93 94 void mpi3mr_build_zero_len_sge(void *paddr) 95 { 96 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 97 98 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1); 99 } 100 101 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, 102 dma_addr_t phys_addr) 103 { 104 if (!phys_addr) 105 return NULL; 106 107 if ((phys_addr < mrioc->reply_buf_dma) || 108 (phys_addr > mrioc->reply_buf_dma_max_address)) 109 return NULL; 110 111 return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma); 112 } 113 114 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, 115 dma_addr_t phys_addr) 116 { 117 if (!phys_addr) 118 return NULL; 119 120 return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma); 121 } 122 123 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, 124 u64 reply_dma) 125 { 126 u32 old_idx = 0; 127 128 spin_lock(&mrioc->reply_free_queue_lock); 129 old_idx = mrioc->reply_free_queue_host_index; 130 mrioc->reply_free_queue_host_index = ( 131 (mrioc->reply_free_queue_host_index == 132 (mrioc->reply_free_qsz - 1)) ? 
0 : 133 (mrioc->reply_free_queue_host_index + 1)); 134 mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); 135 writel(mrioc->reply_free_queue_host_index, 136 &mrioc->sysif_regs->reply_free_host_index); 137 spin_unlock(&mrioc->reply_free_queue_lock); 138 } 139 140 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, 141 u64 sense_buf_dma) 142 { 143 u32 old_idx = 0; 144 145 spin_lock(&mrioc->sbq_lock); 146 old_idx = mrioc->sbq_host_index; 147 mrioc->sbq_host_index = ((mrioc->sbq_host_index == 148 (mrioc->sense_buf_q_sz - 1)) ? 0 : 149 (mrioc->sbq_host_index + 1)); 150 mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); 151 writel(mrioc->sbq_host_index, 152 &mrioc->sysif_regs->sense_buffer_free_host_index); 153 spin_unlock(&mrioc->sbq_lock); 154 } 155 156 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc, 157 struct mpi3_default_reply *def_reply) 158 { 159 struct mpi3_event_notification_reply *event_reply = 160 (struct mpi3_event_notification_reply *)def_reply; 161 162 mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count); 163 } 164 165 static struct mpi3mr_drv_cmd * 166 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, 167 struct mpi3_default_reply *def_reply) 168 { 169 switch (host_tag) { 170 case MPI3MR_HOSTTAG_INITCMDS: 171 return &mrioc->init_cmds; 172 case MPI3MR_HOSTTAG_INVALID: 173 if (def_reply && def_reply->function == 174 MPI3_FUNCTION_EVENT_NOTIFICATION) 175 mpi3mr_handle_events(mrioc, def_reply); 176 return NULL; 177 default: 178 break; 179 } 180 181 return NULL; 182 } 183 184 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, 185 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma) 186 { 187 u16 reply_desc_type, host_tag = 0; 188 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 189 u32 ioc_loginfo = 0; 190 struct mpi3_status_reply_descriptor *status_desc; 191 struct mpi3_address_reply_descriptor *addr_desc; 192 struct mpi3_success_reply_descriptor *success_desc; 193 struct mpi3_default_reply *def_reply = NULL; 194 struct mpi3mr_drv_cmd *cmdptr = NULL; 195 struct mpi3_scsi_io_reply *scsi_reply; 196 u8 *sense_buf = NULL; 197 198 *reply_dma = 0; 199 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 200 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 201 switch (reply_desc_type) { 202 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 203 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 204 host_tag = le16_to_cpu(status_desc->host_tag); 205 ioc_status = le16_to_cpu(status_desc->ioc_status); 206 if (ioc_status & 207 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 208 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 209 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 210 break; 211 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 212 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 213 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 214 def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma); 215 if (!def_reply) 216 goto out; 217 host_tag = le16_to_cpu(def_reply->host_tag); 218 ioc_status = le16_to_cpu(def_reply->ioc_status); 219 if (ioc_status & 220 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 221 ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info); 222 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 223 if (def_reply->function == MPI3_FUNCTION_SCSI_IO) { 224 scsi_reply = (struct mpi3_scsi_io_reply *)def_reply; 225 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 226 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 227 } 228 break; 229 case 
MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 230 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 231 host_tag = le16_to_cpu(success_desc->host_tag); 232 break; 233 default: 234 break; 235 } 236 237 cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply); 238 if (cmdptr) { 239 if (cmdptr->state & MPI3MR_CMD_PENDING) { 240 cmdptr->state |= MPI3MR_CMD_COMPLETE; 241 cmdptr->ioc_loginfo = ioc_loginfo; 242 cmdptr->ioc_status = ioc_status; 243 cmdptr->state &= ~MPI3MR_CMD_PENDING; 244 if (def_reply) { 245 cmdptr->state |= MPI3MR_CMD_REPLY_VALID; 246 memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, 247 mrioc->facts.reply_sz); 248 } 249 if (cmdptr->is_waiting) { 250 complete(&cmdptr->done); 251 cmdptr->is_waiting = 0; 252 } else if (cmdptr->callback) 253 cmdptr->callback(mrioc, cmdptr); 254 } 255 } 256 out: 257 if (sense_buf) 258 mpi3mr_repost_sense_buf(mrioc, 259 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 260 } 261 262 static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) 263 { 264 u32 exp_phase = mrioc->admin_reply_ephase; 265 u32 admin_reply_ci = mrioc->admin_reply_ci; 266 u32 num_admin_replies = 0; 267 u64 reply_dma = 0; 268 struct mpi3_default_reply_descriptor *reply_desc; 269 270 reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + 271 admin_reply_ci; 272 273 if ((le16_to_cpu(reply_desc->reply_flags) & 274 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) 275 return 0; 276 277 do { 278 mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); 279 mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma); 280 if (reply_dma) 281 mpi3mr_repost_reply_buf(mrioc, reply_dma); 282 num_admin_replies++; 283 if (++admin_reply_ci == mrioc->num_admin_replies) { 284 admin_reply_ci = 0; 285 exp_phase ^= 1; 286 } 287 reply_desc = 288 (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base + 289 admin_reply_ci; 290 if ((le16_to_cpu(reply_desc->reply_flags) & 291 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) 292 break; 293 } while (1); 294 295 writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); 296 mrioc->admin_reply_ci = admin_reply_ci; 297 mrioc->admin_reply_ephase = exp_phase; 298 299 return num_admin_replies; 300 } 301 302 /** 303 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to 304 * queue's consumer index from operational reply descriptor queue. 
305 * @op_reply_q: op_reply_qinfo object 306 * @reply_ci: operational reply descriptor's queue consumer index 307 * 308 * Returns reply descriptor frame address 309 */ 310 static inline struct mpi3_default_reply_descriptor * 311 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci) 312 { 313 void *segment_base_addr; 314 struct segments *segments = op_reply_q->q_segments; 315 struct mpi3_default_reply_descriptor *reply_desc = NULL; 316 317 segment_base_addr = 318 segments[reply_ci / op_reply_q->segment_qd].segment; 319 reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr + 320 (reply_ci % op_reply_q->segment_qd); 321 return reply_desc; 322 } 323 324 static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, 325 struct mpi3mr_intr_info *intr_info) 326 { 327 struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q; 328 struct op_req_qinfo *op_req_q; 329 u32 exp_phase; 330 u32 reply_ci; 331 u32 num_op_reply = 0; 332 u64 reply_dma = 0; 333 struct mpi3_default_reply_descriptor *reply_desc; 334 u16 req_q_idx = 0, reply_qidx; 335 336 reply_qidx = op_reply_q->qid - 1; 337 338 exp_phase = op_reply_q->ephase; 339 reply_ci = op_reply_q->ci; 340 341 reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); 342 if ((le16_to_cpu(reply_desc->reply_flags) & 343 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) { 344 return 0; 345 } 346 347 do { 348 req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1; 349 op_req_q = &mrioc->req_qinfo[req_q_idx]; 350 351 WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci)); 352 mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma, 353 reply_qidx); 354 if (reply_dma) 355 mpi3mr_repost_reply_buf(mrioc, reply_dma); 356 num_op_reply++; 357 358 if (++reply_ci == op_reply_q->num_replies) { 359 reply_ci = 0; 360 exp_phase ^= 1; 361 } 362 363 reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci); 364 365 if ((le16_to_cpu(reply_desc->reply_flags) & 366 MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) 367 break; 368 369 } while (1); 370 371 writel(reply_ci, 372 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index); 373 op_reply_q->ci = reply_ci; 374 op_reply_q->ephase = exp_phase; 375 376 return num_op_reply; 377 } 378 379 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata) 380 { 381 struct mpi3mr_intr_info *intr_info = privdata; 382 struct mpi3mr_ioc *mrioc; 383 u16 midx; 384 u32 num_admin_replies = 0; 385 386 if (!intr_info) 387 return IRQ_NONE; 388 389 mrioc = intr_info->mrioc; 390 391 if (!mrioc->intr_enabled) 392 return IRQ_NONE; 393 394 midx = intr_info->msix_index; 395 396 if (!midx) 397 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc); 398 399 if (num_admin_replies) 400 return IRQ_HANDLED; 401 else 402 return IRQ_NONE; 403 } 404 405 static irqreturn_t mpi3mr_isr(int irq, void *privdata) 406 { 407 struct mpi3mr_intr_info *intr_info = privdata; 408 int ret; 409 410 if (!intr_info) 411 return IRQ_NONE; 412 413 /* Call primary ISR routine */ 414 ret = mpi3mr_isr_primary(irq, privdata); 415 416 return ret; 417 } 418 419 /** 420 * mpi3mr_isr_poll - Reply queue polling routine 421 * @irq: IRQ 422 * @privdata: Interrupt info 423 * 424 * poll for pending I/O completions in a loop until pending I/Os 425 * present or controller queue depth I/Os are processed. 
426 * 427 * Return: IRQ_NONE or IRQ_HANDLED 428 */ 429 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) 430 { 431 return IRQ_HANDLED; 432 } 433 434 /** 435 * mpi3mr_request_irq - Request IRQ and register ISR 436 * @mrioc: Adapter instance reference 437 * @index: IRQ vector index 438 * 439 * Request threaded ISR with primary ISR and secondary 440 * 441 * Return: 0 on success and non zero on failures. 442 */ 443 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 444 { 445 struct pci_dev *pdev = mrioc->pdev; 446 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 447 int retval = 0; 448 449 intr_info->mrioc = mrioc; 450 intr_info->msix_index = index; 451 intr_info->op_reply_q = NULL; 452 453 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 454 mrioc->driver_name, mrioc->id, index); 455 456 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 457 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 458 if (retval) { 459 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 460 intr_info->name, pci_irq_vector(pdev, index)); 461 return retval; 462 } 463 464 return retval; 465 } 466 467 /** 468 * mpi3mr_setup_isr - Setup ISR for the controller 469 * @mrioc: Adapter instance reference 470 * @setup_one: Request one IRQ or more 471 * 472 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 473 * 474 * Return: 0 on success and non zero on failures. 475 */ 476 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 477 { 478 unsigned int irq_flags = PCI_IRQ_MSIX; 479 u16 max_vectors = 0, i; 480 int retval = 0; 481 struct irq_affinity desc = { .pre_vectors = 1}; 482 483 mpi3mr_cleanup_isr(mrioc); 484 485 if (setup_one || reset_devices) 486 max_vectors = 1; 487 else { 488 max_vectors = 489 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 490 491 ioc_info(mrioc, 492 "MSI-X vectors supported: %d, no of cores: %d,", 493 mrioc->msix_count, mrioc->cpu_count); 494 ioc_info(mrioc, 495 "MSI-x vectors requested: %d\n", max_vectors); 496 } 497 498 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 499 500 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 501 i = pci_alloc_irq_vectors_affinity(mrioc->pdev, 502 1, max_vectors, irq_flags, &desc); 503 if (i <= 0) { 504 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 505 goto out_failed; 506 } 507 if (i != max_vectors) { 508 ioc_info(mrioc, 509 "allocated vectors (%d) are less than configured (%d)\n", 510 i, max_vectors); 511 /* 512 * If only one MSI-x is allocated, then MSI-x 0 will be shared 513 * between Admin queue and operational queue 514 */ 515 if (i == 1) 516 mrioc->op_reply_q_offset = 0; 517 518 max_vectors = i; 519 } 520 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 521 GFP_KERNEL); 522 if (!mrioc->intr_info) { 523 retval = -1; 524 pci_free_irq_vectors(mrioc->pdev); 525 goto out_failed; 526 } 527 for (i = 0; i < max_vectors; i++) { 528 retval = mpi3mr_request_irq(mrioc, i); 529 if (retval) { 530 mrioc->intr_info_count = i; 531 goto out_failed; 532 } 533 } 534 mrioc->intr_info_count = max_vectors; 535 mpi3mr_ioc_enable_intr(mrioc); 536 return retval; 537 out_failed: 538 mpi3mr_cleanup_isr(mrioc); 539 540 return retval; 541 } 542 543 static const struct { 544 enum mpi3mr_iocstate value; 545 char *name; 546 } mrioc_states[] = { 547 { MRIOC_STATE_READY, "ready" }, 548 { MRIOC_STATE_FAULT, "fault" }, 549 { MRIOC_STATE_RESET, "reset" }, 550 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 551 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 552 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 553 }; 554 555 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 556 { 557 int i; 558 char *name = NULL; 559 560 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 561 if (mrioc_states[i].value == mrioc_state) { 562 name = mrioc_states[i].name; 563 break; 564 } 565 } 566 return name; 567 } 568 569 /** 570 * mpi3mr_print_fault_info - Display fault information 571 * @mrioc: Adapter instance reference 572 * 573 * Display the controller fault information if there is a 574 * controller fault. 575 * 576 * Return: Nothing. 577 */ 578 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 579 { 580 u32 ioc_status, code, code1, code2, code3; 581 582 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 583 584 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 585 code = readl(&mrioc->sysif_regs->fault); 586 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 587 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 588 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 589 590 ioc_info(mrioc, 591 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 592 code, code1, code2, code3); 593 } 594 } 595 596 /** 597 * mpi3mr_get_iocstate - Get IOC State 598 * @mrioc: Adapter instance reference 599 * 600 * Return a proper IOC state enum based on the IOC status and 601 * IOC configuration and unrcoverable state of the controller. 602 * 603 * Return: Current IOC state. 
604 */ 605 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 606 { 607 u32 ioc_status, ioc_config; 608 u8 ready, enabled; 609 610 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 611 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 612 613 if (mrioc->unrecoverable) 614 return MRIOC_STATE_UNRECOVERABLE; 615 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 616 return MRIOC_STATE_FAULT; 617 618 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 619 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 620 621 if (ready && enabled) 622 return MRIOC_STATE_READY; 623 if ((!ready) && (!enabled)) 624 return MRIOC_STATE_RESET; 625 if ((!ready) && (enabled)) 626 return MRIOC_STATE_BECOMING_READY; 627 628 return MRIOC_STATE_RESET_REQUESTED; 629 } 630 631 /** 632 * mpi3mr_clear_reset_history - clear reset history 633 * @mrioc: Adapter instance reference 634 * 635 * Write the reset history bit in IOC status to clear the bit, 636 * if it is already set. 637 * 638 * Return: Nothing. 639 */ 640 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) 641 { 642 u32 ioc_status; 643 644 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 645 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 646 writel(ioc_status, &mrioc->sysif_regs->ioc_status); 647 } 648 649 /** 650 * mpi3mr_issue_and_process_mur - Message unit Reset handler 651 * @mrioc: Adapter instance reference 652 * @reset_reason: Reset reason code 653 * 654 * Issue Message unit Reset to the controller and wait for it to 655 * be complete. 656 * 657 * Return: 0 on success, -1 on failure. 658 */ 659 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, 660 u32 reset_reason) 661 { 662 u32 ioc_config, timeout, ioc_status; 663 int retval = -1; 664 665 ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); 666 if (mrioc->unrecoverable) { 667 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n"); 668 return retval; 669 } 670 mpi3mr_clear_reset_history(mrioc); 671 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 672 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 673 ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 674 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 675 676 timeout = mrioc->ready_timeout * 10; 677 do { 678 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 679 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { 680 mpi3mr_clear_reset_history(mrioc); 681 ioc_config = 682 readl(&mrioc->sysif_regs->ioc_configuration); 683 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 684 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || 685 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) { 686 retval = 0; 687 break; 688 } 689 } 690 msleep(100); 691 } while (--timeout); 692 693 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 694 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 695 696 ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", 697 (!retval) ? "successful" : "failed", ioc_status, ioc_config); 698 return retval; 699 } 700 701 /** 702 * mpi3mr_bring_ioc_ready - Bring controller to ready state 703 * @mrioc: Adapter instance reference 704 * 705 * Set Enable IOC bit in IOC configuration register and wait for 706 * the controller to become ready. 707 * 708 * Return: 0 on success, -1 on failure. 
709 */ 710 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 711 { 712 u32 ioc_config, timeout; 713 enum mpi3mr_iocstate current_state; 714 715 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 716 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 717 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 718 719 timeout = mrioc->ready_timeout * 10; 720 do { 721 current_state = mpi3mr_get_iocstate(mrioc); 722 if (current_state == MRIOC_STATE_READY) 723 return 0; 724 msleep(100); 725 } while (--timeout); 726 727 return -1; 728 } 729 730 /** 731 * mpi3mr_set_diagsave - Set diag save bit for snapdump 732 * @mrioc: Adapter reference 733 * 734 * Set diag save bit in IOC configuration register to enable 735 * snapdump. 736 * 737 * Return: Nothing. 738 */ 739 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 740 { 741 u32 ioc_config; 742 743 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 744 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 745 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 746 } 747 748 /** 749 * mpi3mr_issue_reset - Issue reset to the controller 750 * @mrioc: Adapter reference 751 * @reset_type: Reset type 752 * @reset_reason: Reset reason code 753 * 754 * TBD 755 * 756 * Return: 0 on success, non-zero on failure. 757 */ 758 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 759 u32 reset_reason) 760 { 761 return 0; 762 } 763 764 /** 765 * mpi3mr_admin_request_post - Post request to admin queue 766 * @mrioc: Adapter reference 767 * @admin_req: MPI3 request 768 * @admin_req_sz: Request size 769 * @ignore_reset: Ignore reset in process 770 * 771 * Post the MPI3 request into admin request queue and 772 * inform the controller, if the queue is full return 773 * appropriate error. 774 * 775 * Return: 0 on success, non-zero on failure. 776 */ 777 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 778 u16 admin_req_sz, u8 ignore_reset) 779 { 780 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 781 int retval = 0; 782 unsigned long flags; 783 u8 *areq_entry; 784 785 if (mrioc->unrecoverable) { 786 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 787 return -EFAULT; 788 } 789 790 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 791 areq_pi = mrioc->admin_req_pi; 792 areq_ci = mrioc->admin_req_ci; 793 max_entries = mrioc->num_admin_req; 794 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 795 (areq_pi == (max_entries - 1)))) { 796 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 797 retval = -EAGAIN; 798 goto out; 799 } 800 if (!ignore_reset && mrioc->reset_in_progress) { 801 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 802 retval = -EAGAIN; 803 goto out; 804 } 805 areq_entry = (u8 *)mrioc->admin_req_base + 806 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 807 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 808 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 809 810 if (++areq_pi == max_entries) 811 areq_pi = 0; 812 mrioc->admin_req_pi = areq_pi; 813 814 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 815 816 out: 817 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 818 819 return retval; 820 } 821 822 /** 823 * mpi3mr_free_op_req_q_segments - free request memory segments 824 * @mrioc: Adapter instance reference 825 * @q_idx: operational request queue index 826 * 827 * Free memory segments allocated for operational request queue 828 * 829 * Return: Nothing. 
830 */ 831 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 832 { 833 u16 j; 834 int size; 835 struct segments *segments; 836 837 segments = mrioc->req_qinfo[q_idx].q_segments; 838 if (!segments) 839 return; 840 841 if (mrioc->enable_segqueue) { 842 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 843 if (mrioc->req_qinfo[q_idx].q_segment_list) { 844 dma_free_coherent(&mrioc->pdev->dev, 845 MPI3MR_MAX_SEG_LIST_SIZE, 846 mrioc->req_qinfo[q_idx].q_segment_list, 847 mrioc->req_qinfo[q_idx].q_segment_list_dma); 848 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 849 } 850 } else 851 size = mrioc->req_qinfo[q_idx].num_requests * 852 mrioc->facts.op_req_sz; 853 854 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 855 if (!segments[j].segment) 856 continue; 857 dma_free_coherent(&mrioc->pdev->dev, 858 size, segments[j].segment, segments[j].segment_dma); 859 segments[j].segment = NULL; 860 } 861 kfree(mrioc->req_qinfo[q_idx].q_segments); 862 mrioc->req_qinfo[q_idx].q_segments = NULL; 863 mrioc->req_qinfo[q_idx].qid = 0; 864 } 865 866 /** 867 * mpi3mr_free_op_reply_q_segments - free reply memory segments 868 * @mrioc: Adapter instance reference 869 * @q_idx: operational reply queue index 870 * 871 * Free memory segments allocated for operational reply queue 872 * 873 * Return: Nothing. 874 */ 875 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 876 { 877 u16 j; 878 int size; 879 struct segments *segments; 880 881 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 882 if (!segments) 883 return; 884 885 if (mrioc->enable_segqueue) { 886 size = MPI3MR_OP_REP_Q_SEG_SIZE; 887 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 888 dma_free_coherent(&mrioc->pdev->dev, 889 MPI3MR_MAX_SEG_LIST_SIZE, 890 mrioc->op_reply_qinfo[q_idx].q_segment_list, 891 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 892 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 893 } 894 } else 895 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 896 mrioc->op_reply_desc_sz; 897 898 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 899 if (!segments[j].segment) 900 continue; 901 dma_free_coherent(&mrioc->pdev->dev, 902 size, segments[j].segment, segments[j].segment_dma); 903 segments[j].segment = NULL; 904 } 905 906 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 907 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 908 mrioc->op_reply_qinfo[q_idx].qid = 0; 909 } 910 911 /** 912 * mpi3mr_delete_op_reply_q - delete operational reply queue 913 * @mrioc: Adapter instance reference 914 * @qidx: operational reply queue index 915 * 916 * Delete operatinal reply queue by issuing MPI request 917 * through admin queue. 918 * 919 * Return: 0 on success, non-zero on failure. 
920 */ 921 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 922 { 923 struct mpi3_delete_reply_queue_request delq_req; 924 int retval = 0; 925 u16 reply_qid = 0, midx; 926 927 reply_qid = mrioc->op_reply_qinfo[qidx].qid; 928 929 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 930 931 if (!reply_qid) { 932 retval = -1; 933 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 934 goto out; 935 } 936 937 memset(&delq_req, 0, sizeof(delq_req)); 938 mutex_lock(&mrioc->init_cmds.mutex); 939 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 940 retval = -1; 941 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 942 mutex_unlock(&mrioc->init_cmds.mutex); 943 goto out; 944 } 945 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 946 mrioc->init_cmds.is_waiting = 1; 947 mrioc->init_cmds.callback = NULL; 948 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 949 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 950 delq_req.queue_id = cpu_to_le16(reply_qid); 951 952 init_completion(&mrioc->init_cmds.done); 953 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 954 1); 955 if (retval) { 956 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 957 goto out_unlock; 958 } 959 wait_for_completion_timeout(&mrioc->init_cmds.done, 960 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 961 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 962 ioc_err(mrioc, "Issue DelRepQ: command timed out\n"); 963 mpi3mr_set_diagsave(mrioc); 964 mpi3mr_issue_reset(mrioc, 965 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 966 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 967 mrioc->unrecoverable = 1; 968 969 retval = -1; 970 goto out_unlock; 971 } 972 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 973 != MPI3_IOCSTATUS_SUCCESS) { 974 ioc_err(mrioc, 975 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 976 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 977 mrioc->init_cmds.ioc_loginfo); 978 retval = -1; 979 goto out_unlock; 980 } 981 mrioc->intr_info[midx].op_reply_q = NULL; 982 983 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 984 out_unlock: 985 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 986 mutex_unlock(&mrioc->init_cmds.mutex); 987 out: 988 989 return retval; 990 } 991 992 /** 993 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 994 * @mrioc: Adapter instance reference 995 * @qidx: request queue index 996 * 997 * Allocate segmented memory pools for operational reply 998 * queue. 999 * 1000 * Return: 0 on success, non-zero on failure. 
1001 */ 1002 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1003 { 1004 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1005 int i, size; 1006 u64 *q_segment_list_entry = NULL; 1007 struct segments *segments; 1008 1009 if (mrioc->enable_segqueue) { 1010 op_reply_q->segment_qd = 1011 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 1012 1013 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1014 1015 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1016 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 1017 GFP_KERNEL); 1018 if (!op_reply_q->q_segment_list) 1019 return -ENOMEM; 1020 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 1021 } else { 1022 op_reply_q->segment_qd = op_reply_q->num_replies; 1023 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 1024 } 1025 1026 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 1027 op_reply_q->segment_qd); 1028 1029 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 1030 sizeof(struct segments), GFP_KERNEL); 1031 if (!op_reply_q->q_segments) 1032 return -ENOMEM; 1033 1034 segments = op_reply_q->q_segments; 1035 for (i = 0; i < op_reply_q->num_segments; i++) { 1036 segments[i].segment = 1037 dma_alloc_coherent(&mrioc->pdev->dev, 1038 size, &segments[i].segment_dma, GFP_KERNEL); 1039 if (!segments[i].segment) 1040 return -ENOMEM; 1041 if (mrioc->enable_segqueue) 1042 q_segment_list_entry[i] = 1043 (unsigned long)segments[i].segment_dma; 1044 } 1045 1046 return 0; 1047 } 1048 1049 /** 1050 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 1051 * @mrioc: Adapter instance reference 1052 * @qidx: request queue index 1053 * 1054 * Allocate segmented memory pools for operational request 1055 * queue. 1056 * 1057 * Return: 0 on success, non-zero on failure. 
1058 */ 1059 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1060 { 1061 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 1062 int i, size; 1063 u64 *q_segment_list_entry = NULL; 1064 struct segments *segments; 1065 1066 if (mrioc->enable_segqueue) { 1067 op_req_q->segment_qd = 1068 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 1069 1070 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1071 1072 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1073 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 1074 GFP_KERNEL); 1075 if (!op_req_q->q_segment_list) 1076 return -ENOMEM; 1077 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 1078 1079 } else { 1080 op_req_q->segment_qd = op_req_q->num_requests; 1081 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 1082 } 1083 1084 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 1085 op_req_q->segment_qd); 1086 1087 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 1088 sizeof(struct segments), GFP_KERNEL); 1089 if (!op_req_q->q_segments) 1090 return -ENOMEM; 1091 1092 segments = op_req_q->q_segments; 1093 for (i = 0; i < op_req_q->num_segments; i++) { 1094 segments[i].segment = 1095 dma_alloc_coherent(&mrioc->pdev->dev, 1096 size, &segments[i].segment_dma, GFP_KERNEL); 1097 if (!segments[i].segment) 1098 return -ENOMEM; 1099 if (mrioc->enable_segqueue) 1100 q_segment_list_entry[i] = 1101 (unsigned long)segments[i].segment_dma; 1102 } 1103 1104 return 0; 1105 } 1106 1107 /** 1108 * mpi3mr_create_op_reply_q - create operational reply queue 1109 * @mrioc: Adapter instance reference 1110 * @qidx: operational reply queue index 1111 * 1112 * Create operatinal reply queue by issuing MPI request 1113 * through admin queue. 1114 * 1115 * Return: 0 on success, non-zero on failure. 
1116 */ 1117 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1118 { 1119 struct mpi3_create_reply_queue_request create_req; 1120 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1121 int retval = 0; 1122 u16 reply_qid = 0, midx; 1123 1124 reply_qid = op_reply_q->qid; 1125 1126 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1127 1128 if (reply_qid) { 1129 retval = -1; 1130 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1131 reply_qid); 1132 1133 return retval; 1134 } 1135 1136 reply_qid = qidx + 1; 1137 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1138 op_reply_q->ci = 0; 1139 op_reply_q->ephase = 1; 1140 1141 if (!op_reply_q->q_segments) { 1142 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1143 if (retval) { 1144 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1145 goto out; 1146 } 1147 } 1148 1149 memset(&create_req, 0, sizeof(create_req)); 1150 mutex_lock(&mrioc->init_cmds.mutex); 1151 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1152 retval = -1; 1153 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1154 goto out; 1155 } 1156 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1157 mrioc->init_cmds.is_waiting = 1; 1158 mrioc->init_cmds.callback = NULL; 1159 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1160 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1161 create_req.queue_id = cpu_to_le16(reply_qid); 1162 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1163 create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); 1164 if (mrioc->enable_segqueue) { 1165 create_req.flags |= 1166 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1167 create_req.base_address = cpu_to_le64( 1168 op_reply_q->q_segment_list_dma); 1169 } else 1170 create_req.base_address = cpu_to_le64( 1171 op_reply_q->q_segments[0].segment_dma); 1172 1173 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1174 1175 init_completion(&mrioc->init_cmds.done); 1176 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1177 sizeof(create_req), 1); 1178 if (retval) { 1179 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1180 goto out_unlock; 1181 } 1182 wait_for_completion_timeout(&mrioc->init_cmds.done, 1183 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1184 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1185 ioc_err(mrioc, "CreateRepQ: command timed out\n"); 1186 mpi3mr_set_diagsave(mrioc); 1187 mpi3mr_issue_reset(mrioc, 1188 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1189 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1190 mrioc->unrecoverable = 1; 1191 retval = -1; 1192 goto out_unlock; 1193 } 1194 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1195 != MPI3_IOCSTATUS_SUCCESS) { 1196 ioc_err(mrioc, 1197 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1198 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1199 mrioc->init_cmds.ioc_loginfo); 1200 retval = -1; 1201 goto out_unlock; 1202 } 1203 op_reply_q->qid = reply_qid; 1204 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1205 1206 out_unlock: 1207 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1208 mutex_unlock(&mrioc->init_cmds.mutex); 1209 out: 1210 1211 return retval; 1212 } 1213 1214 /** 1215 * mpi3mr_create_op_req_q - create operational request queue 1216 * @mrioc: Adapter instance reference 1217 * @idx: operational request queue index 1218 * @reply_qid: Reply queue ID 1219 * 1220 * Create operatinal request queue by issuing MPI request 1221 * through admin queue. 
1222 * 1223 * Return: 0 on success, non-zero on failure. 1224 */ 1225 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1226 u16 reply_qid) 1227 { 1228 struct mpi3_create_request_queue_request create_req; 1229 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1230 int retval = 0; 1231 u16 req_qid = 0; 1232 1233 req_qid = op_req_q->qid; 1234 1235 if (req_qid) { 1236 retval = -1; 1237 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1238 req_qid); 1239 1240 return retval; 1241 } 1242 req_qid = idx + 1; 1243 1244 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1245 op_req_q->ci = 0; 1246 op_req_q->pi = 0; 1247 op_req_q->reply_qid = reply_qid; 1248 spin_lock_init(&op_req_q->q_lock); 1249 1250 if (!op_req_q->q_segments) { 1251 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1252 if (retval) { 1253 mpi3mr_free_op_req_q_segments(mrioc, idx); 1254 goto out; 1255 } 1256 } 1257 1258 memset(&create_req, 0, sizeof(create_req)); 1259 mutex_lock(&mrioc->init_cmds.mutex); 1260 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1261 retval = -1; 1262 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1263 goto out; 1264 } 1265 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1266 mrioc->init_cmds.is_waiting = 1; 1267 mrioc->init_cmds.callback = NULL; 1268 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1269 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1270 create_req.queue_id = cpu_to_le16(req_qid); 1271 if (mrioc->enable_segqueue) { 1272 create_req.flags = 1273 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1274 create_req.base_address = cpu_to_le64( 1275 op_req_q->q_segment_list_dma); 1276 } else 1277 create_req.base_address = cpu_to_le64( 1278 op_req_q->q_segments[0].segment_dma); 1279 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1280 create_req.size = cpu_to_le16(op_req_q->num_requests); 1281 1282 init_completion(&mrioc->init_cmds.done); 1283 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1284 sizeof(create_req), 1); 1285 if (retval) { 1286 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1287 goto out_unlock; 1288 } 1289 wait_for_completion_timeout(&mrioc->init_cmds.done, 1290 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1291 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1292 ioc_err(mrioc, "CreateReqQ: command timed out\n"); 1293 mpi3mr_set_diagsave(mrioc); 1294 if (mpi3mr_issue_reset(mrioc, 1295 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1296 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT)) 1297 mrioc->unrecoverable = 1; 1298 retval = -1; 1299 goto out_unlock; 1300 } 1301 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1302 != MPI3_IOCSTATUS_SUCCESS) { 1303 ioc_err(mrioc, 1304 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1305 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1306 mrioc->init_cmds.ioc_loginfo); 1307 retval = -1; 1308 goto out_unlock; 1309 } 1310 op_req_q->qid = req_qid; 1311 1312 out_unlock: 1313 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1314 mutex_unlock(&mrioc->init_cmds.mutex); 1315 out: 1316 1317 return retval; 1318 } 1319 1320 /** 1321 * mpi3mr_create_op_queues - create operational queue pairs 1322 * @mrioc: Adapter instance reference 1323 * 1324 * Allocate memory for operational queue meta data and call 1325 * create request and reply queue functions. 1326 * 1327 * Return: 0 on success, non-zero on failures. 
1328 */ 1329 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1330 { 1331 int retval = 0; 1332 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1333 1334 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1335 mrioc->facts.max_op_req_q); 1336 1337 msix_count_op_q = 1338 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1339 if (!mrioc->num_queues) 1340 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1341 num_queues = mrioc->num_queues; 1342 ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", 1343 num_queues); 1344 1345 if (!mrioc->req_qinfo) { 1346 mrioc->req_qinfo = kcalloc(num_queues, 1347 sizeof(struct op_req_qinfo), GFP_KERNEL); 1348 if (!mrioc->req_qinfo) { 1349 retval = -1; 1350 goto out_failed; 1351 } 1352 1353 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1354 num_queues, GFP_KERNEL); 1355 if (!mrioc->op_reply_qinfo) { 1356 retval = -1; 1357 goto out_failed; 1358 } 1359 } 1360 1361 if (mrioc->enable_segqueue) 1362 ioc_info(mrioc, 1363 "allocating operational queues through segmented queues\n"); 1364 1365 for (i = 0; i < num_queues; i++) { 1366 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1367 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1368 break; 1369 } 1370 if (mpi3mr_create_op_req_q(mrioc, i, 1371 mrioc->op_reply_qinfo[i].qid)) { 1372 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1373 mpi3mr_delete_op_reply_q(mrioc, i); 1374 break; 1375 } 1376 } 1377 1378 if (i == 0) { 1379 /* Not even one queue is created successfully*/ 1380 retval = -1; 1381 goto out_failed; 1382 } 1383 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1384 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1385 mrioc->num_op_reply_q); 1386 1387 return retval; 1388 out_failed: 1389 kfree(mrioc->req_qinfo); 1390 mrioc->req_qinfo = NULL; 1391 1392 kfree(mrioc->op_reply_qinfo); 1393 mrioc->op_reply_qinfo = NULL; 1394 1395 return retval; 1396 } 1397 1398 /** 1399 * mpi3mr_op_request_post - Post request to operational queue 1400 * @mrioc: Adapter reference 1401 * @op_req_q: Operational request queue info 1402 * @req: MPI3 request 1403 * 1404 * Post the MPI3 request into operational request queue and 1405 * inform the controller, if the queue is full return 1406 * appropriate error. 1407 * 1408 * Return: 0 on success, non-zero on failure. 
1409 */ 1410 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 1411 struct op_req_qinfo *op_req_q, u8 *req) 1412 { 1413 u16 pi = 0, max_entries, reply_qidx = 0, midx; 1414 int retval = 0; 1415 unsigned long flags; 1416 u8 *req_entry; 1417 void *segment_base_addr; 1418 u16 req_sz = mrioc->facts.op_req_sz; 1419 struct segments *segments = op_req_q->q_segments; 1420 1421 reply_qidx = op_req_q->reply_qid - 1; 1422 1423 if (mrioc->unrecoverable) 1424 return -EFAULT; 1425 1426 spin_lock_irqsave(&op_req_q->q_lock, flags); 1427 pi = op_req_q->pi; 1428 max_entries = op_req_q->num_requests; 1429 1430 if (mpi3mr_check_req_qfull(op_req_q)) { 1431 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 1432 reply_qidx, mrioc->op_reply_q_offset); 1433 mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); 1434 1435 if (mpi3mr_check_req_qfull(op_req_q)) { 1436 retval = -EAGAIN; 1437 goto out; 1438 } 1439 } 1440 1441 if (mrioc->reset_in_progress) { 1442 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 1443 retval = -EAGAIN; 1444 goto out; 1445 } 1446 1447 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 1448 req_entry = (u8 *)segment_base_addr + 1449 ((pi % op_req_q->segment_qd) * req_sz); 1450 1451 memset(req_entry, 0, req_sz); 1452 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 1453 1454 if (++pi == max_entries) 1455 pi = 0; 1456 op_req_q->pi = pi; 1457 1458 writel(op_req_q->pi, 1459 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 1460 1461 out: 1462 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 1463 return retval; 1464 } 1465 1466 /** 1467 * mpi3mr_setup_admin_qpair - Setup admin queue pair 1468 * @mrioc: Adapter instance reference 1469 * 1470 * Allocate memory for admin queue pair if required and register 1471 * the admin queue with the controller. 1472 * 1473 * Return: 0 on success, non-zero on failures. 
1474 */ 1475 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) 1476 { 1477 int retval = 0; 1478 u32 num_admin_entries = 0; 1479 1480 mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE; 1481 mrioc->num_admin_req = mrioc->admin_req_q_sz / 1482 MPI3MR_ADMIN_REQ_FRAME_SZ; 1483 mrioc->admin_req_ci = mrioc->admin_req_pi = 0; 1484 mrioc->admin_req_base = NULL; 1485 1486 mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE; 1487 mrioc->num_admin_replies = mrioc->admin_reply_q_sz / 1488 MPI3MR_ADMIN_REPLY_FRAME_SZ; 1489 mrioc->admin_reply_ci = 0; 1490 mrioc->admin_reply_ephase = 1; 1491 mrioc->admin_reply_base = NULL; 1492 1493 if (!mrioc->admin_req_base) { 1494 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, 1495 mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL); 1496 1497 if (!mrioc->admin_req_base) { 1498 retval = -1; 1499 goto out_failed; 1500 } 1501 1502 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev, 1503 mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma, 1504 GFP_KERNEL); 1505 1506 if (!mrioc->admin_reply_base) { 1507 retval = -1; 1508 goto out_failed; 1509 } 1510 } 1511 1512 num_admin_entries = (mrioc->num_admin_replies << 16) | 1513 (mrioc->num_admin_req); 1514 writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); 1515 mpi3mr_writeq(mrioc->admin_req_dma, 1516 &mrioc->sysif_regs->admin_request_queue_address); 1517 mpi3mr_writeq(mrioc->admin_reply_dma, 1518 &mrioc->sysif_regs->admin_reply_queue_address); 1519 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1520 writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); 1521 return retval; 1522 1523 out_failed: 1524 1525 if (mrioc->admin_reply_base) { 1526 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 1527 mrioc->admin_reply_base, mrioc->admin_reply_dma); 1528 mrioc->admin_reply_base = NULL; 1529 } 1530 if (mrioc->admin_req_base) { 1531 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 1532 mrioc->admin_req_base, mrioc->admin_req_dma); 1533 mrioc->admin_req_base = NULL; 1534 } 1535 return retval; 1536 } 1537 1538 /** 1539 * mpi3mr_issue_iocfacts - Send IOC Facts 1540 * @mrioc: Adapter instance reference 1541 * @facts_data: Cached IOC facts data 1542 * 1543 * Issue IOC Facts MPI request through admin queue and wait for 1544 * the completion of it or time out. 1545 * 1546 * Return: 0 on success, non-zero on failures. 
1547 */ 1548 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc, 1549 struct mpi3_ioc_facts_data *facts_data) 1550 { 1551 struct mpi3_ioc_facts_request iocfacts_req; 1552 void *data = NULL; 1553 dma_addr_t data_dma; 1554 u32 data_len = sizeof(*facts_data); 1555 int retval = 0; 1556 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1557 1558 data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 1559 GFP_KERNEL); 1560 1561 if (!data) { 1562 retval = -1; 1563 goto out; 1564 } 1565 1566 memset(&iocfacts_req, 0, sizeof(iocfacts_req)); 1567 mutex_lock(&mrioc->init_cmds.mutex); 1568 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1569 retval = -1; 1570 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n"); 1571 mutex_unlock(&mrioc->init_cmds.mutex); 1572 goto out; 1573 } 1574 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1575 mrioc->init_cmds.is_waiting = 1; 1576 mrioc->init_cmds.callback = NULL; 1577 iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1578 iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS; 1579 1580 mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len, 1581 data_dma); 1582 1583 init_completion(&mrioc->init_cmds.done); 1584 retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req, 1585 sizeof(iocfacts_req), 1); 1586 if (retval) { 1587 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n"); 1588 goto out_unlock; 1589 } 1590 wait_for_completion_timeout(&mrioc->init_cmds.done, 1591 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1592 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1593 ioc_err(mrioc, "Issue IOCFacts: command timed out\n"); 1594 mpi3mr_set_diagsave(mrioc); 1595 mpi3mr_issue_reset(mrioc, 1596 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1597 MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT); 1598 mrioc->unrecoverable = 1; 1599 retval = -1; 1600 goto out_unlock; 1601 } 1602 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1603 != MPI3_IOCSTATUS_SUCCESS) { 1604 ioc_err(mrioc, 1605 "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1606 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1607 mrioc->init_cmds.ioc_loginfo); 1608 retval = -1; 1609 goto out_unlock; 1610 } 1611 memcpy(facts_data, (u8 *)data, data_len); 1612 out_unlock: 1613 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1614 mutex_unlock(&mrioc->init_cmds.mutex); 1615 1616 out: 1617 if (data) 1618 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma); 1619 1620 return retval; 1621 } 1622 1623 /** 1624 * mpi3mr_check_reset_dma_mask - Process IOC facts data 1625 * @mrioc: Adapter instance reference 1626 * 1627 * Check whether the new DMA mask requested through IOCFacts by 1628 * firmware needs to be set, if so set it . 1629 * 1630 * Return: 0 on success, non-zero on failure. 
1631 */ 1632 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc) 1633 { 1634 struct pci_dev *pdev = mrioc->pdev; 1635 int r; 1636 u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask); 1637 1638 if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask)) 1639 return 0; 1640 1641 ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n", 1642 mrioc->dma_mask, facts_dma_mask); 1643 1644 r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask); 1645 if (r) { 1646 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n", 1647 facts_dma_mask, r); 1648 return r; 1649 } 1650 mrioc->dma_mask = facts_dma_mask; 1651 return r; 1652 } 1653 1654 /** 1655 * mpi3mr_process_factsdata - Process IOC facts data 1656 * @mrioc: Adapter instance reference 1657 * @facts_data: Cached IOC facts data 1658 * 1659 * Convert IOC facts data into cpu endianness and cache it in 1660 * the driver . 1661 * 1662 * Return: Nothing. 1663 */ 1664 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, 1665 struct mpi3_ioc_facts_data *facts_data) 1666 { 1667 u32 ioc_config, req_sz, facts_flags; 1668 1669 if ((le16_to_cpu(facts_data->ioc_facts_data_length)) != 1670 (sizeof(*facts_data) / 4)) { 1671 ioc_warn(mrioc, 1672 "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n", 1673 sizeof(*facts_data), 1674 le16_to_cpu(facts_data->ioc_facts_data_length) * 4); 1675 } 1676 1677 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1678 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >> 1679 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT); 1680 if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) { 1681 ioc_err(mrioc, 1682 "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n", 1683 req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size)); 1684 } 1685 1686 memset(&mrioc->facts, 0, sizeof(mrioc->facts)); 1687 1688 facts_flags = le32_to_cpu(facts_data->flags); 1689 mrioc->facts.op_req_sz = req_sz; 1690 mrioc->op_reply_desc_sz = 1 << ((ioc_config & 1691 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >> 1692 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT); 1693 1694 mrioc->facts.ioc_num = facts_data->ioc_number; 1695 mrioc->facts.who_init = facts_data->who_init; 1696 mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors); 1697 mrioc->facts.personality = (facts_flags & 1698 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK); 1699 mrioc->facts.dma_mask = (facts_flags & 1700 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> 1701 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; 1702 mrioc->facts.protocol_flags = facts_data->protocol_flags; 1703 mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); 1704 mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request); 1705 mrioc->facts.product_id = le16_to_cpu(facts_data->product_id); 1706 mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4; 1707 mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions); 1708 mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id); 1709 mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds); 1710 mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds); 1711 mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds); 1712 mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds); 1713 mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds); 1714 mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme); 1715 mrioc->facts.max_pcie_switches = 1716 
le16_to_cpu(facts_data->max_pc_ie_switches); 1717 mrioc->facts.max_sasexpanders = 1718 le16_to_cpu(facts_data->max_sas_expanders); 1719 mrioc->facts.max_sasinitiators = 1720 le16_to_cpu(facts_data->max_sas_initiators); 1721 mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures); 1722 mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle); 1723 mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle); 1724 mrioc->facts.max_op_req_q = 1725 le16_to_cpu(facts_data->max_operational_request_queues); 1726 mrioc->facts.max_op_reply_q = 1727 le16_to_cpu(facts_data->max_operational_reply_queues); 1728 mrioc->facts.ioc_capabilities = 1729 le32_to_cpu(facts_data->ioc_capabilities); 1730 mrioc->facts.fw_ver.build_num = 1731 le16_to_cpu(facts_data->fw_version.build_num); 1732 mrioc->facts.fw_ver.cust_id = 1733 le16_to_cpu(facts_data->fw_version.customer_id); 1734 mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor; 1735 mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major; 1736 mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor; 1737 mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major; 1738 mrioc->msix_count = min_t(int, mrioc->msix_count, 1739 mrioc->facts.max_msix_vectors); 1740 mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask; 1741 mrioc->facts.sge_mod_value = facts_data->sge_modifier_value; 1742 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift; 1743 mrioc->facts.shutdown_timeout = 1744 le16_to_cpu(facts_data->shutdown_timeout); 1745 1746 ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),", 1747 mrioc->facts.ioc_num, mrioc->facts.max_op_req_q, 1748 mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle); 1749 ioc_info(mrioc, 1750 "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n", 1751 mrioc->facts.max_reqs, mrioc->facts.min_devhandle, 1752 mrioc->facts.max_pds, mrioc->facts.max_msix_vectors, 1753 mrioc->facts.max_perids); 1754 ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ", 1755 mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value, 1756 mrioc->facts.sge_mod_shift); 1757 ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n", 1758 mrioc->facts.dma_mask, (facts_flags & 1759 MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK)); 1760 1761 mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; 1762 1763 if (reset_devices) 1764 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, 1765 MPI3MR_HOST_IOS_KDUMP); 1766 } 1767 1768 /** 1769 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init 1770 * @mrioc: Adapter instance reference 1771 * 1772 * Allocate and initialize the reply free buffers, sense 1773 * buffers, reply free queue and sense buffer queue. 1774 * 1775 * Return: 0 on success, non-zero on failures. 
1776 */ 1777 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) 1778 { 1779 int retval = 0; 1780 u32 sz, i; 1781 dma_addr_t phy_addr; 1782 1783 if (mrioc->init_cmds.reply) 1784 goto post_reply_sbuf; 1785 1786 mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL); 1787 if (!mrioc->init_cmds.reply) 1788 goto out_failed; 1789 1790 mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; 1791 mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1; 1792 mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; 1793 mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; 1794 1795 /* reply buffer pool, 16 byte align */ 1796 sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; 1797 mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", 1798 &mrioc->pdev->dev, sz, 16, 0); 1799 if (!mrioc->reply_buf_pool) { 1800 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n"); 1801 goto out_failed; 1802 } 1803 1804 mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL, 1805 &mrioc->reply_buf_dma); 1806 if (!mrioc->reply_buf) 1807 goto out_failed; 1808 1809 mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz; 1810 1811 /* reply free queue, 8 byte align */ 1812 sz = mrioc->reply_free_qsz * 8; 1813 mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool", 1814 &mrioc->pdev->dev, sz, 8, 0); 1815 if (!mrioc->reply_free_q_pool) { 1816 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n"); 1817 goto out_failed; 1818 } 1819 mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool, 1820 GFP_KERNEL, &mrioc->reply_free_q_dma); 1821 if (!mrioc->reply_free_q) 1822 goto out_failed; 1823 1824 /* sense buffer pool, 4 byte align */ 1825 sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; 1826 mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", 1827 &mrioc->pdev->dev, sz, 4, 0); 1828 if (!mrioc->sense_buf_pool) { 1829 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n"); 1830 goto out_failed; 1831 } 1832 mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL, 1833 &mrioc->sense_buf_dma); 1834 if (!mrioc->sense_buf) 1835 goto out_failed; 1836 1837 /* sense buffer queue, 8 byte align */ 1838 sz = mrioc->sense_buf_q_sz * 8; 1839 mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool", 1840 &mrioc->pdev->dev, sz, 8, 0); 1841 if (!mrioc->sense_buf_q_pool) { 1842 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n"); 1843 goto out_failed; 1844 } 1845 mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool, 1846 GFP_KERNEL, &mrioc->sense_buf_q_dma); 1847 if (!mrioc->sense_buf_q) 1848 goto out_failed; 1849 1850 post_reply_sbuf: 1851 sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz; 1852 ioc_info(mrioc, 1853 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 1854 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz, 1855 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); 1856 sz = mrioc->reply_free_qsz * 8; 1857 ioc_info(mrioc, 1858 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 1859 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), 1860 (unsigned long long)mrioc->reply_free_q_dma); 1861 sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ; 1862 ioc_info(mrioc, 1863 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 1864 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ, 1865 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); 1866 sz = 
mrioc->sense_buf_q_sz * 8; 1867 ioc_info(mrioc, 1868 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 1869 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024), 1870 (unsigned long long)mrioc->sense_buf_q_dma); 1871 1872 /* initialize Reply buffer Queue */ 1873 for (i = 0, phy_addr = mrioc->reply_buf_dma; 1874 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz) 1875 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); 1876 mrioc->reply_free_q[i] = cpu_to_le64(0); 1877 1878 /* initialize Sense Buffer Queue */ 1879 for (i = 0, phy_addr = mrioc->sense_buf_dma; 1880 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ) 1881 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); 1882 mrioc->sense_buf_q[i] = cpu_to_le64(0); 1883 return retval; 1884 1885 out_failed: 1886 retval = -1; 1887 return retval; 1888 } 1889 1890 /** 1891 * mpi3mr_issue_iocinit - Send IOC Init 1892 * @mrioc: Adapter instance reference 1893 * 1894 * Issue IOC Init MPI request through admin queue and wait for 1895 * the completion of it or time out. 1896 * 1897 * Return: 0 on success, non-zero on failures. 1898 */ 1899 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) 1900 { 1901 struct mpi3_ioc_init_request iocinit_req; 1902 struct mpi3_driver_info_layout *drv_info; 1903 dma_addr_t data_dma; 1904 u32 data_len = sizeof(*drv_info); 1905 int retval = 0; 1906 ktime_t current_time; 1907 1908 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 1909 GFP_KERNEL); 1910 if (!drv_info) { 1911 retval = -1; 1912 goto out; 1913 } 1914 drv_info->information_length = cpu_to_le32(data_len); 1915 strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); 1916 strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); 1917 drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; 1918 strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); 1919 drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; 1920 strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); 1921 strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); 1922 strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); 1923 drv_info->driver_capabilities = 0; 1924 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, 1925 sizeof(mrioc->driver_info)); 1926 1927 memset(&iocinit_req, 0, sizeof(iocinit_req)); 1928 mutex_lock(&mrioc->init_cmds.mutex); 1929 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1930 retval = -1; 1931 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); 1932 mutex_unlock(&mrioc->init_cmds.mutex); 1933 goto out; 1934 } 1935 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1936 mrioc->init_cmds.is_waiting = 1; 1937 mrioc->init_cmds.callback = NULL; 1938 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1939 iocinit_req.function = MPI3_FUNCTION_IOC_INIT; 1940 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; 1941 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; 1942 iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; 1943 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; 1944 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; 1945 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); 1946 iocinit_req.reply_free_queue_address = 1947 cpu_to_le64(mrioc->reply_free_q_dma); 1948 iocinit_req.sense_buffer_length = 
cpu_to_le16(MPI3MR_SENSEBUF_SZ); 1949 iocinit_req.sense_buffer_free_queue_depth = 1950 cpu_to_le16(mrioc->sense_buf_q_sz); 1951 iocinit_req.sense_buffer_free_queue_address = 1952 cpu_to_le64(mrioc->sense_buf_q_dma); 1953 iocinit_req.driver_information_address = cpu_to_le64(data_dma); 1954 1955 current_time = ktime_get_real(); 1956 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); 1957 1958 init_completion(&mrioc->init_cmds.done); 1959 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, 1960 sizeof(iocinit_req), 1); 1961 if (retval) { 1962 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); 1963 goto out_unlock; 1964 } 1965 wait_for_completion_timeout(&mrioc->init_cmds.done, 1966 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1967 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1968 mpi3mr_set_diagsave(mrioc); 1969 mpi3mr_issue_reset(mrioc, 1970 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1971 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); 1972 mrioc->unrecoverable = 1; 1973 ioc_err(mrioc, "Issue IOCInit: command timed out\n"); 1974 retval = -1; 1975 goto out_unlock; 1976 } 1977 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1978 != MPI3_IOCSTATUS_SUCCESS) { 1979 ioc_err(mrioc, 1980 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1981 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1982 mrioc->init_cmds.ioc_loginfo); 1983 retval = -1; 1984 goto out_unlock; 1985 } 1986 1987 out_unlock: 1988 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1989 mutex_unlock(&mrioc->init_cmds.mutex); 1990 1991 out: 1992 if (drv_info) 1993 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, 1994 data_dma); 1995 1996 return retval; 1997 } 1998 1999 /** 2000 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 2001 * @mrioc: Adapter instance reference 2002 * 2003 * Allocate chain buffers and set a bitmap to indicate free 2004 * chain buffers. Chain buffers are used to pass the SGE 2005 * information along with MPI3 SCSI IO requests for host I/O. 
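*
* Sizing note (editorial addition, derived from the code below): each
* chain buffer is a single MPI3MR_PAGE_SIZE_4K DMA-pool page, the buffer
* count is max_host_ios / MPI3MR_CHAINBUF_FACTOR, and a bitmap with one
* bit per buffer (rounded up to whole bytes) records which buffers are
* free. Purely as an illustration, assuming max_host_ios of 4096 and a
* factor of 8, this works out to 512 chain buffers and a 64-byte bitmap.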
2006 * 2007 * Return: 0 on success, non-zero on failure 2008 */ 2009 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 2010 { 2011 int retval = 0; 2012 u32 sz, i; 2013 u16 num_chains; 2014 2015 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 2016 2017 mrioc->chain_buf_count = num_chains; 2018 sz = sizeof(struct chain_element) * num_chains; 2019 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 2020 if (!mrioc->chain_sgl_list) 2021 goto out_failed; 2022 2023 sz = MPI3MR_PAGE_SIZE_4K; 2024 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 2025 &mrioc->pdev->dev, sz, 16, 0); 2026 if (!mrioc->chain_buf_pool) { 2027 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 2028 goto out_failed; 2029 } 2030 2031 for (i = 0; i < num_chains; i++) { 2032 mrioc->chain_sgl_list[i].addr = 2033 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 2034 &mrioc->chain_sgl_list[i].dma_addr); 2035 2036 if (!mrioc->chain_sgl_list[i].addr) 2037 goto out_failed; 2038 } 2039 mrioc->chain_bitmap_sz = num_chains / 8; 2040 if (num_chains % 8) 2041 mrioc->chain_bitmap_sz++; 2042 mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 2043 if (!mrioc->chain_bitmap) 2044 goto out_failed; 2045 return retval; 2046 out_failed: 2047 retval = -1; 2048 return retval; 2049 } 2050 2051 /** 2052 * mpi3mr_port_enable_complete - Mark port enable complete 2053 * @mrioc: Adapter instance reference 2054 * @drv_cmd: Internal command tracker 2055 * 2056 * Call back for asynchronous port enable request sets the 2057 * driver command to indicate port enable request is complete. 2058 * 2059 * Return: Nothing 2060 */ 2061 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 2062 struct mpi3mr_drv_cmd *drv_cmd) 2063 { 2064 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2065 drv_cmd->callback = NULL; 2066 mrioc->scan_failed = drv_cmd->ioc_status; 2067 mrioc->scan_started = 0; 2068 } 2069 2070 /** 2071 * mpi3mr_issue_port_enable - Issue Port Enable 2072 * @mrioc: Adapter instance reference 2073 * @async: Flag to wait for completion or not 2074 * 2075 * Issue Port Enable MPI request through admin queue and if the 2076 * async flag is not set wait for the completion of the port 2077 * enable or time out. 2078 * 2079 * Return: 0 on success, non-zero on failures. 
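*
* Usage note (editorial addition): with @async set the request is only
* posted and completion is reported later through
* mpi3mr_port_enable_complete(), so a hypothetical caller could do:
*
*     mrioc->scan_started = 1;
*     if (mpi3mr_issue_port_enable(mrioc, 1))
*         mrioc->scan_started = 0;
*
* With @async cleared the function itself waits up to
* MPI3MR_PORTENABLE_TIMEOUT seconds and, on timeout, escalates to a
* diagnostic-fault reset and marks the controller unrecoverable.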
2080 */
2081 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
2082 {
2083 struct mpi3_port_enable_request pe_req;
2084 int retval = 0;
2085 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
2086
2087 memset(&pe_req, 0, sizeof(pe_req));
2088 mutex_lock(&mrioc->init_cmds.mutex);
2089 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2090 retval = -1;
2091 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
2092 mutex_unlock(&mrioc->init_cmds.mutex);
2093 goto out;
2094 }
2095 mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2096 if (async) {
2097 mrioc->init_cmds.is_waiting = 0;
2098 mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
2099 } else {
2100 mrioc->init_cmds.is_waiting = 1;
2101 mrioc->init_cmds.callback = NULL;
2102 init_completion(&mrioc->init_cmds.done);
2103 }
2104 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2105 pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
2106
2107 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
2108 if (retval) {
2109 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
2110 goto out_unlock;
2111 }
2112 if (!async) {
2113 wait_for_completion_timeout(&mrioc->init_cmds.done,
2114 (pe_timeout * HZ));
2115 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2116 ioc_err(mrioc, "Issue PortEnable: command timed out\n");
2117 retval = -1;
2118 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
2119 mpi3mr_set_diagsave(mrioc);
2120 mpi3mr_issue_reset(mrioc,
2121 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2122 MPI3MR_RESET_FROM_PE_TIMEOUT);
2123 mrioc->unrecoverable = 1;
2124 goto out_unlock;
2125 }
2126 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
2127 }
2128 out_unlock:
2129 mutex_unlock(&mrioc->init_cmds.mutex);
2130 out:
2131 return retval;
2132 }
2133
2134 /**
2135 * mpi3mr_cleanup_resources - Free PCI resources
2136 * @mrioc: Adapter instance reference
2137 *
2138 * Unmap PCI device memory and disable PCI device.
2139 *
2140 * Return: Nothing.
2141 */
2142 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
2143 {
2144 struct pci_dev *pdev = mrioc->pdev;
2145
2146 mpi3mr_cleanup_isr(mrioc);
2147
2148 if (mrioc->sysif_regs) {
2149 iounmap((void __iomem *)mrioc->sysif_regs);
2150 mrioc->sysif_regs = NULL;
2151 }
2152
2153 if (pci_is_enabled(pdev)) {
2154 if (mrioc->bars)
2155 pci_release_selected_regions(pdev, mrioc->bars);
2156 pci_disable_device(pdev);
2157 }
2158 }
2159
2160 /**
2161 * mpi3mr_setup_resources - Enable PCI resources
2162 * @mrioc: Adapter instance reference
2163 *
2164 * Enable PCI device memory, MSI-X registers and set DMA mask.
2165 *
2166 * Return: 0 on success and non-zero on failure.
2167 */
2168 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
2169 {
2170 struct pci_dev *pdev = mrioc->pdev;
2171 u32 memap_sz = 0;
2172 int i, retval = 0, capb = 0;
2173 u16 message_control;
2174 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
2175 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
2176 (sizeof(dma_addr_t) > 4)) ?
DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
2177
2178 if (pci_enable_device_mem(pdev)) {
2179 ioc_err(mrioc, "pci_enable_device_mem: failed\n");
2180 retval = -ENODEV;
2181 goto out_failed;
2182 }
2183
2184 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
2185 if (!capb) {
2186 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
2187 retval = -ENODEV;
2188 goto out_failed;
2189 }
2190 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2191
2192 if (pci_request_selected_regions(pdev, mrioc->bars,
2193 mrioc->driver_name)) {
2194 ioc_err(mrioc, "pci_request_selected_regions: failed\n");
2195 retval = -ENODEV;
2196 goto out_failed;
2197 }
2198
2199 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
2200 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2201 mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
2202 memap_sz = pci_resource_len(pdev, i);
2203 mrioc->sysif_regs =
2204 ioremap(mrioc->sysif_regs_phys, memap_sz);
2205 break;
2206 }
2207 }
2208
2209 pci_set_master(pdev);
2210
2211 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
2212 if (retval) {
2213 if (dma_mask != DMA_BIT_MASK(32)) {
2214 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
2215 dma_mask = DMA_BIT_MASK(32);
2216 retval = dma_set_mask_and_coherent(&pdev->dev,
2217 dma_mask);
2218 }
2219 if (retval) {
2220 mrioc->dma_mask = 0;
2221 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
2222 goto out_failed;
2223 }
2224 }
2225 mrioc->dma_mask = dma_mask;
2226
2227 if (!mrioc->sysif_regs) {
2228 ioc_err(mrioc,
2229 "Unable to map adapter memory or resource not found\n");
2230 retval = -EINVAL;
2231 goto out_failed;
2232 }
2233
2234 pci_read_config_word(pdev, capb + 2, &message_control);
2235 mrioc->msix_count = (message_control & 0x3FF) + 1;
2236
2237 pci_save_state(pdev);
2238
2239 pci_set_drvdata(pdev, mrioc->shost);
2240
2241 mpi3mr_ioc_disable_intr(mrioc);
2242
2243 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
2244 (unsigned long long)mrioc->sysif_regs_phys,
2245 mrioc->sysif_regs, memap_sz);
2246 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
2247 mrioc->msix_count);
2248 return retval;
2249
2250 out_failed:
2251 mpi3mr_cleanup_resources(mrioc);
2252 return retval;
2253 }
2254
2255 /**
2256 * mpi3mr_init_ioc - Initialize the controller
2257 * @mrioc: Adapter instance reference
2258 *
2259 * This is the controller initialization routine, executed either
2260 * after soft reset or from pci probe callback.
2261 * Set up the required resources, memory map the controller
2262 * registers, create admin and operational reply queue pairs,
2263 * allocate required memory for reply pool, sense buffer pool,
2264 * issue IOC init request to the firmware, unmask the events and
2265 * issue port enable to discover SAS/SATA/NVMe devices and RAID
2266 * volumes.
2267 *
2268 * Return: 0 on success and non-zero on failure.
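*
* Error-handling note (editorial addition): any failure after
* mpi3mr_setup_resources() succeeds branches to the out_failed label,
* which calls mpi3mr_cleanup_ioc(), so a probe-time caller only needs to
* check the return value. A minimal sketch, assuming a probe context
* that has already allocated @mrioc:
*
*     retval = mpi3mr_init_ioc(mrioc);
*     if (retval)
*         return retval;    // resources are already released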
2269 */ 2270 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 2271 { 2272 int retval = 0; 2273 enum mpi3mr_iocstate ioc_state; 2274 u64 base_info; 2275 u32 timeout; 2276 u32 ioc_status, ioc_config; 2277 struct mpi3_ioc_facts_data facts_data; 2278 2279 mrioc->change_count = 0; 2280 mrioc->cpu_count = num_online_cpus(); 2281 retval = mpi3mr_setup_resources(mrioc); 2282 if (retval) { 2283 ioc_err(mrioc, "Failed to setup resources:error %d\n", 2284 retval); 2285 goto out_nocleanup; 2286 } 2287 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2288 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 2289 2290 ioc_info(mrioc, "SOD status %x configuration %x\n", 2291 ioc_status, ioc_config); 2292 2293 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 2294 ioc_info(mrioc, "SOD base_info %llx\n", base_info); 2295 2296 /*The timeout value is in 2sec unit, changing it to seconds*/ 2297 mrioc->ready_timeout = 2298 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 2299 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 2300 2301 ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout); 2302 2303 ioc_state = mpi3mr_get_iocstate(mrioc); 2304 ioc_info(mrioc, "IOC in %s state during detection\n", 2305 mpi3mr_iocstate_name(ioc_state)); 2306 2307 if (ioc_state == MRIOC_STATE_BECOMING_READY || 2308 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 2309 timeout = mrioc->ready_timeout * 10; 2310 do { 2311 msleep(100); 2312 } while (--timeout); 2313 2314 ioc_state = mpi3mr_get_iocstate(mrioc); 2315 ioc_info(mrioc, 2316 "IOC in %s state after waiting for reset time\n", 2317 mpi3mr_iocstate_name(ioc_state)); 2318 } 2319 2320 if (ioc_state == MRIOC_STATE_READY) { 2321 retval = mpi3mr_issue_and_process_mur(mrioc, 2322 MPI3MR_RESET_FROM_BRINGUP); 2323 if (retval) { 2324 ioc_err(mrioc, "Failed to MU reset IOC error %d\n", 2325 retval); 2326 } 2327 ioc_state = mpi3mr_get_iocstate(mrioc); 2328 } 2329 if (ioc_state != MRIOC_STATE_RESET) { 2330 mpi3mr_print_fault_info(mrioc); 2331 retval = mpi3mr_issue_reset(mrioc, 2332 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 2333 MPI3MR_RESET_FROM_BRINGUP); 2334 if (retval) { 2335 ioc_err(mrioc, 2336 "%s :Failed to soft reset IOC error %d\n", 2337 __func__, retval); 2338 goto out_failed; 2339 } 2340 } 2341 ioc_state = mpi3mr_get_iocstate(mrioc); 2342 if (ioc_state != MRIOC_STATE_RESET) { 2343 ioc_err(mrioc, "Cannot bring IOC to reset state\n"); 2344 goto out_failed; 2345 } 2346 2347 retval = mpi3mr_setup_admin_qpair(mrioc); 2348 if (retval) { 2349 ioc_err(mrioc, "Failed to setup admin Qs: error %d\n", 2350 retval); 2351 goto out_failed; 2352 } 2353 2354 retval = mpi3mr_bring_ioc_ready(mrioc); 2355 if (retval) { 2356 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 2357 retval); 2358 goto out_failed; 2359 } 2360 2361 retval = mpi3mr_setup_isr(mrioc, 1); 2362 if (retval) { 2363 ioc_err(mrioc, "Failed to setup ISR error %d\n", 2364 retval); 2365 goto out_failed; 2366 } 2367 2368 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 2369 if (retval) { 2370 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 2371 retval); 2372 goto out_failed; 2373 } 2374 2375 mpi3mr_process_factsdata(mrioc, &facts_data); 2376 retval = mpi3mr_check_reset_dma_mask(mrioc); 2377 if (retval) { 2378 ioc_err(mrioc, "Resetting dma mask failed %d\n", 2379 retval); 2380 goto out_failed; 2381 } 2382 2383 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 2384 if (retval) { 2385 ioc_err(mrioc, 2386 "%s :Failed to allocated reply sense buffers %d\n", 2387 __func__, retval); 2388 goto out_failed; 2389 
} 2390 2391 retval = mpi3mr_alloc_chain_bufs(mrioc); 2392 if (retval) { 2393 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 2394 retval); 2395 goto out_failed; 2396 } 2397 2398 retval = mpi3mr_issue_iocinit(mrioc); 2399 if (retval) { 2400 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 2401 retval); 2402 goto out_failed; 2403 } 2404 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; 2405 writel(mrioc->reply_free_queue_host_index, 2406 &mrioc->sysif_regs->reply_free_host_index); 2407 2408 mrioc->sbq_host_index = mrioc->num_sense_bufs; 2409 writel(mrioc->sbq_host_index, 2410 &mrioc->sysif_regs->sense_buffer_free_host_index); 2411 2412 retval = mpi3mr_setup_isr(mrioc, 0); 2413 if (retval) { 2414 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 2415 retval); 2416 goto out_failed; 2417 } 2418 2419 retval = mpi3mr_create_op_queues(mrioc); 2420 if (retval) { 2421 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 2422 retval); 2423 goto out_failed; 2424 } 2425 2426 return retval; 2427 2428 out_failed: 2429 mpi3mr_cleanup_ioc(mrioc); 2430 out_nocleanup: 2431 return retval; 2432 } 2433 2434 /** 2435 * mpi3mr_free_mem - Free memory allocated for a controller 2436 * @mrioc: Adapter instance reference 2437 * 2438 * Free all the memory allocated for a controller. 2439 * 2440 * Return: Nothing. 2441 */ 2442 static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) 2443 { 2444 u16 i; 2445 struct mpi3mr_intr_info *intr_info; 2446 2447 if (mrioc->sense_buf_pool) { 2448 if (mrioc->sense_buf) 2449 dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, 2450 mrioc->sense_buf_dma); 2451 dma_pool_destroy(mrioc->sense_buf_pool); 2452 mrioc->sense_buf = NULL; 2453 mrioc->sense_buf_pool = NULL; 2454 } 2455 if (mrioc->sense_buf_q_pool) { 2456 if (mrioc->sense_buf_q) 2457 dma_pool_free(mrioc->sense_buf_q_pool, 2458 mrioc->sense_buf_q, mrioc->sense_buf_q_dma); 2459 dma_pool_destroy(mrioc->sense_buf_q_pool); 2460 mrioc->sense_buf_q = NULL; 2461 mrioc->sense_buf_q_pool = NULL; 2462 } 2463 2464 if (mrioc->reply_buf_pool) { 2465 if (mrioc->reply_buf) 2466 dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, 2467 mrioc->reply_buf_dma); 2468 dma_pool_destroy(mrioc->reply_buf_pool); 2469 mrioc->reply_buf = NULL; 2470 mrioc->reply_buf_pool = NULL; 2471 } 2472 if (mrioc->reply_free_q_pool) { 2473 if (mrioc->reply_free_q) 2474 dma_pool_free(mrioc->reply_free_q_pool, 2475 mrioc->reply_free_q, mrioc->reply_free_q_dma); 2476 dma_pool_destroy(mrioc->reply_free_q_pool); 2477 mrioc->reply_free_q = NULL; 2478 mrioc->reply_free_q_pool = NULL; 2479 } 2480 2481 for (i = 0; i < mrioc->num_op_req_q; i++) 2482 mpi3mr_free_op_req_q_segments(mrioc, i); 2483 2484 for (i = 0; i < mrioc->num_op_reply_q; i++) 2485 mpi3mr_free_op_reply_q_segments(mrioc, i); 2486 2487 for (i = 0; i < mrioc->intr_info_count; i++) { 2488 intr_info = mrioc->intr_info + i; 2489 if (intr_info) 2490 intr_info->op_reply_q = NULL; 2491 } 2492 2493 kfree(mrioc->req_qinfo); 2494 mrioc->req_qinfo = NULL; 2495 mrioc->num_op_req_q = 0; 2496 2497 kfree(mrioc->op_reply_qinfo); 2498 mrioc->op_reply_qinfo = NULL; 2499 mrioc->num_op_reply_q = 0; 2500 2501 kfree(mrioc->init_cmds.reply); 2502 mrioc->init_cmds.reply = NULL; 2503 2504 kfree(mrioc->chain_bitmap); 2505 mrioc->chain_bitmap = NULL; 2506 2507 if (mrioc->chain_buf_pool) { 2508 for (i = 0; i < mrioc->chain_buf_count; i++) { 2509 if (mrioc->chain_sgl_list[i].addr) { 2510 dma_pool_free(mrioc->chain_buf_pool, 2511 mrioc->chain_sgl_list[i].addr, 2512 mrioc->chain_sgl_list[i].dma_addr); 2513 
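/* Clear the stale virtual address so a repeated cleanup pass cannot double-free this chain buffer. */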
mrioc->chain_sgl_list[i].addr = NULL;
2514 }
2515 }
2516 dma_pool_destroy(mrioc->chain_buf_pool);
2517 mrioc->chain_buf_pool = NULL;
2518 }
2519
2520 kfree(mrioc->chain_sgl_list);
2521 mrioc->chain_sgl_list = NULL;
2522
2523 if (mrioc->admin_reply_base) {
2524 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2525 mrioc->admin_reply_base, mrioc->admin_reply_dma);
2526 mrioc->admin_reply_base = NULL;
2527 }
2528 if (mrioc->admin_req_base) {
2529 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2530 mrioc->admin_req_base, mrioc->admin_req_dma);
2531 mrioc->admin_req_base = NULL;
2532 }
2533 }
2534
2535 /**
2536 * mpi3mr_issue_ioc_shutdown - Shutdown controller
2537 * @mrioc: Adapter instance reference
2538 *
2539 * Send shutdown notification to the controller and wait up to
2540 * the shutdown timeout for it to complete.
2541 *
2542 * Return: Nothing.
2543 */
2544 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
2545 {
2546 u32 ioc_config, ioc_status;
2547 u8 retval = 1;
2548 u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
2549
2550 ioc_info(mrioc, "Issuing shutdown Notification\n");
2551 if (mrioc->unrecoverable) {
2552 ioc_warn(mrioc,
2553 "IOC is unrecoverable shutdown is not issued\n");
2554 return;
2555 }
2556 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2557 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
2558 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
2559 ioc_info(mrioc, "shutdown already in progress\n");
2560 return;
2561 }
2562
2563 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2564 ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
2565 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
2566
2567 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
2568
2569 if (mrioc->facts.shutdown_timeout)
2570 timeout = mrioc->facts.shutdown_timeout * 10;
2571
2572 do {
2573 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2574 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
2575 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
2576 retval = 0;
2577 break;
2578 }
2579 msleep(100);
2580 } while (--timeout);
2581
2582 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2583 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2584
2585 if (retval) {
2586 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
2587 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
2588 ioc_warn(mrioc,
2589 "shutdown still in progress after timeout\n");
2590 }
2591
2592 ioc_info(mrioc,
2593 "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
2594 (!retval) ? "successful" : "failed", ioc_status,
2595 ioc_config);
2596 }
2597
2598 /**
2599 * mpi3mr_cleanup_ioc - Cleanup controller
2600 * @mrioc: Adapter instance reference
2601 *
2602 * Controller cleanup handler: a message unit reset or soft reset
2603 * and a shutdown notification are issued to the controller, and
2604 * the associated memory resources are freed.
2605 *
2606 * Return: Nothing.
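*
* Ordering note (editorial addition, derived from the function body):
* interrupts are disabled first; then, only when the controller is in the
* READY state and no reset is in progress, a message unit reset (falling
* back to a soft reset on failure) and the shutdown notification are
* issued; memory and PCI resources are freed unconditionally afterwards.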
2607 */ 2608 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) 2609 { 2610 enum mpi3mr_iocstate ioc_state; 2611 2612 mpi3mr_ioc_disable_intr(mrioc); 2613 2614 ioc_state = mpi3mr_get_iocstate(mrioc); 2615 2616 if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && 2617 (ioc_state == MRIOC_STATE_READY)) { 2618 if (mpi3mr_issue_and_process_mur(mrioc, 2619 MPI3MR_RESET_FROM_CTLR_CLEANUP)) 2620 mpi3mr_issue_reset(mrioc, 2621 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 2622 MPI3MR_RESET_FROM_MUR_FAILURE); 2623 2624 mpi3mr_issue_ioc_shutdown(mrioc); 2625 } 2626 2627 mpi3mr_free_mem(mrioc); 2628 mpi3mr_cleanup_resources(mrioc); 2629 } 2630 2631 /** 2632 * mpi3mr_soft_reset_handler - Reset the controller 2633 * @mrioc: Adapter instance reference 2634 * @reset_reason: Reset reason code 2635 * @snapdump: Flag to generate snapdump in firmware or not 2636 * 2637 * TBD 2638 * 2639 * Return: 0 on success, non-zero on failure. 2640 */ 2641 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, 2642 u32 reset_reason, u8 snapdump) 2643 { 2644 return 0; 2645 } 2646