// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2021 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

/*
 * mpi3mr_writeq - 64-bit register write helper.
 *
 * Uses the native writeq() when available; otherwise falls back to two
 * 32-bit writes: low dword to addr, high dword to addr + 4.
 * NOTE(review): the split write is not atomic with respect to the
 * device; presumably the hardware latches the value on the high-dword
 * write - confirm against the controller spec.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	/* low 32 bits first, then the high 32 bits at addr + 4 */
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/*
 * mpi3mr_check_req_qfull - check if an operational request queue is
 * full. The queue is "full" when advancing the producer index (pi)
 * would make it equal to the consumer index (ci); one slot is always
 * kept unused so full and empty are distinguishable.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	/* full when ci == pi + 1, including the wrap-around case */
	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/* Wait for any in-flight interrupt handlers on all vectors to finish. */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/*
 * mpi3mr_ioc_disable_intr - mark interrupts disabled and synchronize.
 *
 * Clears the software intr_enabled flag (the ISR checks it and returns
 * IRQ_NONE when clear) and waits for running handlers to drain.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/* Re-allow interrupt processing (software flag only). */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/*
 * mpi3mr_cleanup_isr - free all registered IRQs, the intr_info array
 * and the PCI IRQ vectors. Safe to call when nothing was set up.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	pci_free_irq_vectors(mrioc->pdev);
}

/*
 * mpi3mr_add_sg_single - populate one MPI3 simple SGE at @paddr with
 * the given flags, length and DMA address (little-endian on the wire).
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/*
 * mpi3mr_build_zero_len_sge - build a zero-length end-of-list SGE
 * (address -1, length 0) for requests carrying no data.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/*
 * mpi3mr_get_reply_virt_addr - translate a reply frame DMA address to
 * its kernel virtual address within the contiguous reply buffer pool.
 *
 * Return: virtual address, or NULL if @phys_addr is zero or outside
 * the pool's [reply_buf_dma, reply_buf_dma_max_address] range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/*
 * mpi3mr_get_sensebuf_virt_addr - translate a sense buffer DMA address
 * to its kernel virtual address within the sense buffer pool.
 * NOTE(review): unlike the reply pool lookup above, there is no upper
 * bound check here - confirm callers only pass pool addresses.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/*
 * mpi3mr_repost_reply_buf - return a consumed reply frame to the
 * firmware by writing its DMA address into the reply free queue and
 * advancing (with wrap) the host index register.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;

	spin_lock(&mrioc->reply_free_queue_lock);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock(&mrioc->reply_free_queue_lock);
}

/*
 * mpi3mr_repost_sense_buf - return a consumed sense buffer to the
 * firmware via the sense buffer free queue (same wrap-and-ring scheme
 * as the reply free queue above).
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;

	spin_lock(&mrioc->sbq_lock);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock(&mrioc->sbq_lock);
}

/*
 * mpi3mr_handle_events - process an event notification reply; only the
 * IOC change count is recorded here.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
}

/*
 * mpi3mr_get_drv_cmd - map a host tag to its driver command tracker.
 *
 * INITCMDS tags resolve to init_cmds; an INVALID tag with an event
 * notification reply is handled inline (no tracker); anything else has
 * no tracker and returns NULL.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}

	return NULL;
}

/*
 * mpi3mr_process_admin_reply_desc - decode one admin reply descriptor.
 *
 * Extracts host tag / IOC status / loginfo according to the descriptor
 * type (status, address-reply or success), completes the matching
 * driver command (copying the full reply frame when one exists), and
 * reports the reply frame DMA address through @reply_dma so the caller
 * can repost it. A SCSI IO reply's sense buffer is reposted here.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* status carried inline in the descriptor itself */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* descriptor points at a full reply frame in host memory */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				/* snapshot the frame before it is reposted */
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set for SCSI IO replies (scsi_reply valid) */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * mpi3mr_process_admin_reply_q - drain the admin reply queue.
 *
 * Walks descriptors while their phase bit matches the expected phase,
 * processing each and reposting its reply frame; the phase toggles on
 * queue wrap. The new consumer index is published to the controller
 * only once, after the loop.
 *
 * Return: number of admin replies processed (0 if the queue was empty).
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* phase mismatch means no new descriptor at the current CI */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/* locate the segment containing this CI, then index within it */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/*
 * mpi3mr_process_op_reply_q - drain one operational reply queue.
 *
 * Same phase-bit walk as the admin reply queue: process descriptors
 * while the phase matches, updating the originating request queue's CI
 * from each descriptor and reposting reply frames, then publish the
 * final CI to the controller once.
 *
 * Return: number of operational replies processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	/* queue IDs are 1-based; array indices are 0-based */
	reply_qidx = op_reply_q->qid - 1;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* free up request slots the firmware has consumed */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	return num_op_reply;
}

/*
 * mpi3mr_isr_primary - hard-IRQ half of the threaded handler.
 *
 * MSI-X vector 0 services the admin reply queue; other vectors do no
 * work here (operational queues are handled elsewhere). Returns
 * IRQ_HANDLED only when admin replies were actually consumed.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	/* interrupts soft-disabled (e.g. during reset): not ours */
	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);

	if (num_admin_replies)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* Registered hard-IRQ handler: thin wrapper around the primary ISR. */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	return ret;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	/* threaded half: currently a no-op placeholder */
	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 *
 * Return: 0 on success and non zero on failures.
442 */ 443 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 444 { 445 struct pci_dev *pdev = mrioc->pdev; 446 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 447 int retval = 0; 448 449 intr_info->mrioc = mrioc; 450 intr_info->msix_index = index; 451 intr_info->op_reply_q = NULL; 452 453 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 454 mrioc->driver_name, mrioc->id, index); 455 456 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 457 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 458 if (retval) { 459 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 460 intr_info->name, pci_irq_vector(pdev, index)); 461 return retval; 462 } 463 464 return retval; 465 } 466 467 /** 468 * mpi3mr_setup_isr - Setup ISR for the controller 469 * @mrioc: Adapter instance reference 470 * @setup_one: Request one IRQ or more 471 * 472 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 473 * 474 * Return: 0 on success and non zero on failures. 475 */ 476 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 477 { 478 unsigned int irq_flags = PCI_IRQ_MSIX; 479 u16 max_vectors = 0, i; 480 int retval = 0; 481 struct irq_affinity desc = { .pre_vectors = 1}; 482 483 mpi3mr_cleanup_isr(mrioc); 484 485 if (setup_one || reset_devices) 486 max_vectors = 1; 487 else { 488 max_vectors = 489 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 490 491 ioc_info(mrioc, 492 "MSI-X vectors supported: %d, no of cores: %d,", 493 mrioc->msix_count, mrioc->cpu_count); 494 ioc_info(mrioc, 495 "MSI-x vectors requested: %d\n", max_vectors); 496 } 497 498 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 499 500 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 501 i = pci_alloc_irq_vectors_affinity(mrioc->pdev, 502 1, max_vectors, irq_flags, &desc); 503 if (i <= 0) { 504 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 505 goto out_failed; 506 } 507 if (i != max_vectors) { 508 ioc_info(mrioc, 509 "allocated vectors (%d) are less than configured (%d)\n", 510 i, max_vectors); 511 /* 512 * If only one MSI-x is allocated, then MSI-x 0 will be shared 513 * between Admin queue and operational queue 514 */ 515 if (i == 1) 516 mrioc->op_reply_q_offset = 0; 517 518 max_vectors = i; 519 } 520 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 521 GFP_KERNEL); 522 if (!mrioc->intr_info) { 523 retval = -1; 524 pci_free_irq_vectors(mrioc->pdev); 525 goto out_failed; 526 } 527 for (i = 0; i < max_vectors; i++) { 528 retval = mpi3mr_request_irq(mrioc, i); 529 if (retval) { 530 mrioc->intr_info_count = i; 531 goto out_failed; 532 } 533 } 534 mrioc->intr_info_count = max_vectors; 535 mpi3mr_ioc_enable_intr(mrioc); 536 return retval; 537 out_failed: 538 mpi3mr_cleanup_isr(mrioc); 539 540 return retval; 541 } 542 543 static const struct { 544 enum mpi3mr_iocstate value; 545 char *name; 546 } mrioc_states[] = { 547 { MRIOC_STATE_READY, "ready" }, 548 { MRIOC_STATE_FAULT, "fault" }, 549 { MRIOC_STATE_RESET, "reset" }, 550 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 551 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 552 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 553 }; 554 555 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 556 { 557 int i; 558 char *name = NULL; 559 560 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 561 if (mrioc_states[i].value == mrioc_state) { 562 name = mrioc_states[i].name; 563 break; 564 } 565 } 566 return name; 567 } 568 569 /** 570 * mpi3mr_print_fault_info - Display fault information 571 * @mrioc: Adapter instance reference 572 * 573 * Display the controller fault information if there is a 574 * controller fault. 
575 * 576 * Return: Nothing. 577 */ 578 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 579 { 580 u32 ioc_status, code, code1, code2, code3; 581 582 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 583 584 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 585 code = readl(&mrioc->sysif_regs->fault); 586 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 587 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 588 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 589 590 ioc_info(mrioc, 591 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 592 code, code1, code2, code3); 593 } 594 } 595 596 /** 597 * mpi3mr_get_iocstate - Get IOC State 598 * @mrioc: Adapter instance reference 599 * 600 * Return a proper IOC state enum based on the IOC status and 601 * IOC configuration and unrcoverable state of the controller. 602 * 603 * Return: Current IOC state. 604 */ 605 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 606 { 607 u32 ioc_status, ioc_config; 608 u8 ready, enabled; 609 610 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 611 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 612 613 if (mrioc->unrecoverable) 614 return MRIOC_STATE_UNRECOVERABLE; 615 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 616 return MRIOC_STATE_FAULT; 617 618 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 619 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 620 621 if (ready && enabled) 622 return MRIOC_STATE_READY; 623 if ((!ready) && (!enabled)) 624 return MRIOC_STATE_RESET; 625 if ((!ready) && (enabled)) 626 return MRIOC_STATE_BECOMING_READY; 627 628 return MRIOC_STATE_RESET_REQUESTED; 629 } 630 631 /** 632 * mpi3mr_clear_reset_history - clear reset history 633 * @mrioc: Adapter instance reference 634 * 635 * Write the reset history bit in IOC status to clear the bit, 636 * if it is already set. 637 * 638 * Return: Nothing. 
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* write-1-to-clear: writing the value back clears the bit */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* record why we reset for post-mortem, then clear ENABLE_IOC */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* poll every 100ms, i.e. ready_timeout seconds in total */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			/*
			 * MUR completed successfully only when the IOC is
			 * neither ready, faulted, nor still enabled.
			 */
			if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
			    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
			    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
				retval = 0;
				break;
			}
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, timeout;
	enum mpi3mr_iocstate current_state;

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* poll every 100ms for up to ready_timeout seconds */
	timeout = mrioc->ready_timeout * 10;
	do {
		current_state = mpi3mr_get_iocstate(mrioc);
		if (current_state == MRIOC_STATE_READY)
			return 0;
		msleep(100);
	} while (--timeout);

	return -1;
}

/**
 * mpi3mr_set_diagsave - Set diag save bit for snapdump
 * @mrioc: Adapter reference
 *
 * Set diag save bit in IOC configuration register to enable
 * snapdump.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config;

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
}

/**
 * mpi3mr_issue_reset - Issue reset to the controller
 * @mrioc: Adapter reference
 * @reset_type: Reset type
 * @reset_reason: Reset reason code
 *
 * TBD
 *
 * Return: 0 on success, non-zero on failure.
757 */ 758 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 759 u32 reset_reason) 760 { 761 return 0; 762 } 763 764 /** 765 * mpi3mr_admin_request_post - Post request to admin queue 766 * @mrioc: Adapter reference 767 * @admin_req: MPI3 request 768 * @admin_req_sz: Request size 769 * @ignore_reset: Ignore reset in process 770 * 771 * Post the MPI3 request into admin request queue and 772 * inform the controller, if the queue is full return 773 * appropriate error. 774 * 775 * Return: 0 on success, non-zero on failure. 776 */ 777 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 778 u16 admin_req_sz, u8 ignore_reset) 779 { 780 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 781 int retval = 0; 782 unsigned long flags; 783 u8 *areq_entry; 784 785 if (mrioc->unrecoverable) { 786 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 787 return -EFAULT; 788 } 789 790 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 791 areq_pi = mrioc->admin_req_pi; 792 areq_ci = mrioc->admin_req_ci; 793 max_entries = mrioc->num_admin_req; 794 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 795 (areq_pi == (max_entries - 1)))) { 796 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 797 retval = -EAGAIN; 798 goto out; 799 } 800 if (!ignore_reset && mrioc->reset_in_progress) { 801 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 802 retval = -EAGAIN; 803 goto out; 804 } 805 areq_entry = (u8 *)mrioc->admin_req_base + 806 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 807 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 808 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 809 810 if (++areq_pi == max_entries) 811 areq_pi = 0; 812 mrioc->admin_req_pi = areq_pi; 813 814 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 815 816 out: 817 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 818 819 return retval; 820 } 821 822 /** 823 * mpi3mr_free_op_req_q_segments - free request memory segments 824 * @mrioc: 
Adapter instance reference 825 * @q_idx: operational request queue index 826 * 827 * Free memory segments allocated for operational request queue 828 * 829 * Return: Nothing. 830 */ 831 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 832 { 833 u16 j; 834 int size; 835 struct segments *segments; 836 837 segments = mrioc->req_qinfo[q_idx].q_segments; 838 if (!segments) 839 return; 840 841 if (mrioc->enable_segqueue) { 842 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 843 if (mrioc->req_qinfo[q_idx].q_segment_list) { 844 dma_free_coherent(&mrioc->pdev->dev, 845 MPI3MR_MAX_SEG_LIST_SIZE, 846 mrioc->req_qinfo[q_idx].q_segment_list, 847 mrioc->req_qinfo[q_idx].q_segment_list_dma); 848 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 849 } 850 } else 851 size = mrioc->req_qinfo[q_idx].num_requests * 852 mrioc->facts.op_req_sz; 853 854 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 855 if (!segments[j].segment) 856 continue; 857 dma_free_coherent(&mrioc->pdev->dev, 858 size, segments[j].segment, segments[j].segment_dma); 859 segments[j].segment = NULL; 860 } 861 kfree(mrioc->req_qinfo[q_idx].q_segments); 862 mrioc->req_qinfo[q_idx].q_segments = NULL; 863 mrioc->req_qinfo[q_idx].qid = 0; 864 } 865 866 /** 867 * mpi3mr_free_op_reply_q_segments - free reply memory segments 868 * @mrioc: Adapter instance reference 869 * @q_idx: operational reply queue index 870 * 871 * Free memory segments allocated for operational reply queue 872 * 873 * Return: Nothing. 
874 */ 875 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 876 { 877 u16 j; 878 int size; 879 struct segments *segments; 880 881 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 882 if (!segments) 883 return; 884 885 if (mrioc->enable_segqueue) { 886 size = MPI3MR_OP_REP_Q_SEG_SIZE; 887 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 888 dma_free_coherent(&mrioc->pdev->dev, 889 MPI3MR_MAX_SEG_LIST_SIZE, 890 mrioc->op_reply_qinfo[q_idx].q_segment_list, 891 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 892 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 893 } 894 } else 895 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 896 mrioc->op_reply_desc_sz; 897 898 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 899 if (!segments[j].segment) 900 continue; 901 dma_free_coherent(&mrioc->pdev->dev, 902 size, segments[j].segment, segments[j].segment_dma); 903 segments[j].segment = NULL; 904 } 905 906 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 907 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 908 mrioc->op_reply_qinfo[q_idx].qid = 0; 909 } 910 911 /** 912 * mpi3mr_delete_op_reply_q - delete operational reply queue 913 * @mrioc: Adapter instance reference 914 * @qidx: operational reply queue index 915 * 916 * Delete operatinal reply queue by issuing MPI request 917 * through admin queue. 918 * 919 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = mrioc->op_reply_qinfo[qidx].qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created */
	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	memset(&delq_req, 0, sizeof(delq_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue teardown must proceed during resets */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* timeout: capture diagnostics and give up on the IOC */
		ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		mrioc->unrecoverable = 1;

		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* detach the queue from its MSI-X vector, then free its memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* fixed-size segments; descriptors per segment derived */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* unsegmented: one big buffer holding the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		/*
		 * On failure the partially built queue is left for the
		 * caller to release via mpi3mr_free_op_reply_q_segments().
		 */
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* fixed-size segments; frames per segment derived */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		/* unsegmented: one big buffer holding the whole queue */
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		/*
		 * On failure the partially built queue is left for the
		 * caller to release via mpi3mr_free_op_req_q_segments().
		 */
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc:
Adapter instance reference 1110 * @qidx: operational reply queue index 1111 * 1112 * Create operatinal reply queue by issuing MPI request 1113 * through admin queue. 1114 * 1115 * Return: 0 on success, non-zero on failure. 1116 */ 1117 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1118 { 1119 struct mpi3_create_reply_queue_request create_req; 1120 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1121 int retval = 0; 1122 u16 reply_qid = 0, midx; 1123 1124 reply_qid = op_reply_q->qid; 1125 1126 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1127 1128 if (reply_qid) { 1129 retval = -1; 1130 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1131 reply_qid); 1132 1133 return retval; 1134 } 1135 1136 reply_qid = qidx + 1; 1137 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1138 op_reply_q->ci = 0; 1139 op_reply_q->ephase = 1; 1140 1141 if (!op_reply_q->q_segments) { 1142 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1143 if (retval) { 1144 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1145 goto out; 1146 } 1147 } 1148 1149 memset(&create_req, 0, sizeof(create_req)); 1150 mutex_lock(&mrioc->init_cmds.mutex); 1151 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1152 retval = -1; 1153 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1154 goto out; 1155 } 1156 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1157 mrioc->init_cmds.is_waiting = 1; 1158 mrioc->init_cmds.callback = NULL; 1159 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1160 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1161 create_req.queue_id = cpu_to_le16(reply_qid); 1162 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1163 create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); 1164 if (mrioc->enable_segqueue) { 1165 create_req.flags |= 1166 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1167 create_req.base_address = cpu_to_le64( 1168 
op_reply_q->q_segment_list_dma); 1169 } else 1170 create_req.base_address = cpu_to_le64( 1171 op_reply_q->q_segments[0].segment_dma); 1172 1173 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1174 1175 init_completion(&mrioc->init_cmds.done); 1176 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1177 sizeof(create_req), 1); 1178 if (retval) { 1179 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1180 goto out_unlock; 1181 } 1182 wait_for_completion_timeout(&mrioc->init_cmds.done, 1183 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1184 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1185 ioc_err(mrioc, "CreateRepQ: command timed out\n"); 1186 mpi3mr_set_diagsave(mrioc); 1187 mpi3mr_issue_reset(mrioc, 1188 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1189 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1190 mrioc->unrecoverable = 1; 1191 retval = -1; 1192 goto out_unlock; 1193 } 1194 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1195 != MPI3_IOCSTATUS_SUCCESS) { 1196 ioc_err(mrioc, 1197 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1198 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1199 mrioc->init_cmds.ioc_loginfo); 1200 retval = -1; 1201 goto out_unlock; 1202 } 1203 op_reply_q->qid = reply_qid; 1204 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1205 1206 out_unlock: 1207 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1208 mutex_unlock(&mrioc->init_cmds.mutex); 1209 out: 1210 1211 return retval; 1212 } 1213 1214 /** 1215 * mpi3mr_create_op_req_q - create operational request queue 1216 * @mrioc: Adapter instance reference 1217 * @idx: operational request queue index 1218 * @reply_qid: Reply queue ID 1219 * 1220 * Create operatinal request queue by issuing MPI request 1221 * through admin queue. 1222 * 1223 * Return: 0 on success, non-zero on failure. 
1224 */ 1225 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1226 u16 reply_qid) 1227 { 1228 struct mpi3_create_request_queue_request create_req; 1229 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1230 int retval = 0; 1231 u16 req_qid = 0; 1232 1233 req_qid = op_req_q->qid; 1234 1235 if (req_qid) { 1236 retval = -1; 1237 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1238 req_qid); 1239 1240 return retval; 1241 } 1242 req_qid = idx + 1; 1243 1244 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1245 op_req_q->ci = 0; 1246 op_req_q->pi = 0; 1247 op_req_q->reply_qid = reply_qid; 1248 spin_lock_init(&op_req_q->q_lock); 1249 1250 if (!op_req_q->q_segments) { 1251 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1252 if (retval) { 1253 mpi3mr_free_op_req_q_segments(mrioc, idx); 1254 goto out; 1255 } 1256 } 1257 1258 memset(&create_req, 0, sizeof(create_req)); 1259 mutex_lock(&mrioc->init_cmds.mutex); 1260 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1261 retval = -1; 1262 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1263 goto out; 1264 } 1265 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1266 mrioc->init_cmds.is_waiting = 1; 1267 mrioc->init_cmds.callback = NULL; 1268 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1269 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1270 create_req.queue_id = cpu_to_le16(req_qid); 1271 if (mrioc->enable_segqueue) { 1272 create_req.flags = 1273 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1274 create_req.base_address = cpu_to_le64( 1275 op_req_q->q_segment_list_dma); 1276 } else 1277 create_req.base_address = cpu_to_le64( 1278 op_req_q->q_segments[0].segment_dma); 1279 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1280 create_req.size = cpu_to_le16(op_req_q->num_requests); 1281 1282 init_completion(&mrioc->init_cmds.done); 1283 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1284 sizeof(create_req), 1); 1285 if (retval) { 1286 
ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1287 goto out_unlock; 1288 } 1289 wait_for_completion_timeout(&mrioc->init_cmds.done, 1290 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1291 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1292 ioc_err(mrioc, "CreateReqQ: command timed out\n"); 1293 mpi3mr_set_diagsave(mrioc); 1294 if (mpi3mr_issue_reset(mrioc, 1295 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1296 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT)) 1297 mrioc->unrecoverable = 1; 1298 retval = -1; 1299 goto out_unlock; 1300 } 1301 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1302 != MPI3_IOCSTATUS_SUCCESS) { 1303 ioc_err(mrioc, 1304 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1305 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1306 mrioc->init_cmds.ioc_loginfo); 1307 retval = -1; 1308 goto out_unlock; 1309 } 1310 op_req_q->qid = req_qid; 1311 1312 out_unlock: 1313 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1314 mutex_unlock(&mrioc->init_cmds.mutex); 1315 out: 1316 1317 return retval; 1318 } 1319 1320 /** 1321 * mpi3mr_create_op_queues - create operational queue pairs 1322 * @mrioc: Adapter instance reference 1323 * 1324 * Allocate memory for operational queue meta data and call 1325 * create request and reply queue functions. 1326 * 1327 * Return: 0 on success, non-zero on failures. 
1328 */ 1329 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1330 { 1331 int retval = 0; 1332 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1333 1334 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1335 mrioc->facts.max_op_req_q); 1336 1337 msix_count_op_q = 1338 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1339 if (!mrioc->num_queues) 1340 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1341 num_queues = mrioc->num_queues; 1342 ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", 1343 num_queues); 1344 1345 if (!mrioc->req_qinfo) { 1346 mrioc->req_qinfo = kcalloc(num_queues, 1347 sizeof(struct op_req_qinfo), GFP_KERNEL); 1348 if (!mrioc->req_qinfo) { 1349 retval = -1; 1350 goto out_failed; 1351 } 1352 1353 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1354 num_queues, GFP_KERNEL); 1355 if (!mrioc->op_reply_qinfo) { 1356 retval = -1; 1357 goto out_failed; 1358 } 1359 } 1360 1361 if (mrioc->enable_segqueue) 1362 ioc_info(mrioc, 1363 "allocating operational queues through segmented queues\n"); 1364 1365 for (i = 0; i < num_queues; i++) { 1366 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1367 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1368 break; 1369 } 1370 if (mpi3mr_create_op_req_q(mrioc, i, 1371 mrioc->op_reply_qinfo[i].qid)) { 1372 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1373 mpi3mr_delete_op_reply_q(mrioc, i); 1374 break; 1375 } 1376 } 1377 1378 if (i == 0) { 1379 /* Not even one queue is created successfully*/ 1380 retval = -1; 1381 goto out_failed; 1382 } 1383 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1384 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1385 mrioc->num_op_reply_q); 1386 1387 return retval; 1388 out_failed: 1389 kfree(mrioc->req_qinfo); 1390 mrioc->req_qinfo = NULL; 1391 1392 kfree(mrioc->op_reply_qinfo); 1393 mrioc->op_reply_qinfo = NULL; 1394 1395 return retval; 1396 } 1397 1398 /** 1399 * mpi3mr_op_request_post - Post request to operational 
queue 1400 * @mrioc: Adapter reference 1401 * @op_req_q: Operational request queue info 1402 * @req: MPI3 request 1403 * 1404 * Post the MPI3 request into operational request queue and 1405 * inform the controller, if the queue is full return 1406 * appropriate error. 1407 * 1408 * Return: 0 on success, non-zero on failure. 1409 */ 1410 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 1411 struct op_req_qinfo *op_req_q, u8 *req) 1412 { 1413 u16 pi = 0, max_entries, reply_qidx = 0, midx; 1414 int retval = 0; 1415 unsigned long flags; 1416 u8 *req_entry; 1417 void *segment_base_addr; 1418 u16 req_sz = mrioc->facts.op_req_sz; 1419 struct segments *segments = op_req_q->q_segments; 1420 1421 reply_qidx = op_req_q->reply_qid - 1; 1422 1423 if (mrioc->unrecoverable) 1424 return -EFAULT; 1425 1426 spin_lock_irqsave(&op_req_q->q_lock, flags); 1427 pi = op_req_q->pi; 1428 max_entries = op_req_q->num_requests; 1429 1430 if (mpi3mr_check_req_qfull(op_req_q)) { 1431 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 1432 reply_qidx, mrioc->op_reply_q_offset); 1433 mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); 1434 1435 if (mpi3mr_check_req_qfull(op_req_q)) { 1436 retval = -EAGAIN; 1437 goto out; 1438 } 1439 } 1440 1441 if (mrioc->reset_in_progress) { 1442 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 1443 retval = -EAGAIN; 1444 goto out; 1445 } 1446 1447 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 1448 req_entry = (u8 *)segment_base_addr + 1449 ((pi % op_req_q->segment_qd) * req_sz); 1450 1451 memset(req_entry, 0, req_sz); 1452 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 1453 1454 if (++pi == max_entries) 1455 pi = 0; 1456 op_req_q->pi = pi; 1457 1458 writel(op_req_q->pi, 1459 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 1460 1461 out: 1462 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 1463 return retval; 1464 } 1465 1466 /** 1467 * mpi3mr_watchdog_work - watchdog thread to monitor faults 1468 * @work: work struct 
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 fault, host_diagnostic;

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT) {
		fault = readl(&mrioc->sysif_regs->fault) &
		    MPI3_SYSIF_FAULT_CODE_MASK;
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
			/*
			 * Firmware is saving diagnostic data; wait up to
			 * MPI3_SYSIF_DIAG_SAVE_TIMEOUT ticks before acting
			 * on the fault.
			 */
			if (!mrioc->diagsave_timeout) {
				mpi3mr_print_fault_info(mrioc);
				ioc_warn(mrioc, "Diag save in progress\n");
			}
			if ((mrioc->diagsave_timeout++) <=
			    MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
				goto schedule_work;
		} else
			mpi3mr_print_fault_info(mrioc);
		mrioc->diagsave_timeout = 0;

		if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
			/* Not recoverable by a soft reset */
			ioc_info(mrioc,
			    "Factory Reset fault occurred marking controller as unrecoverable"
			    );
			mrioc->unrecoverable = 1;
			goto out;
		}

		/*
		 * A reset is already under way (by firmware or this driver);
		 * stop the watchdog without rescheduling.
		 */
		if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
		    (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
		    (mrioc->reset_in_progress))
			goto out;
		if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
		else
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_FAULT_WATCH, 0);
	}

schedule_work:
	/* Re-arm only while the workqueue exists (see mpi3mr_stop_watchdog) */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
out:
	return;
}

/**
 * mpi3mr_start_watchdog - Start watchdog
 * @mrioc: Adapter instance reference
 *
 * Create and start the watchdog thread to monitor controller
 * faults.
 *
 * Return: Nothing.
 */
void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
{
	/* Idempotent: do nothing if the watchdog already runs */
	if (mrioc->watchdog_work_q)
		return;

	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
	snprintf(mrioc->watchdog_work_q_name,
	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
	    mrioc->id);
	mrioc->watchdog_work_q =
	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
	if (!mrioc->watchdog_work_q) {
		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}

	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
}

/**
 * mpi3mr_stop_watchdog - Stop watchdog
 * @mrioc: Adapter instance reference
 *
 * Stop the watchdog thread created to monitor controller
 * faults.
 *
 * Return: Nothing.
 */
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/*
	 * Clear watchdog_work_q under the lock first so the work item
	 * cannot re-arm itself, then cancel/flush and destroy the queue.
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	wq = mrioc->watchdog_work_q;
	mrioc->watchdog_work_q = NULL;
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	/*
	 * NOTE(review): admin_req_base is set to NULL just above, so this
	 * guard is currently always true — presumably kept for a future
	 * retain-across-reset path; confirm.
	 */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Register both queues: reply depth in the high 16 bits, request
	 * depth in the low 16 bits, then base addresses and indexes.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA bounce buffer the firmware writes the facts data into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller for diagnosis, give up */
		ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		mrioc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer before it is freed below */
	memcpy(facts_data, (u8 *)data, data_len);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Check and set the new DMA mask
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask; never widen beyond what is already set */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver .
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Length field is in units of dwords; warn on mismatch but continue */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Cross-check the request frame size advertised by the hardware
	 * register against the one reported by IOC Facts.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	/* Convert each facts field to CPU endianness and cache it */
	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
	mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pc_ie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Clamp the driver's MSI-x usage to what firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
	    mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));

	/* Reserve request slots for internal/admin commands */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	dma_addr_t phy_addr;

	/* Already allocated (e.g. re-init path): just repost the buffers */
	if (mrioc->init_cmds.reply)
		goto post_reply_sbuf;

	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	/* The +1 keeps one slot empty to distinguish full from empty */
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

post_reply_sbuf:
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	/* Terminating zero entry (queue is sized num_reply_bufs + 1) */
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
	return retval;

out_failed:
	/* Partially-allocated pools are left for the teardown path to free */
	retval = -1;
	return retval;
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
2021 */ 2022 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) 2023 { 2024 struct mpi3_ioc_init_request iocinit_req; 2025 struct mpi3_driver_info_layout *drv_info; 2026 dma_addr_t data_dma; 2027 u32 data_len = sizeof(*drv_info); 2028 int retval = 0; 2029 ktime_t current_time; 2030 2031 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 2032 GFP_KERNEL); 2033 if (!drv_info) { 2034 retval = -1; 2035 goto out; 2036 } 2037 drv_info->information_length = cpu_to_le32(data_len); 2038 strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); 2039 strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); 2040 drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; 2041 strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); 2042 drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; 2043 strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); 2044 strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); 2045 strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); 2046 drv_info->driver_capabilities = 0; 2047 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, 2048 sizeof(mrioc->driver_info)); 2049 2050 memset(&iocinit_req, 0, sizeof(iocinit_req)); 2051 mutex_lock(&mrioc->init_cmds.mutex); 2052 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 2053 retval = -1; 2054 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); 2055 mutex_unlock(&mrioc->init_cmds.mutex); 2056 goto out; 2057 } 2058 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 2059 mrioc->init_cmds.is_waiting = 1; 2060 mrioc->init_cmds.callback = NULL; 2061 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 2062 iocinit_req.function = MPI3_FUNCTION_IOC_INIT; 2063 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; 2064 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; 2065 
iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; 2066 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; 2067 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; 2068 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); 2069 iocinit_req.reply_free_queue_address = 2070 cpu_to_le64(mrioc->reply_free_q_dma); 2071 iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ); 2072 iocinit_req.sense_buffer_free_queue_depth = 2073 cpu_to_le16(mrioc->sense_buf_q_sz); 2074 iocinit_req.sense_buffer_free_queue_address = 2075 cpu_to_le64(mrioc->sense_buf_q_dma); 2076 iocinit_req.driver_information_address = cpu_to_le64(data_dma); 2077 2078 current_time = ktime_get_real(); 2079 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); 2080 2081 init_completion(&mrioc->init_cmds.done); 2082 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, 2083 sizeof(iocinit_req), 1); 2084 if (retval) { 2085 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); 2086 goto out_unlock; 2087 } 2088 wait_for_completion_timeout(&mrioc->init_cmds.done, 2089 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 2090 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 2091 mpi3mr_set_diagsave(mrioc); 2092 mpi3mr_issue_reset(mrioc, 2093 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2094 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); 2095 mrioc->unrecoverable = 1; 2096 ioc_err(mrioc, "Issue IOCInit: command timed out\n"); 2097 retval = -1; 2098 goto out_unlock; 2099 } 2100 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 2101 != MPI3_IOCSTATUS_SUCCESS) { 2102 ioc_err(mrioc, 2103 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 2104 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2105 mrioc->init_cmds.ioc_loginfo); 2106 retval = -1; 2107 goto out_unlock; 2108 } 2109 2110 out_unlock: 2111 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 2112 mutex_unlock(&mrioc->init_cmds.mutex); 2113 2114 out: 2115 if (drv_info) 2116 
dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, 2117 data_dma); 2118 2119 return retval; 2120 } 2121 2122 /** 2123 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 2124 * @mrioc: Adapter instance reference 2125 * 2126 * Allocate chain buffers and set a bitmap to indicate free 2127 * chain buffers. Chain buffers are used to pass the SGE 2128 * information along with MPI3 SCSI IO requests for host I/O. 2129 * 2130 * Return: 0 on success, non-zero on failure 2131 */ 2132 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 2133 { 2134 int retval = 0; 2135 u32 sz, i; 2136 u16 num_chains; 2137 2138 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 2139 2140 mrioc->chain_buf_count = num_chains; 2141 sz = sizeof(struct chain_element) * num_chains; 2142 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 2143 if (!mrioc->chain_sgl_list) 2144 goto out_failed; 2145 2146 sz = MPI3MR_PAGE_SIZE_4K; 2147 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 2148 &mrioc->pdev->dev, sz, 16, 0); 2149 if (!mrioc->chain_buf_pool) { 2150 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 2151 goto out_failed; 2152 } 2153 2154 for (i = 0; i < num_chains; i++) { 2155 mrioc->chain_sgl_list[i].addr = 2156 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 2157 &mrioc->chain_sgl_list[i].dma_addr); 2158 2159 if (!mrioc->chain_sgl_list[i].addr) 2160 goto out_failed; 2161 } 2162 mrioc->chain_bitmap_sz = num_chains / 8; 2163 if (num_chains % 8) 2164 mrioc->chain_bitmap_sz++; 2165 mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 2166 if (!mrioc->chain_bitmap) 2167 goto out_failed; 2168 return retval; 2169 out_failed: 2170 retval = -1; 2171 return retval; 2172 } 2173 2174 /** 2175 * mpi3mr_port_enable_complete - Mark port enable complete 2176 * @mrioc: Adapter instance reference 2177 * @drv_cmd: Internal command tracker 2178 * 2179 * Call back for asynchronous port enable request sets the 2180 * driver command to indicate port enable 
request is complete. 2181 * 2182 * Return: Nothing 2183 */ 2184 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 2185 struct mpi3mr_drv_cmd *drv_cmd) 2186 { 2187 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2188 drv_cmd->callback = NULL; 2189 mrioc->scan_failed = drv_cmd->ioc_status; 2190 mrioc->scan_started = 0; 2191 } 2192 2193 /** 2194 * mpi3mr_issue_port_enable - Issue Port Enable 2195 * @mrioc: Adapter instance reference 2196 * @async: Flag to wait for completion or not 2197 * 2198 * Issue Port Enable MPI request through admin queue and if the 2199 * async flag is not set wait for the completion of the port 2200 * enable or time out. 2201 * 2202 * Return: 0 on success, non-zero on failures. 2203 */ 2204 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) 2205 { 2206 struct mpi3_port_enable_request pe_req; 2207 int retval = 0; 2208 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 2209 2210 memset(&pe_req, 0, sizeof(pe_req)); 2211 mutex_lock(&mrioc->init_cmds.mutex); 2212 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 2213 retval = -1; 2214 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); 2215 mutex_unlock(&mrioc->init_cmds.mutex); 2216 goto out; 2217 } 2218 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 2219 if (async) { 2220 mrioc->init_cmds.is_waiting = 0; 2221 mrioc->init_cmds.callback = mpi3mr_port_enable_complete; 2222 } else { 2223 mrioc->init_cmds.is_waiting = 1; 2224 mrioc->init_cmds.callback = NULL; 2225 init_completion(&mrioc->init_cmds.done); 2226 } 2227 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 2228 pe_req.function = MPI3_FUNCTION_PORT_ENABLE; 2229 2230 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); 2231 if (retval) { 2232 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); 2233 goto out_unlock; 2234 } 2235 if (!async) { 2236 wait_for_completion_timeout(&mrioc->init_cmds.done, 2237 (pe_timeout * HZ)); 2238 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 2239 
ioc_err(mrioc, "Issue PortEnable: command timed out\n"); 2240 retval = -1; 2241 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 2242 mpi3mr_set_diagsave(mrioc); 2243 mpi3mr_issue_reset(mrioc, 2244 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2245 MPI3MR_RESET_FROM_PE_TIMEOUT); 2246 mrioc->unrecoverable = 1; 2247 goto out_unlock; 2248 } 2249 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); 2250 } 2251 out_unlock: 2252 mutex_unlock(&mrioc->init_cmds.mutex); 2253 out: 2254 return retval; 2255 } 2256 2257 /** 2258 * mpi3mr_cleanup_resources - Free PCI resources 2259 * @mrioc: Adapter instance reference 2260 * 2261 * Unmap PCI device memory and disable PCI device. 2262 * 2263 * Return: 0 on success and non-zero on failure. 2264 */ 2265 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 2266 { 2267 struct pci_dev *pdev = mrioc->pdev; 2268 2269 mpi3mr_cleanup_isr(mrioc); 2270 2271 if (mrioc->sysif_regs) { 2272 iounmap((void __iomem *)mrioc->sysif_regs); 2273 mrioc->sysif_regs = NULL; 2274 } 2275 2276 if (pci_is_enabled(pdev)) { 2277 if (mrioc->bars) 2278 pci_release_selected_regions(pdev, mrioc->bars); 2279 pci_disable_device(pdev); 2280 } 2281 } 2282 2283 /** 2284 * mpi3mr_setup_resources - Enable PCI resources 2285 * @mrioc: Adapter instance reference 2286 * 2287 * Enable PCI device memory, MSI-x registers and set DMA mask. 2288 * 2289 * Return: 0 on success and non-zero on failure. 2290 */ 2291 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 2292 { 2293 struct pci_dev *pdev = mrioc->pdev; 2294 u32 memap_sz = 0; 2295 int i, retval = 0, capb = 0; 2296 u16 message_control; 2297 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 2298 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) && 2299 (sizeof(dma_addr_t) > 4)) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 2300 2301 if (pci_enable_device_mem(pdev)) { 2302 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 2303 retval = -ENODEV; 2304 goto out_failed; 2305 } 2306 2307 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 2308 if (!capb) { 2309 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 2310 retval = -ENODEV; 2311 goto out_failed; 2312 } 2313 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 2314 2315 if (pci_request_selected_regions(pdev, mrioc->bars, 2316 mrioc->driver_name)) { 2317 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 2318 retval = -ENODEV; 2319 goto out_failed; 2320 } 2321 2322 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 2323 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 2324 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 2325 memap_sz = pci_resource_len(pdev, i); 2326 mrioc->sysif_regs = 2327 ioremap(mrioc->sysif_regs_phys, memap_sz); 2328 break; 2329 } 2330 } 2331 2332 pci_set_master(pdev); 2333 2334 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 2335 if (retval) { 2336 if (dma_mask != DMA_BIT_MASK(32)) { 2337 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 2338 dma_mask = DMA_BIT_MASK(32); 2339 retval = dma_set_mask_and_coherent(&pdev->dev, 2340 dma_mask); 2341 } 2342 if (retval) { 2343 mrioc->dma_mask = 0; 2344 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 2345 goto out_failed; 2346 } 2347 } 2348 mrioc->dma_mask = dma_mask; 2349 2350 if (!mrioc->sysif_regs) { 2351 ioc_err(mrioc, 2352 "Unable to map adapter memory or resource not found\n"); 2353 retval = -EINVAL; 2354 goto out_failed; 2355 } 2356 2357 pci_read_config_word(pdev, capb + 2, &message_control); 2358 mrioc->msix_count = (message_control & 0x3FF) + 1; 2359 2360 pci_save_state(pdev); 2361 2362 pci_set_drvdata(pdev, mrioc->shost); 2363 2364 mpi3mr_ioc_disable_intr(mrioc); 2365 2366 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 2367 (unsigned long long)mrioc->sysif_regs_phys, 
2368 mrioc->sysif_regs, memap_sz); 2369 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 2370 mrioc->msix_count); 2371 return retval; 2372 2373 out_failed: 2374 mpi3mr_cleanup_resources(mrioc); 2375 return retval; 2376 } 2377 2378 /** 2379 * mpi3mr_init_ioc - Initialize the controller 2380 * @mrioc: Adapter instance reference 2381 * 2382 * This the controller initialization routine, executed either 2383 * after soft reset or from pci probe callback. 2384 * Setup the required resources, memory map the controller 2385 * registers, create admin and operational reply queue pairs, 2386 * allocate required memory for reply pool, sense buffer pool, 2387 * issue IOC init request to the firmware, unmask the events and 2388 * issue port enable to discover SAS/SATA/NVMe devies and RAID 2389 * volumes. 2390 * 2391 * Return: 0 on success and non-zero on failure. 2392 */ 2393 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 2394 { 2395 int retval = 0; 2396 enum mpi3mr_iocstate ioc_state; 2397 u64 base_info; 2398 u32 timeout; 2399 u32 ioc_status, ioc_config; 2400 struct mpi3_ioc_facts_data facts_data; 2401 2402 mrioc->change_count = 0; 2403 mrioc->cpu_count = num_online_cpus(); 2404 retval = mpi3mr_setup_resources(mrioc); 2405 if (retval) { 2406 ioc_err(mrioc, "Failed to setup resources:error %d\n", 2407 retval); 2408 goto out_nocleanup; 2409 } 2410 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2411 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 2412 2413 ioc_info(mrioc, "SOD status %x configuration %x\n", 2414 ioc_status, ioc_config); 2415 2416 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 2417 ioc_info(mrioc, "SOD base_info %llx\n", base_info); 2418 2419 /*The timeout value is in 2sec unit, changing it to seconds*/ 2420 mrioc->ready_timeout = 2421 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 2422 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 2423 2424 ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout); 
2425 2426 ioc_state = mpi3mr_get_iocstate(mrioc); 2427 ioc_info(mrioc, "IOC in %s state during detection\n", 2428 mpi3mr_iocstate_name(ioc_state)); 2429 2430 if (ioc_state == MRIOC_STATE_BECOMING_READY || 2431 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 2432 timeout = mrioc->ready_timeout * 10; 2433 do { 2434 msleep(100); 2435 } while (--timeout); 2436 2437 ioc_state = mpi3mr_get_iocstate(mrioc); 2438 ioc_info(mrioc, 2439 "IOC in %s state after waiting for reset time\n", 2440 mpi3mr_iocstate_name(ioc_state)); 2441 } 2442 2443 if (ioc_state == MRIOC_STATE_READY) { 2444 retval = mpi3mr_issue_and_process_mur(mrioc, 2445 MPI3MR_RESET_FROM_BRINGUP); 2446 if (retval) { 2447 ioc_err(mrioc, "Failed to MU reset IOC error %d\n", 2448 retval); 2449 } 2450 ioc_state = mpi3mr_get_iocstate(mrioc); 2451 } 2452 if (ioc_state != MRIOC_STATE_RESET) { 2453 mpi3mr_print_fault_info(mrioc); 2454 retval = mpi3mr_issue_reset(mrioc, 2455 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 2456 MPI3MR_RESET_FROM_BRINGUP); 2457 if (retval) { 2458 ioc_err(mrioc, 2459 "%s :Failed to soft reset IOC error %d\n", 2460 __func__, retval); 2461 goto out_failed; 2462 } 2463 } 2464 ioc_state = mpi3mr_get_iocstate(mrioc); 2465 if (ioc_state != MRIOC_STATE_RESET) { 2466 ioc_err(mrioc, "Cannot bring IOC to reset state\n"); 2467 goto out_failed; 2468 } 2469 2470 retval = mpi3mr_setup_admin_qpair(mrioc); 2471 if (retval) { 2472 ioc_err(mrioc, "Failed to setup admin Qs: error %d\n", 2473 retval); 2474 goto out_failed; 2475 } 2476 2477 retval = mpi3mr_bring_ioc_ready(mrioc); 2478 if (retval) { 2479 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 2480 retval); 2481 goto out_failed; 2482 } 2483 2484 retval = mpi3mr_setup_isr(mrioc, 1); 2485 if (retval) { 2486 ioc_err(mrioc, "Failed to setup ISR error %d\n", 2487 retval); 2488 goto out_failed; 2489 } 2490 2491 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 2492 if (retval) { 2493 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 2494 retval); 2495 goto 
out_failed; 2496 } 2497 2498 mpi3mr_process_factsdata(mrioc, &facts_data); 2499 retval = mpi3mr_check_reset_dma_mask(mrioc); 2500 if (retval) { 2501 ioc_err(mrioc, "Resetting dma mask failed %d\n", 2502 retval); 2503 goto out_failed; 2504 } 2505 2506 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 2507 if (retval) { 2508 ioc_err(mrioc, 2509 "%s :Failed to allocated reply sense buffers %d\n", 2510 __func__, retval); 2511 goto out_failed; 2512 } 2513 2514 retval = mpi3mr_alloc_chain_bufs(mrioc); 2515 if (retval) { 2516 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 2517 retval); 2518 goto out_failed; 2519 } 2520 2521 retval = mpi3mr_issue_iocinit(mrioc); 2522 if (retval) { 2523 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 2524 retval); 2525 goto out_failed; 2526 } 2527 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; 2528 writel(mrioc->reply_free_queue_host_index, 2529 &mrioc->sysif_regs->reply_free_host_index); 2530 2531 mrioc->sbq_host_index = mrioc->num_sense_bufs; 2532 writel(mrioc->sbq_host_index, 2533 &mrioc->sysif_regs->sense_buffer_free_host_index); 2534 2535 retval = mpi3mr_setup_isr(mrioc, 0); 2536 if (retval) { 2537 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 2538 retval); 2539 goto out_failed; 2540 } 2541 2542 retval = mpi3mr_create_op_queues(mrioc); 2543 if (retval) { 2544 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 2545 retval); 2546 goto out_failed; 2547 } 2548 2549 return retval; 2550 2551 out_failed: 2552 mpi3mr_cleanup_ioc(mrioc); 2553 out_nocleanup: 2554 return retval; 2555 } 2556 2557 /** 2558 * mpi3mr_free_mem - Free memory allocated for a controller 2559 * @mrioc: Adapter instance reference 2560 * 2561 * Free all the memory allocated for a controller. 2562 * 2563 * Return: Nothing. 
 */
static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/* each pool: free the element (if allocated), then destroy the pool */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/*
	 * Detach reply queues from the interrupt contexts. NOTE(review):
	 * the inner NULL check is always true once the loop runs, since
	 * intr_info is base + i; the guard that matters is intr_info_count.
	 */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		if (intr_info)
			intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	/* timeout counts 100ms polls: default shutdown time * 10 */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* request a normal device shutdown via the config register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* prefer the firmware-reported timeout (in seconds) if present */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	/* poll every 100ms for shutdown-complete status */
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller and the
 * associated memory resources are freed.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	mpi3mr_stop_watchdog(mrioc);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* only quiesce the firmware when it is healthy and reachable */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		/* MUR first; fall back to a soft reset if it fails */
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);

		mpi3mr_issue_ioc_shutdown(mrioc);
	}

	/* free host memory before releasing the PCI mappings it refers to */
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * TBD
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	/* stub: reset handling not implemented yet, report success */
	return 0;
}