// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2021 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

/* Forward declarations for routines defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);

#if defined(writeq) && defined(CONFIG_64BIT)
/**
 * mpi3mr_writeq - 64-bit register write
 * @b: value to write
 * @addr: I/O mapped register address
 *
 * Uses the platform native writeq() when it is available.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/**
 * mpi3mr_writeq - 64-bit register write (32-bit fallback)
 * @b: value to write
 * @addr: I/O mapped register address
 *
 * Emulates a 64-bit write as two 32-bit writes, low dword first then
 * high dword at @addr + 4. NOTE(review): not atomic with respect to
 * the device; presumably the hardware latches on the high-dword write
 * — matches the <linux/io-64-nonatomic-lo-hi.h> convention included
 * above, but confirm against the controller spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/**
 * mpi3mr_check_req_qfull - check whether an op request queue is full
 * @op_req_q: operational request queue info
 *
 * The queue is full when advancing the producer index by one (with
 * wrap-around) would make it equal to the consumer index, i.e. one
 * slot is always left unused.
 *
 * Return: true when the queue is full, false otherwise.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/**
 * mpi3mr_sync_irqs - wait for all in-flight interrupt handlers
 * @mrioc: Adapter instance reference
 */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/**
 * mpi3mr_ioc_disable_intr - Disable interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Clears the software enable flag (checked by the ISRs before doing
 * any work) and then synchronizes with handlers already running.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/**
 * mpi3mr_ioc_enable_intr - Enable interrupt handling
 * @mrioc: Adapter instance reference
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/**
 * mpi3mr_cleanup_isr - Free IRQs and interrupt bookkeeping
 * @mrioc: Adapter instance reference
 *
 * Disables interrupts, frees every registered IRQ, releases the
 * intr_info array and finally the allocated PCI IRQ vectors.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/**
 * mpi3mr_add_sg_single - Fill one simple SGE
 * @paddr: virtual address of the SGE slot to fill
 * @flags: SGE flags
 * @length: transfer length in bytes
 * @dma_addr: DMA address of the data buffer
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/**
 * mpi3mr_build_zero_len_sge - Build a zero-length end-of-list SGE
 * @paddr: virtual address of the SGE slot to fill
 *
 * Used for requests that carry no data; the address is set to all 1s.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/**
 * mpi3mr_get_reply_virt_addr - map reply DMA address to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: reply frame DMA address reported by the controller
 *
 * Return: virtual address inside the reply buffer pool, or NULL when
 * @phys_addr is zero or outside the pool's DMA range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/**
 * mpi3mr_get_sensebuf_virt_addr - map sense buffer DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: sense buffer DMA address reported by the controller
 *
 * Return: virtual address inside the sense buffer pool, or NULL when
 * @phys_addr is zero. Unlike the reply pool no range check is done.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/**
 * mpi3mr_repost_reply_buf - return a reply frame to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply frame
 *
 * Puts the frame back on the reply free queue and notifies the
 * controller through the host index register; the whole update is
 * done under the reply free queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	/* Advance the host index with wrap-around. */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/**
 * mpi3mr_repost_sense_buf - return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Counterpart of mpi3mr_repost_reply_buf() for the sense buffer
 * free queue, protected by the sbq lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/**
 * mpi3mr_print_event_data - Log a received MPI3 event
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Logs a human readable description of the event; a few events also
 * log event-specific details taken from the event data and return
 * without printing the generic description.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/**
 * mpi3mr_handle_events - Handle an event notification reply
 * @mrioc: Adapter instance reference
 * @def_reply: generic reply frame carrying an event notification
 *
 * Records the IOC change count, logs the event and forwards it to
 * the OS specific event handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/**
 * mpi3mr_get_drv_cmd - map a host tag to its driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: host tag from the reply descriptor/frame
 * @def_reply: reply frame; consulted only for MPI3MR_HOSTTAG_INVALID
 *	to dispatch unsolicited event notifications
 *
 * Return: command tracker for the tag, or NULL when the tag has no
 * tracker (including the invalid-tag event notification path).
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	/* Device removal commands occupy a contiguous tag range. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	return NULL;
}

/**
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor to process
 * @reply_dma: output; DMA address of the reply frame if the descriptor
 *	is an address reply, else left at zero (caller reposts it)
 *
 * Extracts host tag, IOC status and log info according to the
 * descriptor type, then completes or calls back the matching driver
 * command tracker. SCSI IO replies may also carry a sense buffer,
 * which is reposted to the firmware before returning.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			/* Either wake a sleeping waiter or run the
			 * registered completion callback, never both.
			 */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_process_admin_reply_q - drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Processes admin reply descriptors until the expected phase bit no
 * longer matches (queue empty), reposting reply frames as it goes,
 * then publishes the new consumer index to the controller.
 *
 * Return: number of descriptors processed.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase bit mismatch means no new entry at the consumer index. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Wrap-around flips the expected phase. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Return: reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/* Reply queues may be segmented: pick the segment that holds
	 * @reply_ci, then index within it.
	 */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - drain one operational reply queue
 * @mrioc: Adapter instance reference
 * @intr_info: interrupt info of the vector serving this queue
 *
 * Processes reply descriptors until the phase bit mismatches (queue
 * empty) or more than max_host_ios descriptors have been handled in
 * one call (then IRQ polling mode is enabled and the remainder is
 * completed from the threaded ISR). The in_use atomic guarantees a
 * single consumer per queue.
 *
 * Return: number of descriptors processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may drain this queue at a time. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request queue CI reported by firmware. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_isr_primary - primary (hard-IRQ) interrupt handler
 * @irq: IRQ number
 * @privdata: interrupt info (struct mpi3mr_intr_info)
 *
 * Drains the admin reply queue (vector 0 only) and the operational
 * reply queue bound to this vector, if any.
 *
 * Return: IRQ_HANDLED when any reply was processed, else IRQ_NONE.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	/* Only MSI-x 0 services the admin reply queue. */
	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/**
 * mpi3mr_isr - top-level interrupt handler
 * @irq: IRQ number
 * @privdata: interrupt info (struct mpi3mr_intr_info)
 *
 * Runs the primary handler; when IRQ polling is enabled and I/Os are
 * still pending, disables the vector and wakes the threaded handler.
 *
 * Return: primary handler's result, or IRQ_WAKE_THREAD.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Re-enabled by mpi3mr_isr_poll() when polling completes. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc, intr_info);

		usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	intr_info->op_reply_q->enable_irq_poll = false;
	/* Pairs with disable_irq_nosync() in mpi3mr_isr(). */
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request a threaded IRQ with mpi3mr_isr() as the primary handler
 * and mpi3mr_isr_poll() as the threaded (polling) handler.
 *
 * Return: 0 on success and non zero on failures.
637 */ 638 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 639 { 640 struct pci_dev *pdev = mrioc->pdev; 641 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 642 int retval = 0; 643 644 intr_info->mrioc = mrioc; 645 intr_info->msix_index = index; 646 intr_info->op_reply_q = NULL; 647 648 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 649 mrioc->driver_name, mrioc->id, index); 650 651 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 652 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 653 if (retval) { 654 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 655 intr_info->name, pci_irq_vector(pdev, index)); 656 return retval; 657 } 658 659 return retval; 660 } 661 662 /** 663 * mpi3mr_setup_isr - Setup ISR for the controller 664 * @mrioc: Adapter instance reference 665 * @setup_one: Request one IRQ or more 666 * 667 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 668 * 669 * Return: 0 on success and non zero on failures. 670 */ 671 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 672 { 673 unsigned int irq_flags = PCI_IRQ_MSIX; 674 int max_vectors; 675 int retval; 676 int i; 677 struct irq_affinity desc = { .pre_vectors = 1}; 678 679 if (mrioc->is_intr_info_set) 680 return 0; 681 682 mpi3mr_cleanup_isr(mrioc); 683 684 if (setup_one || reset_devices) 685 max_vectors = 1; 686 else { 687 max_vectors = 688 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 689 690 ioc_info(mrioc, 691 "MSI-X vectors supported: %d, no of cores: %d,", 692 mrioc->msix_count, mrioc->cpu_count); 693 ioc_info(mrioc, 694 "MSI-x vectors requested: %d\n", max_vectors); 695 } 696 697 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 698 699 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 700 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 701 1, max_vectors, irq_flags, &desc); 702 if (retval < 0) { 703 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 704 goto out_failed; 705 } 706 if (retval != max_vectors) { 707 ioc_info(mrioc, 708 "allocated vectors (%d) are less than configured (%d)\n", 709 retval, max_vectors); 710 /* 711 * If only one MSI-x is allocated, then MSI-x 0 will be shared 712 * between Admin queue and operational queue 713 */ 714 if (retval == 1) 715 mrioc->op_reply_q_offset = 0; 716 717 max_vectors = retval; 718 } 719 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 720 GFP_KERNEL); 721 if (!mrioc->intr_info) { 722 retval = -ENOMEM; 723 pci_free_irq_vectors(mrioc->pdev); 724 goto out_failed; 725 } 726 for (i = 0; i < max_vectors; i++) { 727 retval = mpi3mr_request_irq(mrioc, i); 728 if (retval) { 729 mrioc->intr_info_count = i; 730 goto out_failed; 731 } 732 } 733 if (reset_devices || !setup_one) 734 mrioc->is_intr_info_set = true; 735 mrioc->intr_info_count = max_vectors; 736 mpi3mr_ioc_enable_intr(mrioc); 737 return 0; 738 739 out_failed: 740 mpi3mr_cleanup_isr(mrioc); 741 742 return retval; 743 } 744 745 static const struct { 746 enum mpi3mr_iocstate value; 747 char *name; 748 } mrioc_states[] = { 749 { MRIOC_STATE_READY, "ready" }, 750 { MRIOC_STATE_FAULT, "fault" }, 751 { MRIOC_STATE_RESET, "reset" }, 752 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 753 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 754 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 755 }; 756 757 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 758 { 759 int i; 760 char *name = NULL; 761 762 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 763 if (mrioc_states[i].value == mrioc_state) { 764 name = mrioc_states[i].name; 765 break; 766 } 767 } 768 return name; 769 } 770 771 /* Reset reason to name mapper structure*/ 772 static const struct { 773 enum mpi3mr_reset_reason value; 774 char 
*name; 775 } mpi3mr_reset_reason_codes[] = { 776 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 777 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 778 { MPI3MR_RESET_FROM_IOCTL, "application invocation" }, 779 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 780 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 781 { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" }, 782 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 783 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" }, 784 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 785 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 786 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 787 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 788 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 789 { 790 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 791 "create request queue timeout" 792 }, 793 { 794 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 795 "create reply queue timeout" 796 }, 797 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 798 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 799 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 800 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 801 { 802 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 803 "component image activation timeout" 804 }, 805 { 806 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 807 "get package version timeout" 808 }, 809 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 810 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 811 }; 812 813 /** 814 * mpi3mr_reset_rc_name - get reset reason code name 815 * @reason_code: reset reason code value 816 * 817 * Map reset reason to an NULL terminated ASCII string 818 * 819 * Return: name corresponding to reset reason value or NULL. 
820 */ 821 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 822 { 823 int i; 824 char *name = NULL; 825 826 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 827 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 828 name = mpi3mr_reset_reason_codes[i].name; 829 break; 830 } 831 } 832 return name; 833 } 834 835 /* Reset type to name mapper structure*/ 836 static const struct { 837 u16 reset_type; 838 char *name; 839 } mpi3mr_reset_types[] = { 840 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 841 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 842 }; 843 844 /** 845 * mpi3mr_reset_type_name - get reset type name 846 * @reset_type: reset type value 847 * 848 * Map reset type to an NULL terminated ASCII string 849 * 850 * Return: name corresponding to reset type value or NULL. 851 */ 852 static const char *mpi3mr_reset_type_name(u16 reset_type) 853 { 854 int i; 855 char *name = NULL; 856 857 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 858 if (mpi3mr_reset_types[i].reset_type == reset_type) { 859 name = mpi3mr_reset_types[i].name; 860 break; 861 } 862 } 863 return name; 864 } 865 866 /** 867 * mpi3mr_print_fault_info - Display fault information 868 * @mrioc: Adapter instance reference 869 * 870 * Display the controller fault information if there is a 871 * controller fault. 872 * 873 * Return: Nothing. 
874 */ 875 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 876 { 877 u32 ioc_status, code, code1, code2, code3; 878 879 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 880 881 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 882 code = readl(&mrioc->sysif_regs->fault); 883 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 884 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 885 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 886 887 ioc_info(mrioc, 888 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 889 code, code1, code2, code3); 890 } 891 } 892 893 /** 894 * mpi3mr_get_iocstate - Get IOC State 895 * @mrioc: Adapter instance reference 896 * 897 * Return a proper IOC state enum based on the IOC status and 898 * IOC configuration and unrcoverable state of the controller. 899 * 900 * Return: Current IOC state. 901 */ 902 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 903 { 904 u32 ioc_status, ioc_config; 905 u8 ready, enabled; 906 907 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 908 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 909 910 if (mrioc->unrecoverable) 911 return MRIOC_STATE_UNRECOVERABLE; 912 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 913 return MRIOC_STATE_FAULT; 914 915 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 916 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 917 918 if (ready && enabled) 919 return MRIOC_STATE_READY; 920 if ((!ready) && (!enabled)) 921 return MRIOC_STATE_RESET; 922 if ((!ready) && (enabled)) 923 return MRIOC_STATE_BECOMING_READY; 924 925 return MRIOC_STATE_RESET_REQUESTED; 926 } 927 928 /** 929 * mpi3mr_clear_reset_history - clear reset history 930 * @mrioc: Adapter instance reference 931 * 932 * Write the reset history bit in IOC status to clear the bit, 933 * if it is already set. 934 * 935 * Return: Nothing. 
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Writing the read value back clears the history bit (per the
	 * function contract above); skip the write when it is not set.
	 */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad 0, then trigger the MUR by
	 * clearing the IOC enable bit.
	 */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms; ready_timeout is in seconds. */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			/* MUR succeeded only when ready, fault and enable
			 * are all clear after the reset history popped.
			 */
			if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
			    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
			    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
				retval = 0;
				break;
			}
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, appropriate error on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status, timeout;
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	u64 base_info;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
	    ioc_status, ioc_config, base_info);

	/*The timeout value is in 2sec unit, changing it to seconds*/
	mrioc->ready_timeout =
	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);

	ioc_state = mpi3mr_get_iocstate(mrioc);
	ioc_info(mrioc, "controller is in %s state during detection\n",
	    mpi3mr_iocstate_name(ioc_state));

	/* Give an in-transition controller the full ready timeout to
	 * settle before forcing it into reset.
	 */
	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
		timeout = mrioc->ready_timeout * 10;
		do {
			msleep(100);
		} while (--timeout);

		ioc_state = mpi3mr_get_iocstate(mrioc);
		ioc_info(mrioc,
		    "controller is in %s state after waiting to reset\n",
		    mpi3mr_iocstate_name(ioc_state));
	}

	/* A ready controller is first taken down with a MUR ... */
	if (ioc_state == MRIOC_STATE_READY) {
		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
		retval = mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_BRINGUP);
		ioc_state = mpi3mr_get_iocstate(mrioc);
		if (retval)
			ioc_err(mrioc,
			    "message unit reset failed with error %d current state %s\n",
			    retval, mpi3mr_iocstate_name(ioc_state));
	}
	/* ... and anything still not in reset gets a soft reset. */
	if (ioc_state != MRIOC_STATE_RESET) {
		mpi3mr_print_fault_info(mrioc);
		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
		    MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			ioc_err(mrioc,
			    "soft reset failed with error %d\n", retval);
			goto out_failed;
		}
	}
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_RESET) {
		ioc_err(mrioc,
		    "cannot bring controller to reset state, current state: %s\n",
		    mpi3mr_iocstate_name(ioc_state));
		goto out_failed;
	}
	mpi3mr_clear_reset_history(mrioc);
	retval = mpi3mr_setup_admin_qpair(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
		    retval);
		goto out_failed;
	}

	ioc_info(mrioc, "bringing controller to ready state\n");
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms for the controller to report ready. */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_state = mpi3mr_get_iocstate(mrioc);
		if (ioc_state == MRIOC_STATE_READY) {
			ioc_info(mrioc,
			    "successfully transistioned to %s state\n",
			    mpi3mr_iocstate_name(ioc_state));
			return 0;
		}
		msleep(100);
	} while (--timeout);

out_failed:
	ioc_state = mpi3mr_get_iocstate(mrioc);
	ioc_err(mrioc,
	    "failed to bring to ready state, current state: %s\n",
	    mpi3mr_iocstate_name(ioc_state));
	return retval;
}

/**
 * mpi3mr_soft_reset_success - Check softreset is success or not
 * @ioc_status: IOC status register value
 * @ioc_config: IOC config register value
 *
 * Check whether the
soft reset is successful or not based on 1112 * IOC status and IOC config register values. 1113 * 1114 * Return: True when the soft reset is success, false otherwise. 1115 */ 1116 static inline bool 1117 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1118 { 1119 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1120 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || 1121 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1122 return true; 1123 return false; 1124 } 1125 1126 /** 1127 * mpi3mr_diagfault_success - Check diag fault is success or not 1128 * @mrioc: Adapter reference 1129 * @ioc_status: IOC status register value 1130 * 1131 * Check whether the controller hit diag reset fault code. 1132 * 1133 * Return: True when there is diag fault, false otherwise. 1134 */ 1135 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1136 u32 ioc_status) 1137 { 1138 u32 fault; 1139 1140 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1141 return false; 1142 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1143 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) 1144 return true; 1145 return false; 1146 } 1147 1148 /** 1149 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1150 * @mrioc: Adapter reference 1151 * 1152 * Set diag save bit in IOC configuration register to enable 1153 * snapdump. 1154 * 1155 * Return: Nothing. 
1156 */ 1157 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1158 { 1159 u32 ioc_config; 1160 1161 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1162 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1163 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1164 } 1165 1166 /** 1167 * mpi3mr_issue_reset - Issue reset to the controller 1168 * @mrioc: Adapter reference 1169 * @reset_type: Reset type 1170 * @reset_reason: Reset reason code 1171 * 1172 * Unlock the host diagnostic registers and write the specific 1173 * reset type to that, wait for reset acknowledgment from the 1174 * controller, if the reset is not successful retry for the 1175 * predefined number of times. 1176 * 1177 * Return: 0 on success, non-zero on failure. 1178 */ 1179 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1180 u32 reset_reason) 1181 { 1182 int retval = -1; 1183 u8 unlock_retry_count, reset_retry_count = 0; 1184 u32 host_diagnostic, timeout, ioc_status, ioc_config; 1185 1186 pci_cfg_access_lock(mrioc->pdev); 1187 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1188 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1189 goto out; 1190 if (mrioc->unrecoverable) 1191 goto out; 1192 retry_reset: 1193 unlock_retry_count = 0; 1194 mpi3mr_clear_reset_history(mrioc); 1195 do { 1196 ioc_info(mrioc, 1197 "Write magic sequence to unlock host diag register (retry=%d)\n", 1198 ++unlock_retry_count); 1199 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1200 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1201 mrioc->unrecoverable = 1; 1202 goto out; 1203 } 1204 1205 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1206 &mrioc->sysif_regs->write_sequence); 1207 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1208 &mrioc->sysif_regs->write_sequence); 1209 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1210 &mrioc->sysif_regs->write_sequence); 1211 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1212 &mrioc->sysif_regs->write_sequence); 1213 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1214 &mrioc->sysif_regs->write_sequence); 1215 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1216 &mrioc->sysif_regs->write_sequence); 1217 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1218 &mrioc->sysif_regs->write_sequence); 1219 usleep_range(1000, 1100); 1220 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1221 ioc_info(mrioc, 1222 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1223 unlock_retry_count, host_diagnostic); 1224 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1225 1226 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1227 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1228 mpi3mr_reset_type_name(reset_type), 1229 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1230 writel(host_diagnostic | reset_type, 1231 &mrioc->sysif_regs->host_diagnostic); 1232 timeout = mrioc->ready_timeout * 10; 1233 if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) { 1234 do { 1235 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1236 if (ioc_status & 1237 MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 1238 mpi3mr_clear_reset_history(mrioc); 1239 ioc_config = 1240 readl(&mrioc->sysif_regs->ioc_configuration); 1241 if (mpi3mr_soft_reset_success(ioc_status, 1242 ioc_config)) { 1243 retval = 0; 1244 break; 1245 } 1246 } 1247 msleep(100); 1248 } while (--timeout); 1249 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1250 &mrioc->sysif_regs->write_sequence); 1251 } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) { 1252 do { 1253 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1254 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1255 retval = 0; 1256 break; 1257 } 1258 msleep(100); 1259 } while (--timeout); 1260 mpi3mr_clear_reset_history(mrioc); 1261 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1262 
&mrioc->sysif_regs->write_sequence); 1263 } 1264 if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) { 1265 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1266 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1267 ioc_info(mrioc, 1268 "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n", 1269 reset_retry_count, ioc_status, ioc_config); 1270 goto retry_reset; 1271 } 1272 1273 out: 1274 pci_cfg_access_unlock(mrioc->pdev); 1275 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1276 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1277 1278 ioc_info(mrioc, 1279 "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n", 1280 (!retval) ? "successful" : "failed", ioc_status, 1281 ioc_config); 1282 return retval; 1283 } 1284 1285 /** 1286 * mpi3mr_admin_request_post - Post request to admin queue 1287 * @mrioc: Adapter reference 1288 * @admin_req: MPI3 request 1289 * @admin_req_sz: Request size 1290 * @ignore_reset: Ignore reset in process 1291 * 1292 * Post the MPI3 request into admin request queue and 1293 * inform the controller, if the queue is full return 1294 * appropriate error. 1295 * 1296 * Return: 0 on success, non-zero on failure. 
1297 */ 1298 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1299 u16 admin_req_sz, u8 ignore_reset) 1300 { 1301 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1302 int retval = 0; 1303 unsigned long flags; 1304 u8 *areq_entry; 1305 1306 if (mrioc->unrecoverable) { 1307 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1308 return -EFAULT; 1309 } 1310 1311 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1312 areq_pi = mrioc->admin_req_pi; 1313 areq_ci = mrioc->admin_req_ci; 1314 max_entries = mrioc->num_admin_req; 1315 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1316 (areq_pi == (max_entries - 1)))) { 1317 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1318 retval = -EAGAIN; 1319 goto out; 1320 } 1321 if (!ignore_reset && mrioc->reset_in_progress) { 1322 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1323 retval = -EAGAIN; 1324 goto out; 1325 } 1326 areq_entry = (u8 *)mrioc->admin_req_base + 1327 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1328 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1329 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1330 1331 if (++areq_pi == max_entries) 1332 areq_pi = 0; 1333 mrioc->admin_req_pi = areq_pi; 1334 1335 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1336 1337 out: 1338 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1339 1340 return retval; 1341 } 1342 1343 /** 1344 * mpi3mr_free_op_req_q_segments - free request memory segments 1345 * @mrioc: Adapter instance reference 1346 * @q_idx: operational request queue index 1347 * 1348 * Free memory segments allocated for operational request queue 1349 * 1350 * Return: Nothing. 
1351 */ 1352 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1353 { 1354 u16 j; 1355 int size; 1356 struct segments *segments; 1357 1358 segments = mrioc->req_qinfo[q_idx].q_segments; 1359 if (!segments) 1360 return; 1361 1362 if (mrioc->enable_segqueue) { 1363 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1364 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1365 dma_free_coherent(&mrioc->pdev->dev, 1366 MPI3MR_MAX_SEG_LIST_SIZE, 1367 mrioc->req_qinfo[q_idx].q_segment_list, 1368 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1369 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1370 } 1371 } else 1372 size = mrioc->req_qinfo[q_idx].num_requests * 1373 mrioc->facts.op_req_sz; 1374 1375 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1376 if (!segments[j].segment) 1377 continue; 1378 dma_free_coherent(&mrioc->pdev->dev, 1379 size, segments[j].segment, segments[j].segment_dma); 1380 segments[j].segment = NULL; 1381 } 1382 kfree(mrioc->req_qinfo[q_idx].q_segments); 1383 mrioc->req_qinfo[q_idx].q_segments = NULL; 1384 mrioc->req_qinfo[q_idx].qid = 0; 1385 } 1386 1387 /** 1388 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1389 * @mrioc: Adapter instance reference 1390 * @q_idx: operational reply queue index 1391 * 1392 * Free memory segments allocated for operational reply queue 1393 * 1394 * Return: Nothing. 
1395 */ 1396 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1397 { 1398 u16 j; 1399 int size; 1400 struct segments *segments; 1401 1402 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 1403 if (!segments) 1404 return; 1405 1406 if (mrioc->enable_segqueue) { 1407 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1408 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 1409 dma_free_coherent(&mrioc->pdev->dev, 1410 MPI3MR_MAX_SEG_LIST_SIZE, 1411 mrioc->op_reply_qinfo[q_idx].q_segment_list, 1412 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 1413 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1414 } 1415 } else 1416 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 1417 mrioc->op_reply_desc_sz; 1418 1419 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 1420 if (!segments[j].segment) 1421 continue; 1422 dma_free_coherent(&mrioc->pdev->dev, 1423 size, segments[j].segment, segments[j].segment_dma); 1424 segments[j].segment = NULL; 1425 } 1426 1427 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 1428 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 1429 mrioc->op_reply_qinfo[q_idx].qid = 0; 1430 } 1431 1432 /** 1433 * mpi3mr_delete_op_reply_q - delete operational reply queue 1434 * @mrioc: Adapter instance reference 1435 * @qidx: operational reply queue index 1436 * 1437 * Delete operatinal reply queue by issuing MPI request 1438 * through admin queue. 1439 * 1440 * Return: 0 on success, non-zero on failure. 
1441 */ 1442 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1443 { 1444 struct mpi3_delete_reply_queue_request delq_req; 1445 int retval = 0; 1446 u16 reply_qid = 0, midx; 1447 1448 reply_qid = mrioc->op_reply_qinfo[qidx].qid; 1449 1450 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1451 1452 if (!reply_qid) { 1453 retval = -1; 1454 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 1455 goto out; 1456 } 1457 1458 memset(&delq_req, 0, sizeof(delq_req)); 1459 mutex_lock(&mrioc->init_cmds.mutex); 1460 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1461 retval = -1; 1462 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 1463 mutex_unlock(&mrioc->init_cmds.mutex); 1464 goto out; 1465 } 1466 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1467 mrioc->init_cmds.is_waiting = 1; 1468 mrioc->init_cmds.callback = NULL; 1469 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1470 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 1471 delq_req.queue_id = cpu_to_le16(reply_qid); 1472 1473 init_completion(&mrioc->init_cmds.done); 1474 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 1475 1); 1476 if (retval) { 1477 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 1478 goto out_unlock; 1479 } 1480 wait_for_completion_timeout(&mrioc->init_cmds.done, 1481 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1482 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1483 ioc_err(mrioc, "delete reply queue timed out\n"); 1484 mpi3mr_check_rh_fault_ioc(mrioc, 1485 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 1486 retval = -1; 1487 goto out_unlock; 1488 } 1489 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1490 != MPI3_IOCSTATUS_SUCCESS) { 1491 ioc_err(mrioc, 1492 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1493 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1494 mrioc->init_cmds.ioc_loginfo); 1495 retval = -1; 1496 goto out_unlock; 1497 } 1498 
mrioc->intr_info[midx].op_reply_q = NULL; 1499 1500 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1501 out_unlock: 1502 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1503 mutex_unlock(&mrioc->init_cmds.mutex); 1504 out: 1505 1506 return retval; 1507 } 1508 1509 /** 1510 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 1511 * @mrioc: Adapter instance reference 1512 * @qidx: request queue index 1513 * 1514 * Allocate segmented memory pools for operational reply 1515 * queue. 1516 * 1517 * Return: 0 on success, non-zero on failure. 1518 */ 1519 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1520 { 1521 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1522 int i, size; 1523 u64 *q_segment_list_entry = NULL; 1524 struct segments *segments; 1525 1526 if (mrioc->enable_segqueue) { 1527 op_reply_q->segment_qd = 1528 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 1529 1530 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1531 1532 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1533 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 1534 GFP_KERNEL); 1535 if (!op_reply_q->q_segment_list) 1536 return -ENOMEM; 1537 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 1538 } else { 1539 op_reply_q->segment_qd = op_reply_q->num_replies; 1540 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 1541 } 1542 1543 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 1544 op_reply_q->segment_qd); 1545 1546 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 1547 sizeof(struct segments), GFP_KERNEL); 1548 if (!op_reply_q->q_segments) 1549 return -ENOMEM; 1550 1551 segments = op_reply_q->q_segments; 1552 for (i = 0; i < op_reply_q->num_segments; i++) { 1553 segments[i].segment = 1554 dma_alloc_coherent(&mrioc->pdev->dev, 1555 size, &segments[i].segment_dma, GFP_KERNEL); 1556 if (!segments[i].segment) 1557 return -ENOMEM; 1558 if (mrioc->enable_segqueue) 1559 
q_segment_list_entry[i] = 1560 (unsigned long)segments[i].segment_dma; 1561 } 1562 1563 return 0; 1564 } 1565 1566 /** 1567 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 1568 * @mrioc: Adapter instance reference 1569 * @qidx: request queue index 1570 * 1571 * Allocate segmented memory pools for operational request 1572 * queue. 1573 * 1574 * Return: 0 on success, non-zero on failure. 1575 */ 1576 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1577 { 1578 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 1579 int i, size; 1580 u64 *q_segment_list_entry = NULL; 1581 struct segments *segments; 1582 1583 if (mrioc->enable_segqueue) { 1584 op_req_q->segment_qd = 1585 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 1586 1587 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1588 1589 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1590 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 1591 GFP_KERNEL); 1592 if (!op_req_q->q_segment_list) 1593 return -ENOMEM; 1594 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 1595 1596 } else { 1597 op_req_q->segment_qd = op_req_q->num_requests; 1598 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 1599 } 1600 1601 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 1602 op_req_q->segment_qd); 1603 1604 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 1605 sizeof(struct segments), GFP_KERNEL); 1606 if (!op_req_q->q_segments) 1607 return -ENOMEM; 1608 1609 segments = op_req_q->q_segments; 1610 for (i = 0; i < op_req_q->num_segments; i++) { 1611 segments[i].segment = 1612 dma_alloc_coherent(&mrioc->pdev->dev, 1613 size, &segments[i].segment_dma, GFP_KERNEL); 1614 if (!segments[i].segment) 1615 return -ENOMEM; 1616 if (mrioc->enable_segqueue) 1617 q_segment_list_entry[i] = 1618 (unsigned long)segments[i].segment_dma; 1619 } 1620 1621 return 0; 1622 } 1623 1624 /** 1625 * mpi3mr_create_op_reply_q - create operational reply queue 1626 * @mrioc: 
Adapter instance reference 1627 * @qidx: operational reply queue index 1628 * 1629 * Create operatinal reply queue by issuing MPI request 1630 * through admin queue. 1631 * 1632 * Return: 0 on success, non-zero on failure. 1633 */ 1634 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1635 { 1636 struct mpi3_create_reply_queue_request create_req; 1637 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1638 int retval = 0; 1639 u16 reply_qid = 0, midx; 1640 1641 reply_qid = op_reply_q->qid; 1642 1643 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1644 1645 if (reply_qid) { 1646 retval = -1; 1647 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1648 reply_qid); 1649 1650 return retval; 1651 } 1652 1653 reply_qid = qidx + 1; 1654 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1655 op_reply_q->ci = 0; 1656 op_reply_q->ephase = 1; 1657 atomic_set(&op_reply_q->pend_ios, 0); 1658 atomic_set(&op_reply_q->in_use, 0); 1659 op_reply_q->enable_irq_poll = false; 1660 1661 if (!op_reply_q->q_segments) { 1662 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1663 if (retval) { 1664 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1665 goto out; 1666 } 1667 } 1668 1669 memset(&create_req, 0, sizeof(create_req)); 1670 mutex_lock(&mrioc->init_cmds.mutex); 1671 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1672 retval = -1; 1673 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1674 goto out_unlock; 1675 } 1676 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1677 mrioc->init_cmds.is_waiting = 1; 1678 mrioc->init_cmds.callback = NULL; 1679 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1680 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1681 create_req.queue_id = cpu_to_le16(reply_qid); 1682 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1683 create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); 1684 if (mrioc->enable_segqueue) { 1685 
create_req.flags |= 1686 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1687 create_req.base_address = cpu_to_le64( 1688 op_reply_q->q_segment_list_dma); 1689 } else 1690 create_req.base_address = cpu_to_le64( 1691 op_reply_q->q_segments[0].segment_dma); 1692 1693 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1694 1695 init_completion(&mrioc->init_cmds.done); 1696 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1697 sizeof(create_req), 1); 1698 if (retval) { 1699 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1700 goto out_unlock; 1701 } 1702 wait_for_completion_timeout(&mrioc->init_cmds.done, 1703 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1704 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1705 ioc_err(mrioc, "create reply queue timed out\n"); 1706 mpi3mr_check_rh_fault_ioc(mrioc, 1707 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1708 retval = -1; 1709 goto out_unlock; 1710 } 1711 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1712 != MPI3_IOCSTATUS_SUCCESS) { 1713 ioc_err(mrioc, 1714 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1715 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1716 mrioc->init_cmds.ioc_loginfo); 1717 retval = -1; 1718 goto out_unlock; 1719 } 1720 op_reply_q->qid = reply_qid; 1721 if (midx < mrioc->intr_info_count) 1722 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1723 1724 out_unlock: 1725 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1726 mutex_unlock(&mrioc->init_cmds.mutex); 1727 out: 1728 1729 return retval; 1730 } 1731 1732 /** 1733 * mpi3mr_create_op_req_q - create operational request queue 1734 * @mrioc: Adapter instance reference 1735 * @idx: operational request queue index 1736 * @reply_qid: Reply queue ID 1737 * 1738 * Create operatinal request queue by issuing MPI request 1739 * through admin queue. 1740 * 1741 * Return: 0 on success, non-zero on failure. 
1742 */ 1743 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1744 u16 reply_qid) 1745 { 1746 struct mpi3_create_request_queue_request create_req; 1747 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1748 int retval = 0; 1749 u16 req_qid = 0; 1750 1751 req_qid = op_req_q->qid; 1752 1753 if (req_qid) { 1754 retval = -1; 1755 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1756 req_qid); 1757 1758 return retval; 1759 } 1760 req_qid = idx + 1; 1761 1762 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1763 op_req_q->ci = 0; 1764 op_req_q->pi = 0; 1765 op_req_q->reply_qid = reply_qid; 1766 spin_lock_init(&op_req_q->q_lock); 1767 1768 if (!op_req_q->q_segments) { 1769 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1770 if (retval) { 1771 mpi3mr_free_op_req_q_segments(mrioc, idx); 1772 goto out; 1773 } 1774 } 1775 1776 memset(&create_req, 0, sizeof(create_req)); 1777 mutex_lock(&mrioc->init_cmds.mutex); 1778 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1779 retval = -1; 1780 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1781 goto out_unlock; 1782 } 1783 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1784 mrioc->init_cmds.is_waiting = 1; 1785 mrioc->init_cmds.callback = NULL; 1786 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1787 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1788 create_req.queue_id = cpu_to_le16(req_qid); 1789 if (mrioc->enable_segqueue) { 1790 create_req.flags = 1791 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1792 create_req.base_address = cpu_to_le64( 1793 op_req_q->q_segment_list_dma); 1794 } else 1795 create_req.base_address = cpu_to_le64( 1796 op_req_q->q_segments[0].segment_dma); 1797 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1798 create_req.size = cpu_to_le16(op_req_q->num_requests); 1799 1800 init_completion(&mrioc->init_cmds.done); 1801 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1802 sizeof(create_req), 1); 1803 if (retval) { 1804 
ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1805 goto out_unlock; 1806 } 1807 wait_for_completion_timeout(&mrioc->init_cmds.done, 1808 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1809 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1810 ioc_err(mrioc, "create request queue timed out\n"); 1811 mpi3mr_check_rh_fault_ioc(mrioc, 1812 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); 1813 retval = -1; 1814 goto out_unlock; 1815 } 1816 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1817 != MPI3_IOCSTATUS_SUCCESS) { 1818 ioc_err(mrioc, 1819 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1820 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1821 mrioc->init_cmds.ioc_loginfo); 1822 retval = -1; 1823 goto out_unlock; 1824 } 1825 op_req_q->qid = req_qid; 1826 1827 out_unlock: 1828 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1829 mutex_unlock(&mrioc->init_cmds.mutex); 1830 out: 1831 1832 return retval; 1833 } 1834 1835 /** 1836 * mpi3mr_create_op_queues - create operational queue pairs 1837 * @mrioc: Adapter instance reference 1838 * 1839 * Allocate memory for operational queue meta data and call 1840 * create request and reply queue functions. 1841 * 1842 * Return: 0 on success, non-zero on failures. 
1843 */ 1844 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1845 { 1846 int retval = 0; 1847 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1848 1849 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1850 mrioc->facts.max_op_req_q); 1851 1852 msix_count_op_q = 1853 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1854 if (!mrioc->num_queues) 1855 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1856 num_queues = mrioc->num_queues; 1857 ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", 1858 num_queues); 1859 1860 if (!mrioc->req_qinfo) { 1861 mrioc->req_qinfo = kcalloc(num_queues, 1862 sizeof(struct op_req_qinfo), GFP_KERNEL); 1863 if (!mrioc->req_qinfo) { 1864 retval = -1; 1865 goto out_failed; 1866 } 1867 1868 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1869 num_queues, GFP_KERNEL); 1870 if (!mrioc->op_reply_qinfo) { 1871 retval = -1; 1872 goto out_failed; 1873 } 1874 } 1875 1876 if (mrioc->enable_segqueue) 1877 ioc_info(mrioc, 1878 "allocating operational queues through segmented queues\n"); 1879 1880 for (i = 0; i < num_queues; i++) { 1881 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1882 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1883 break; 1884 } 1885 if (mpi3mr_create_op_req_q(mrioc, i, 1886 mrioc->op_reply_qinfo[i].qid)) { 1887 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1888 mpi3mr_delete_op_reply_q(mrioc, i); 1889 break; 1890 } 1891 } 1892 1893 if (i == 0) { 1894 /* Not even one queue is created successfully*/ 1895 retval = -1; 1896 goto out_failed; 1897 } 1898 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1899 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1900 mrioc->num_op_reply_q); 1901 1902 return retval; 1903 out_failed: 1904 kfree(mrioc->req_qinfo); 1905 mrioc->req_qinfo = NULL; 1906 1907 kfree(mrioc->op_reply_qinfo); 1908 mrioc->op_reply_qinfo = NULL; 1909 1910 return retval; 1911 } 1912 1913 /** 1914 * mpi3mr_op_request_post - Post request to operational 
queue 1915 * @mrioc: Adapter reference 1916 * @op_req_q: Operational request queue info 1917 * @req: MPI3 request 1918 * 1919 * Post the MPI3 request into operational request queue and 1920 * inform the controller, if the queue is full return 1921 * appropriate error. 1922 * 1923 * Return: 0 on success, non-zero on failure. 1924 */ 1925 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 1926 struct op_req_qinfo *op_req_q, u8 *req) 1927 { 1928 u16 pi = 0, max_entries, reply_qidx = 0, midx; 1929 int retval = 0; 1930 unsigned long flags; 1931 u8 *req_entry; 1932 void *segment_base_addr; 1933 u16 req_sz = mrioc->facts.op_req_sz; 1934 struct segments *segments = op_req_q->q_segments; 1935 1936 reply_qidx = op_req_q->reply_qid - 1; 1937 1938 if (mrioc->unrecoverable) 1939 return -EFAULT; 1940 1941 spin_lock_irqsave(&op_req_q->q_lock, flags); 1942 pi = op_req_q->pi; 1943 max_entries = op_req_q->num_requests; 1944 1945 if (mpi3mr_check_req_qfull(op_req_q)) { 1946 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 1947 reply_qidx, mrioc->op_reply_q_offset); 1948 mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); 1949 1950 if (mpi3mr_check_req_qfull(op_req_q)) { 1951 retval = -EAGAIN; 1952 goto out; 1953 } 1954 } 1955 1956 if (mrioc->reset_in_progress) { 1957 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 1958 retval = -EAGAIN; 1959 goto out; 1960 } 1961 1962 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 1963 req_entry = (u8 *)segment_base_addr + 1964 ((pi % op_req_q->segment_qd) * req_sz); 1965 1966 memset(req_entry, 0, req_sz); 1967 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 1968 1969 if (++pi == max_entries) 1970 pi = 0; 1971 op_req_q->pi = pi; 1972 1973 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 1974 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 1975 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 1976 1977 writel(op_req_q->pi, 1978 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 1979 1980 out: 1981 
spin_unlock_irqrestore(&op_req_q->q_lock, flags); 1982 return retval; 1983 } 1984 1985 /** 1986 * mpi3mr_check_rh_fault_ioc - check reset history and fault 1987 * controller 1988 * @mrioc: Adapter instance reference 1989 * @reason_code, reason code for the fault. 1990 * 1991 * This routine will save snapdump and fault the controller with 1992 * the given reason code if it is not already in the fault or 1993 * not asynchronosuly reset. This will be used to handle 1994 * initilaization time faults/resets/timeout as in those cases 1995 * immediate soft reset invocation is not required. 1996 * 1997 * Return: None. 1998 */ 1999 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2000 { 2001 u32 ioc_status, host_diagnostic, timeout; 2002 2003 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2004 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2005 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2006 mpi3mr_print_fault_info(mrioc); 2007 return; 2008 } 2009 mpi3mr_set_diagsave(mrioc); 2010 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2011 reason_code); 2012 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2013 do { 2014 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2015 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 2016 break; 2017 msleep(100); 2018 } while (--timeout); 2019 } 2020 2021 /** 2022 * mpi3mr_sync_timestamp - Issue time stamp sync request 2023 * @mrioc: Adapter reference 2024 * 2025 * Issue IO unit control MPI request to synchornize firmware 2026 * timestamp with host time. 2027 * 2028 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single shared tracker; only one internal command
	 * may be outstanding at a time.
	 */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware timestamp is kept in milliseconds since the epoch */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Escalate to soft reset unless a reset already reaped
		 * this command (MPI3MR_CMD_RESET set by the reset path).
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	/* Firmware DMA-writes the manifest into the coherent buffer */
	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	/* Version display is best-effort: a non-success ioc_status after a
	 * completed command still returns 0 here.
	 */
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 fault, host_diagnostic;

	/* Push the host timestamp to firmware every
	 * MPI3MR_TSUPDATE_INTERVAL watchdog ticks.
	 */
	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Check for fault state every one second and issue Soft reset */
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT) {
		fault = readl(&mrioc->sysif_regs->fault) &
		    MPI3_SYSIF_FAULT_CODE_MASK;
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
			/* Firmware is saving diag data; hold off any reset
			 * action (up to MPI3_SYSIF_DIAG_SAVE_TIMEOUT ticks)
			 * and log only on the first tick.
			 */
			if (!mrioc->diagsave_timeout) {
				mpi3mr_print_fault_info(mrioc);
				ioc_warn(mrioc, "Diag save in progress\n");
			}
			if ((mrioc->diagsave_timeout++) <=
			    MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
				goto schedule_work;
		} else
			mpi3mr_print_fault_info(mrioc);
		mrioc->diagsave_timeout = 0;

		/* Power-cycle-required fault: nothing the host can do;
		 * mark dead and stop rescheduling the watchdog (goto out).
		 */
		if (fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) {
			ioc_info(mrioc,
			    "Factory Reset fault occurred marking controller as unrecoverable"
			    );
			mrioc->unrecoverable = 1;
			goto out;
		}

		/* A reset is already underway (by firmware or this driver);
		 * let that path finish - also without rescheduling.
		 */
		if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
		    (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
		    (mrioc->reset_in_progress))
			goto out;
		if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
		else
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_FAULT_WATCH, 0);
	}

schedule_work:
	/* watchdog_lock serializes against mpi3mr_stop_watchdog(), which
	 * NULLs watchdog_work_q before destroying it.
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
out:
	return;
}

/**
 * mpi3mr_start_watchdog - Start watchdog
 * @mrioc: Adapter instance reference
 *
 * Create and start the watchdog thread to monitor controller
 * faults.
 *
 * Return: Nothing.
 */
void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
{
	/* Already running */
	if (mrioc->watchdog_work_q)
		return;

	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
	snprintf(mrioc->watchdog_work_q_name,
	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
	    mrioc->id);
	mrioc->watchdog_work_q =
	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
	if (!mrioc->watchdog_work_q) {
		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}

	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
}

/**
 * mpi3mr_stop_watchdog - Stop watchdog
 * @mrioc: Adapter instance reference
 *
 * Stop the watchdog thread created to monitor controller
 * faults.
 *
 * Return: Nothing.
 */
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/* Clear the queue pointer under the lock so a concurrently running
	 * mpi3mr_watchdog_work() cannot requeue itself.
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	wq = mrioc->watchdog_work_q;
	mrioc->watchdog_work_q = NULL;
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpi3mr_kill_ioc - Kill the controller
 * @mrioc: Adapter instance reference
 * @reason: reason for the failure.
 *
 * If fault debug is enabled, display the fault info else issue
 * diag fault and freeze the system for controller debug
 * purpose.
 *
 * Return: Nothing.
 */
static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
{
	enum mpi3mr_iocstate ioc_state;

	/* No-op unless the fault_dbg module knob is set */
	if (!mrioc->fault_dbg)
		return;

	dump_stack();

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT)
		mpi3mr_print_fault_info(mrioc);
	else {
		ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
		    reason);
		mpi3mr_diagfault_reset_handler(mrioc, reason);
	}
	/* fault_dbg == 2: spin forever to freeze state for a debugger;
	 * any other non-zero value: panic the host.
	 */
	if (mrioc->fault_dbg == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Register the queues: depths packed as (replies << 16) | requests,
	 * then base addresses, then initial PI/CI.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Cache the firmware-written facts into the caller's buffer */
	memcpy(facts_data, (u8 *)data, data_len);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Check and set IOC facts DMA mask
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it .
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask: a facts-requested mask wider than or equal
	 * to the current one needs no change.
	 */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver .
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity check: firmware reports length in 4-byte dwords */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size is encoded as a power of two in
	 * the IOC configuration register; cross-check against facts.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never use more MSI-X vectors than firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));

	/* Reserve request slots for driver-internal commands */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	/* Shrink footprint when running as a kdump (reset_devices) kernel */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-init after reset) */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	/* One bit per device handle, rounded up to whole bytes */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	/* Queue depths carry one extra slot beyond the buffer counts; the
	 * extra entry is left zero when the queues are initialized.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used to validate firmware-supplied reply addresses */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/* NOTE(review): partial allocations are not freed here; presumably
	 * the caller's teardown path releases them - confirm against the
	 * cleanup code elsewhere in this file.
	 */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue: one DMA address per buffer,
	 * followed by a zero entry in the queue's extra slot.
	 */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue the same way */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	mpimr_initialize_reply_sbuf_queues(mrioc);

	/* Driver information page handed to firmware via DMA */
	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand all posted reply/sense entries to firmware by advancing the
	 * host indices past the entries written above.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Un mask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* event_masks covers 128 event IDs (four 32-bit words) */
	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));
	word = event / 32;

	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Push the driver's cached mask words (set via
	 * mpi3mr_unmask_events) to firmware.
	 */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/**
 * mpi3mr_send_event_ack - Send event acknowledgment
 * @mrioc: Adapter instance reference
 * @event: MPI3 event ID
 * @event_ctx: Event context
 *
 * Send event acknowledgment through admin queue and wait for
 * it to complete.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/**
 * mpi3mr_alloc_chain_bufs - Allocate chain buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate chain buffers and set a bitmap to indicate free
 * chain buffers. Chain buffers are used to pass the SGE
 * information along with MPI3 SCSI IO requests for host I/O.
 *
 * Return: 0 on success, non-zero on failure
 */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. re-init after reset) */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* Reserve extra chain frames when DIX protection is enabled, since
	 * protection information SGEs consume additional chain space.
	 */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	sz = MPI3MR_PAGE_SIZE_4K;
	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* One bit per chain buffer, rounded up to whole bytes */
	mrioc->chain_bitmap_sz = num_chains / 8;
	if (num_chains % 8)
		mrioc->chain_bitmap_sz++;
	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/* NOTE(review): partial allocations are left for the caller's
	 * teardown path to free - confirm against the cleanup code
	 * elsewhere in this file.
	 */
	retval = -1;
	return retval;
}

/**
 * mpi3mr_port_enable_complete - Mark port enable complete
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Call back for asynchronous port enable request sets the
 * driver command to indicate port enable request is complete.
 *
 * Return: Nothing
 */
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	/* A non-zero ioc_status marks the SCSI scan as failed */
	mrioc->scan_failed = drv_cmd->ioc_status;
	mrioc->scan_started = 0;
}

/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
3158 */ 3159 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) 3160 { 3161 struct mpi3_port_enable_request pe_req; 3162 int retval = 0; 3163 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3164 3165 memset(&pe_req, 0, sizeof(pe_req)); 3166 mutex_lock(&mrioc->init_cmds.mutex); 3167 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3168 retval = -1; 3169 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); 3170 mutex_unlock(&mrioc->init_cmds.mutex); 3171 goto out; 3172 } 3173 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3174 if (async) { 3175 mrioc->init_cmds.is_waiting = 0; 3176 mrioc->init_cmds.callback = mpi3mr_port_enable_complete; 3177 } else { 3178 mrioc->init_cmds.is_waiting = 1; 3179 mrioc->init_cmds.callback = NULL; 3180 init_completion(&mrioc->init_cmds.done); 3181 } 3182 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3183 pe_req.function = MPI3_FUNCTION_PORT_ENABLE; 3184 3185 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); 3186 if (retval) { 3187 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); 3188 goto out_unlock; 3189 } 3190 if (async) { 3191 mutex_unlock(&mrioc->init_cmds.mutex); 3192 goto out; 3193 } 3194 3195 wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); 3196 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3197 ioc_err(mrioc, "port enable timed out\n"); 3198 retval = -1; 3199 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); 3200 goto out_unlock; 3201 } 3202 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); 3203 3204 out_unlock: 3205 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3206 mutex_unlock(&mrioc->init_cmds.mutex); 3207 out: 3208 return retval; 3209 } 3210 3211 /* Protocol type to name mapper structure */ 3212 static const struct { 3213 u8 protocol; 3214 char *name; 3215 } mpi3mr_protocols[] = { 3216 { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, 3217 { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, 3218 { 
MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, 3219 }; 3220 3221 /* Capability to name mapper structure*/ 3222 static const struct { 3223 u32 capability; 3224 char *name; 3225 } mpi3mr_capabilities[] = { 3226 { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, 3227 }; 3228 3229 /** 3230 * mpi3mr_print_ioc_info - Display controller information 3231 * @mrioc: Adapter instance reference 3232 * 3233 * Display controller personalit, capability, supported 3234 * protocols etc. 3235 * 3236 * Return: Nothing 3237 */ 3238 static void 3239 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3240 { 3241 int i = 0, bytes_written = 0; 3242 char personality[16]; 3243 char protocol[50] = {0}; 3244 char capabilities[100] = {0}; 3245 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3246 3247 switch (mrioc->facts.personality) { 3248 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3249 strncpy(personality, "Enhanced HBA", sizeof(personality)); 3250 break; 3251 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3252 strncpy(personality, "RAID", sizeof(personality)); 3253 break; 3254 default: 3255 strncpy(personality, "Unknown", sizeof(personality)); 3256 break; 3257 } 3258 3259 ioc_info(mrioc, "Running in %s Personality", personality); 3260 3261 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3262 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3263 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3264 3265 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3266 if (mrioc->facts.protocol_flags & 3267 mpi3mr_protocols[i].protocol) { 3268 bytes_written += scnprintf(protocol + bytes_written, 3269 sizeof(protocol) - bytes_written, "%s%s", 3270 bytes_written ? 
"," : "", 3271 mpi3mr_protocols[i].name); 3272 } 3273 } 3274 3275 bytes_written = 0; 3276 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3277 if (mrioc->facts.protocol_flags & 3278 mpi3mr_capabilities[i].capability) { 3279 bytes_written += scnprintf(capabilities + bytes_written, 3280 sizeof(capabilities) - bytes_written, "%s%s", 3281 bytes_written ? "," : "", 3282 mpi3mr_capabilities[i].name); 3283 } 3284 } 3285 3286 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3287 protocol, capabilities); 3288 } 3289 3290 /** 3291 * mpi3mr_cleanup_resources - Free PCI resources 3292 * @mrioc: Adapter instance reference 3293 * 3294 * Unmap PCI device memory and disable PCI device. 3295 * 3296 * Return: 0 on success and non-zero on failure. 3297 */ 3298 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 3299 { 3300 struct pci_dev *pdev = mrioc->pdev; 3301 3302 mpi3mr_cleanup_isr(mrioc); 3303 3304 if (mrioc->sysif_regs) { 3305 iounmap((void __iomem *)mrioc->sysif_regs); 3306 mrioc->sysif_regs = NULL; 3307 } 3308 3309 if (pci_is_enabled(pdev)) { 3310 if (mrioc->bars) 3311 pci_release_selected_regions(pdev, mrioc->bars); 3312 pci_disable_device(pdev); 3313 } 3314 } 3315 3316 /** 3317 * mpi3mr_setup_resources - Enable PCI resources 3318 * @mrioc: Adapter instance reference 3319 * 3320 * Enable PCI device memory, MSI-x registers and set DMA mask. 3321 * 3322 * Return: 0 on success and non-zero on failure. 3323 */ 3324 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 3325 { 3326 struct pci_dev *pdev = mrioc->pdev; 3327 u32 memap_sz = 0; 3328 int i, retval = 0, capb = 0; 3329 u16 message_control; 3330 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 3331 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) && 3332 (sizeof(dma_addr_t) > 4)) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 3333 3334 if (pci_enable_device_mem(pdev)) { 3335 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 3336 retval = -ENODEV; 3337 goto out_failed; 3338 } 3339 3340 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3341 if (!capb) { 3342 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 3343 retval = -ENODEV; 3344 goto out_failed; 3345 } 3346 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 3347 3348 if (pci_request_selected_regions(pdev, mrioc->bars, 3349 mrioc->driver_name)) { 3350 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 3351 retval = -ENODEV; 3352 goto out_failed; 3353 } 3354 3355 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 3356 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3357 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 3358 memap_sz = pci_resource_len(pdev, i); 3359 mrioc->sysif_regs = 3360 ioremap(mrioc->sysif_regs_phys, memap_sz); 3361 break; 3362 } 3363 } 3364 3365 pci_set_master(pdev); 3366 3367 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 3368 if (retval) { 3369 if (dma_mask != DMA_BIT_MASK(32)) { 3370 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 3371 dma_mask = DMA_BIT_MASK(32); 3372 retval = dma_set_mask_and_coherent(&pdev->dev, 3373 dma_mask); 3374 } 3375 if (retval) { 3376 mrioc->dma_mask = 0; 3377 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 3378 goto out_failed; 3379 } 3380 } 3381 mrioc->dma_mask = dma_mask; 3382 3383 if (!mrioc->sysif_regs) { 3384 ioc_err(mrioc, 3385 "Unable to map adapter memory or resource not found\n"); 3386 retval = -EINVAL; 3387 goto out_failed; 3388 } 3389 3390 pci_read_config_word(pdev, capb + 2, &message_control); 3391 mrioc->msix_count = (message_control & 0x3FF) + 1; 3392 3393 pci_save_state(pdev); 3394 3395 pci_set_drvdata(pdev, mrioc->shost); 3396 3397 mpi3mr_ioc_disable_intr(mrioc); 3398 3399 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 3400 (unsigned long long)mrioc->sysif_regs_phys, 
3401 mrioc->sysif_regs, memap_sz); 3402 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 3403 mrioc->msix_count); 3404 return retval; 3405 3406 out_failed: 3407 mpi3mr_cleanup_resources(mrioc); 3408 return retval; 3409 } 3410 3411 /** 3412 * mpi3mr_enable_events - Enable required events 3413 * @mrioc: Adapter instance reference 3414 * 3415 * This routine unmasks the events required by the driver by 3416 * sennding appropriate event mask bitmapt through an event 3417 * notification request. 3418 * 3419 * Return: 0 on success and non-zero on failure. 3420 */ 3421 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) 3422 { 3423 int retval = 0; 3424 u32 i; 3425 3426 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3427 mrioc->event_masks[i] = -1; 3428 3429 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); 3430 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); 3431 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); 3432 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); 3433 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 3434 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); 3435 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 3436 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); 3437 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 3438 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); 3439 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); 3440 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); 3441 3442 retval = mpi3mr_issue_event_notification(mrioc); 3443 if (retval) 3444 ioc_err(mrioc, "failed to issue event notification %d\n", 3445 retval); 3446 return retval; 3447 } 3448 3449 /** 3450 * mpi3mr_init_ioc - Initialize the controller 3451 * @mrioc: Adapter instance reference 3452 * @init_type: Flag to indicate is the init_type 3453 * 3454 * This the controller initialization routine, executed 
either 3455 * after soft reset or from pci probe callback. 3456 * Setup the required resources, memory map the controller 3457 * registers, create admin and operational reply queue pairs, 3458 * allocate required memory for reply pool, sense buffer pool, 3459 * issue IOC init request to the firmware, unmask the events and 3460 * issue port enable to discover SAS/SATA/NVMe devies and RAID 3461 * volumes. 3462 * 3463 * Return: 0 on success and non-zero on failure. 3464 */ 3465 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 3466 { 3467 int retval = 0; 3468 u8 retry = 0; 3469 struct mpi3_ioc_facts_data facts_data; 3470 3471 retry_init: 3472 retval = mpi3mr_bring_ioc_ready(mrioc); 3473 if (retval) { 3474 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 3475 retval); 3476 goto out_failed_noretry; 3477 } 3478 3479 retval = mpi3mr_setup_isr(mrioc, 1); 3480 if (retval) { 3481 ioc_err(mrioc, "Failed to setup ISR error %d\n", 3482 retval); 3483 goto out_failed_noretry; 3484 } 3485 3486 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3487 if (retval) { 3488 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 3489 retval); 3490 goto out_failed; 3491 } 3492 3493 mpi3mr_process_factsdata(mrioc, &facts_data); 3494 3495 retval = mpi3mr_check_reset_dma_mask(mrioc); 3496 if (retval) { 3497 ioc_err(mrioc, "Resetting dma mask failed %d\n", 3498 retval); 3499 goto out_failed_noretry; 3500 } 3501 3502 mpi3mr_print_ioc_info(mrioc); 3503 3504 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 3505 if (retval) { 3506 ioc_err(mrioc, 3507 "%s :Failed to allocated reply sense buffers %d\n", 3508 __func__, retval); 3509 goto out_failed_noretry; 3510 } 3511 3512 retval = mpi3mr_alloc_chain_bufs(mrioc); 3513 if (retval) { 3514 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 3515 retval); 3516 goto out_failed_noretry; 3517 } 3518 3519 retval = mpi3mr_issue_iocinit(mrioc); 3520 if (retval) { 3521 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 3522 retval); 3523 goto out_failed; 3524 } 
3525 3526 retval = mpi3mr_print_pkg_ver(mrioc); 3527 if (retval) { 3528 ioc_err(mrioc, "failed to get package version\n"); 3529 goto out_failed; 3530 } 3531 3532 retval = mpi3mr_setup_isr(mrioc, 0); 3533 if (retval) { 3534 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 3535 retval); 3536 goto out_failed_noretry; 3537 } 3538 3539 retval = mpi3mr_create_op_queues(mrioc); 3540 if (retval) { 3541 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 3542 retval); 3543 goto out_failed; 3544 } 3545 3546 retval = mpi3mr_enable_events(mrioc); 3547 if (retval) { 3548 ioc_err(mrioc, "failed to enable events %d\n", 3549 retval); 3550 goto out_failed; 3551 } 3552 3553 ioc_info(mrioc, "controller initialization completed successfully\n"); 3554 return retval; 3555 out_failed: 3556 if (retry < 2) { 3557 retry++; 3558 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", 3559 retry); 3560 mpi3mr_memset_buffers(mrioc); 3561 goto retry_init; 3562 } 3563 out_failed_noretry: 3564 ioc_err(mrioc, "controller initialization failed\n"); 3565 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3566 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3567 mrioc->unrecoverable = 1; 3568 return retval; 3569 } 3570 3571 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3572 { 3573 3574 return 0; 3575 } 3576 3577 /** 3578 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 3579 * segments 3580 * @mrioc: Adapter instance reference 3581 * @qidx: Operational reply queue index 3582 * 3583 * Return: Nothing. 
3584 */ 3585 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3586 { 3587 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 3588 struct segments *segments; 3589 int i, size; 3590 3591 if (!op_reply_q->q_segments) 3592 return; 3593 3594 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 3595 segments = op_reply_q->q_segments; 3596 for (i = 0; i < op_reply_q->num_segments; i++) 3597 memset(segments[i].segment, 0, size); 3598 } 3599 3600 /** 3601 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 3602 * segments 3603 * @mrioc: Adapter instance reference 3604 * @qidx: Operational request queue index 3605 * 3606 * Return: Nothing. 3607 */ 3608 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3609 { 3610 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 3611 struct segments *segments; 3612 int i, size; 3613 3614 if (!op_req_q->q_segments) 3615 return; 3616 3617 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 3618 segments = op_req_q->q_segments; 3619 for (i = 0; i < op_req_q->num_segments; i++) 3620 memset(segments[i].segment, 0, size); 3621 } 3622 3623 /** 3624 * mpi3mr_memset_buffers - memset memory for a controller 3625 * @mrioc: Adapter instance reference 3626 * 3627 * clear all the memory allocated for a controller, typically 3628 * called post reset to reuse the memory allocated during the 3629 * controller init. 3630 * 3631 * Return: Nothing. 
3632 */ 3633 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) 3634 { 3635 u16 i; 3636 3637 mrioc->change_count = 0; 3638 if (mrioc->admin_req_base) 3639 memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); 3640 if (mrioc->admin_reply_base) 3641 memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); 3642 3643 if (mrioc->init_cmds.reply) { 3644 memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); 3645 memset(mrioc->host_tm_cmds.reply, 0, 3646 sizeof(*mrioc->host_tm_cmds.reply)); 3647 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 3648 memset(mrioc->dev_rmhs_cmds[i].reply, 0, 3649 sizeof(*mrioc->dev_rmhs_cmds[i].reply)); 3650 memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); 3651 memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); 3652 } 3653 3654 for (i = 0; i < mrioc->num_queues; i++) { 3655 mrioc->op_reply_qinfo[i].qid = 0; 3656 mrioc->op_reply_qinfo[i].ci = 0; 3657 mrioc->op_reply_qinfo[i].num_replies = 0; 3658 mrioc->op_reply_qinfo[i].ephase = 0; 3659 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 3660 atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 3661 mpi3mr_memset_op_reply_q_buffers(mrioc, i); 3662 3663 mrioc->req_qinfo[i].ci = 0; 3664 mrioc->req_qinfo[i].pi = 0; 3665 mrioc->req_qinfo[i].num_requests = 0; 3666 mrioc->req_qinfo[i].qid = 0; 3667 mrioc->req_qinfo[i].reply_qid = 0; 3668 spin_lock_init(&mrioc->req_qinfo[i].q_lock); 3669 mpi3mr_memset_op_req_q_buffers(mrioc, i); 3670 } 3671 } 3672 3673 /** 3674 * mpi3mr_free_mem - Free memory allocated for a controller 3675 * @mrioc: Adapter instance reference 3676 * 3677 * Free all the memory allocated for a controller. 3678 * 3679 * Return: Nothing. 
3680 */ 3681 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) 3682 { 3683 u16 i; 3684 struct mpi3mr_intr_info *intr_info; 3685 3686 if (mrioc->sense_buf_pool) { 3687 if (mrioc->sense_buf) 3688 dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, 3689 mrioc->sense_buf_dma); 3690 dma_pool_destroy(mrioc->sense_buf_pool); 3691 mrioc->sense_buf = NULL; 3692 mrioc->sense_buf_pool = NULL; 3693 } 3694 if (mrioc->sense_buf_q_pool) { 3695 if (mrioc->sense_buf_q) 3696 dma_pool_free(mrioc->sense_buf_q_pool, 3697 mrioc->sense_buf_q, mrioc->sense_buf_q_dma); 3698 dma_pool_destroy(mrioc->sense_buf_q_pool); 3699 mrioc->sense_buf_q = NULL; 3700 mrioc->sense_buf_q_pool = NULL; 3701 } 3702 3703 if (mrioc->reply_buf_pool) { 3704 if (mrioc->reply_buf) 3705 dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, 3706 mrioc->reply_buf_dma); 3707 dma_pool_destroy(mrioc->reply_buf_pool); 3708 mrioc->reply_buf = NULL; 3709 mrioc->reply_buf_pool = NULL; 3710 } 3711 if (mrioc->reply_free_q_pool) { 3712 if (mrioc->reply_free_q) 3713 dma_pool_free(mrioc->reply_free_q_pool, 3714 mrioc->reply_free_q, mrioc->reply_free_q_dma); 3715 dma_pool_destroy(mrioc->reply_free_q_pool); 3716 mrioc->reply_free_q = NULL; 3717 mrioc->reply_free_q_pool = NULL; 3718 } 3719 3720 for (i = 0; i < mrioc->num_op_req_q; i++) 3721 mpi3mr_free_op_req_q_segments(mrioc, i); 3722 3723 for (i = 0; i < mrioc->num_op_reply_q; i++) 3724 mpi3mr_free_op_reply_q_segments(mrioc, i); 3725 3726 for (i = 0; i < mrioc->intr_info_count; i++) { 3727 intr_info = mrioc->intr_info + i; 3728 intr_info->op_reply_q = NULL; 3729 } 3730 3731 kfree(mrioc->req_qinfo); 3732 mrioc->req_qinfo = NULL; 3733 mrioc->num_op_req_q = 0; 3734 3735 kfree(mrioc->op_reply_qinfo); 3736 mrioc->op_reply_qinfo = NULL; 3737 mrioc->num_op_reply_q = 0; 3738 3739 kfree(mrioc->init_cmds.reply); 3740 mrioc->init_cmds.reply = NULL; 3741 3742 kfree(mrioc->host_tm_cmds.reply); 3743 mrioc->host_tm_cmds.reply = NULL; 3744 3745 kfree(mrioc->removepend_bitmap); 3746 
mrioc->removepend_bitmap = NULL; 3747 3748 kfree(mrioc->devrem_bitmap); 3749 mrioc->devrem_bitmap = NULL; 3750 3751 kfree(mrioc->chain_bitmap); 3752 mrioc->chain_bitmap = NULL; 3753 3754 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 3755 kfree(mrioc->dev_rmhs_cmds[i].reply); 3756 mrioc->dev_rmhs_cmds[i].reply = NULL; 3757 } 3758 3759 if (mrioc->chain_buf_pool) { 3760 for (i = 0; i < mrioc->chain_buf_count; i++) { 3761 if (mrioc->chain_sgl_list[i].addr) { 3762 dma_pool_free(mrioc->chain_buf_pool, 3763 mrioc->chain_sgl_list[i].addr, 3764 mrioc->chain_sgl_list[i].dma_addr); 3765 mrioc->chain_sgl_list[i].addr = NULL; 3766 } 3767 } 3768 dma_pool_destroy(mrioc->chain_buf_pool); 3769 mrioc->chain_buf_pool = NULL; 3770 } 3771 3772 kfree(mrioc->chain_sgl_list); 3773 mrioc->chain_sgl_list = NULL; 3774 3775 if (mrioc->admin_reply_base) { 3776 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 3777 mrioc->admin_reply_base, mrioc->admin_reply_dma); 3778 mrioc->admin_reply_base = NULL; 3779 } 3780 if (mrioc->admin_req_base) { 3781 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 3782 mrioc->admin_req_base, mrioc->admin_req_dma); 3783 mrioc->admin_req_base = NULL; 3784 } 3785 } 3786 3787 /** 3788 * mpi3mr_issue_ioc_shutdown - shutdown controller 3789 * @mrioc: Adapter instance reference 3790 * 3791 * Send shutodwn notification to the controller and wait for the 3792 * shutdown_timeout for it to be completed. 3793 * 3794 * Return: Nothing. 
3795 */ 3796 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc) 3797 { 3798 u32 ioc_config, ioc_status; 3799 u8 retval = 1; 3800 u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10; 3801 3802 ioc_info(mrioc, "Issuing shutdown Notification\n"); 3803 if (mrioc->unrecoverable) { 3804 ioc_warn(mrioc, 3805 "IOC is unrecoverable shutdown is not issued\n"); 3806 return; 3807 } 3808 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 3809 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 3810 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) { 3811 ioc_info(mrioc, "shutdown already in progress\n"); 3812 return; 3813 } 3814 3815 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 3816 ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL; 3817 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ; 3818 3819 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 3820 3821 if (mrioc->facts.shutdown_timeout) 3822 timeout = mrioc->facts.shutdown_timeout * 10; 3823 3824 do { 3825 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 3826 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 3827 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) { 3828 retval = 0; 3829 break; 3830 } 3831 msleep(100); 3832 } while (--timeout); 3833 3834 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 3835 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 3836 3837 if (retval) { 3838 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK) 3839 == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) 3840 ioc_warn(mrioc, 3841 "shutdown still in progress after timeout\n"); 3842 } 3843 3844 ioc_info(mrioc, 3845 "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n", 3846 (!retval) ? 
"successful" : "failed", ioc_status, 3847 ioc_config); 3848 } 3849 3850 /** 3851 * mpi3mr_cleanup_ioc - Cleanup controller 3852 * @mrioc: Adapter instance reference 3853 3854 * controller cleanup handler, Message unit reset or soft reset 3855 * and shutdown notification is issued to the controller. 3856 * 3857 * Return: Nothing. 3858 */ 3859 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc) 3860 { 3861 enum mpi3mr_iocstate ioc_state; 3862 3863 dprint_exit(mrioc, "cleaning up the controller\n"); 3864 mpi3mr_ioc_disable_intr(mrioc); 3865 3866 ioc_state = mpi3mr_get_iocstate(mrioc); 3867 3868 if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) && 3869 (ioc_state == MRIOC_STATE_READY)) { 3870 if (mpi3mr_issue_and_process_mur(mrioc, 3871 MPI3MR_RESET_FROM_CTLR_CLEANUP)) 3872 mpi3mr_issue_reset(mrioc, 3873 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 3874 MPI3MR_RESET_FROM_MUR_FAILURE); 3875 mpi3mr_issue_ioc_shutdown(mrioc); 3876 } 3877 dprint_exit(mrioc, "controller cleanup completed\n"); 3878 } 3879 3880 /** 3881 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command 3882 * @mrioc: Adapter instance reference 3883 * @cmdptr: Internal command tracker 3884 * 3885 * Complete an internal driver commands with state indicating it 3886 * is completed due to reset. 3887 * 3888 * Return: Nothing. 3889 */ 3890 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc, 3891 struct mpi3mr_drv_cmd *cmdptr) 3892 { 3893 if (cmdptr->state & MPI3MR_CMD_PENDING) { 3894 cmdptr->state |= MPI3MR_CMD_RESET; 3895 cmdptr->state &= ~MPI3MR_CMD_PENDING; 3896 if (cmdptr->is_waiting) { 3897 complete(&cmdptr->done); 3898 cmdptr->is_waiting = 0; 3899 } else if (cmdptr->callback) 3900 cmdptr->callback(mrioc, cmdptr); 3901 } 3902 } 3903 3904 /** 3905 * mpi3mr_flush_drv_cmds - Flush internaldriver commands 3906 * @mrioc: Adapter instance reference 3907 * 3908 * Flush all internal driver commands post reset 3909 * 3910 * Return: Nothing. 
3911 */ 3912 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) 3913 { 3914 struct mpi3mr_drv_cmd *cmdptr; 3915 u8 i; 3916 3917 cmdptr = &mrioc->init_cmds; 3918 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 3919 cmdptr = &mrioc->host_tm_cmds; 3920 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 3921 3922 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 3923 cmdptr = &mrioc->dev_rmhs_cmds[i]; 3924 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 3925 } 3926 } 3927 3928 /** 3929 * mpi3mr_diagfault_reset_handler - Diag fault reset handler 3930 * @mrioc: Adapter instance reference 3931 * @reset_reason: Reset reason code 3932 * 3933 * This is an handler for issuing diag fault reset from the 3934 * applications through IOCTL path to stop the execution of the 3935 * controller 3936 * 3937 * Return: 0 on success, non-zero on failure. 3938 */ 3939 int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc, 3940 u32 reset_reason) 3941 { 3942 int retval = 0; 3943 3944 ioc_info(mrioc, "Entry: reason code: %s\n", 3945 mpi3mr_reset_rc_name(reset_reason)); 3946 mrioc->reset_in_progress = 1; 3947 3948 mpi3mr_ioc_disable_intr(mrioc); 3949 3950 retval = mpi3mr_issue_reset(mrioc, 3951 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 3952 3953 if (retval) { 3954 ioc_err(mrioc, "The diag fault reset failed: reason %d\n", 3955 reset_reason); 3956 mpi3mr_ioc_enable_intr(mrioc); 3957 } 3958 ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED")); 3959 mrioc->reset_in_progress = 0; 3960 return retval; 3961 } 3962 3963 /** 3964 * mpi3mr_soft_reset_handler - Reset the controller 3965 * @mrioc: Adapter instance reference 3966 * @reset_reason: Reset reason code 3967 * @snapdump: Flag to generate snapdump in firmware or not 3968 * 3969 * This is an handler for recovering controller by issuing soft 3970 * reset are diag fault reset. This is a blocking function and 3971 * when one reset is executed if any other resets they will be 3972 * blocked. 
All IOCTLs/IO will be blocked during the reset. If 3973 * controller reset is successful then the controller will be 3974 * reinitalized, otherwise the controller will be marked as not 3975 * recoverable 3976 * 3977 * In snapdump bit is set, the controller is issued with diag 3978 * fault reset so that the firmware can create a snap dump and 3979 * post that the firmware will result in F000 fault and the 3980 * driver will issue soft reset to recover from that. 3981 * 3982 * Return: 0 on success, non-zero on failure. 3983 */ 3984 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, 3985 u32 reset_reason, u8 snapdump) 3986 { 3987 int retval = 0, i; 3988 unsigned long flags; 3989 u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 3990 3991 if (mrioc->fault_dbg) { 3992 if (snapdump) 3993 mpi3mr_set_diagsave(mrioc); 3994 mpi3mr_kill_ioc(mrioc, reset_reason); 3995 } 3996 3997 /* 3998 * Block new resets until the currently executing one is finished and 3999 * return the status of the existing reset for all blocked resets 4000 */ 4001 if (!mutex_trylock(&mrioc->reset_mutex)) { 4002 ioc_info(mrioc, "Another reset in progress\n"); 4003 return -1; 4004 } 4005 mrioc->reset_in_progress = 1; 4006 4007 if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) && 4008 (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) { 4009 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 4010 mrioc->event_masks[i] = -1; 4011 4012 retval = mpi3mr_issue_event_notification(mrioc); 4013 4014 if (retval) { 4015 ioc_err(mrioc, 4016 "Failed to turn off events prior to reset %d\n", 4017 retval); 4018 } 4019 } 4020 4021 mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); 4022 4023 mpi3mr_ioc_disable_intr(mrioc); 4024 4025 if (snapdump) { 4026 mpi3mr_set_diagsave(mrioc); 4027 retval = mpi3mr_issue_reset(mrioc, 4028 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 4029 if (!retval) { 4030 do { 4031 host_diagnostic = 4032 
readl(&mrioc->sysif_regs->host_diagnostic); 4033 if (!(host_diagnostic & 4034 MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 4035 break; 4036 msleep(100); 4037 } while (--timeout); 4038 } 4039 } 4040 4041 retval = mpi3mr_issue_reset(mrioc, 4042 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason); 4043 if (retval) { 4044 ioc_err(mrioc, "Failed to issue soft reset to the ioc\n"); 4045 goto out; 4046 } 4047 4048 mpi3mr_flush_delayed_rmhs_list(mrioc); 4049 mpi3mr_flush_drv_cmds(mrioc); 4050 memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); 4051 memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); 4052 mpi3mr_cleanup_fwevt_list(mrioc); 4053 mpi3mr_flush_host_io(mrioc); 4054 mpi3mr_invalidate_devhandles(mrioc); 4055 mpi3mr_memset_buffers(mrioc); 4056 retval = mpi3mr_reinit_ioc(mrioc, 0); 4057 if (retval) { 4058 pr_err(IOCNAME "reinit after soft reset failed: reason %d\n", 4059 mrioc->name, reset_reason); 4060 goto out; 4061 } 4062 ssleep(10); 4063 4064 out: 4065 if (!retval) { 4066 mrioc->reset_in_progress = 0; 4067 scsi_unblock_requests(mrioc->shost); 4068 mpi3mr_rfresh_tgtdevs(mrioc); 4069 mrioc->ts_update_counter = 0; 4070 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 4071 if (mrioc->watchdog_work_q) 4072 queue_delayed_work(mrioc->watchdog_work_q, 4073 &mrioc->watchdog_work, 4074 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 4075 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 4076 } else { 4077 mpi3mr_issue_reset(mrioc, 4078 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason); 4079 mrioc->unrecoverable = 1; 4080 mrioc->reset_in_progress = 0; 4081 retval = -1; 4082 } 4083 4084 mutex_unlock(&mrioc->reset_mutex); 4085 ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED")); 4086 return retval; 4087 } 4088