// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2021 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write: native writeq() when the platform provides it. */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * 64-bit MMIO write fallback: two 32-bit writes, low dword first.
 * NOTE(review): not atomic with respect to the device; assumed acceptable
 * for the registers this driver targets — confirm against the MPI3 spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/*
 * Check whether an operational request queue is full.  The ring is full
 * when the producer index is exactly one behind the consumer index,
 * including the wrap-around case (pi at last slot, ci at 0).
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/* Wait for any in-flight interrupt handlers of this adapter to complete. */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/*
 * Logically disable interrupt handling (ISRs bail out when intr_enabled
 * is clear) and then wait for already-running handlers to finish.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/* Re-enable logical interrupt handling. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/*
 * Tear down ISR state: disable interrupts, free every registered IRQ,
 * release the intr_info array and the PCI IRQ vectors.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	pci_free_irq_vectors(mrioc->pdev);
}

/*
 * Populate a single simple SGE at @paddr with the given flags, length
 * and DMA address (fields stored little-endian as the firmware expects).
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/* Build a zero-length end-of-list SGE (address -1) at @paddr. */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/*
 * Translate a reply frame DMA address supplied by the firmware back to
 * its host virtual address within the reply buffer pool, or NULL when
 * the address is zero or outside the pool's range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/*
 * Translate a sense buffer DMA address to its host virtual address.
 * NOTE(review): unlike the reply-buffer variant above, no upper-bound
 * check is done here — verify the firmware cannot hand back an address
 * outside the sense pool.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/*
 * Return a consumed reply frame to the reply free queue and notify the
 * firmware by writing the new host index; serialized by
 * reply_free_queue_lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/*
 * Return a consumed sense buffer to the sense buffer free queue and
 * notify the firmware; serialized by sbq_lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/*
 * Log a human-readable description of an event notification.  Events
 * with interesting payloads are decoded and printed directly (and the
 * function returns); the rest fall through to a one-line name.
 *
 * NOTE(review): dev_handle fields below are __le16 in the MPI3 structs;
 * printing them without le16_to_cpu() is wrong on big-endian hosts —
 * confirm and convert.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/*
 * Handle an event notification reply: record the IOC change count,
 * log the event, then dispatch it to the OS-level event handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/*
 * Map a host tag from a reply back to its driver command tracker.
 * The INVALID tag carries unsolicited replies; event notifications
 * arriving that way are dispatched here and NULL is returned.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	return NULL;
}

/*
 * Decode one admin reply descriptor, extract host tag / IOC status /
 * log info (and, for address replies, the full reply frame and any
 * sense buffer), then complete the matching driver command.  On exit
 * *reply_dma holds the reply frame address to repost (0 if none); any
 * sense buffer is reposted here.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			/* Wake a synchronous waiter, else run the callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * Drain the admin reply queue: consume descriptors while their phase
 * bit matches the expected phase, processing each and reposting reply
 * frames, then publish the new consumer index to the firmware.
 * Returns the number of replies processed.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Phase flips on every wrap of the circular queue. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/* Queue is segmented: locate the segment, then index within it. */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/*
 * Drain one operational reply queue using the phase-bit protocol.
 * The atomic in_use counter guarantees a single concurrent consumer
 * per queue; processing stops early (and IRQ polling is enabled) once
 * more than max_host_ios replies were handled, to avoid CPU lockup.
 * Returns the number of replies processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may consume this queue at a time. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* Phase flips on every wrap of the circular queue. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/*
 * Primary (hard) IRQ handler body: MSI-X vector 0 additionally services
 * the admin reply queue; every vector services its operational reply
 * queue if one is attached.  Returns IRQ_HANDLED when any reply was
 * processed, IRQ_NONE otherwise.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/*
 * Hard IRQ handler registered with request_threaded_irq(): runs the
 * primary handler, and when IRQ polling was enabled and I/Os are still
 * pending, masks the vector and wakes the threaded handler.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Vector is re-enabled by mpi3mr_isr_poll() when polling ends. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc, intr_info);

		usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	intr_info->op_reply_q->enable_irq_poll = false;
	/* Re-enable the vector masked by mpi3mr_isr(). */
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 *
 * Return: 0 on success and non zero on failures.
632 */ 633 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 634 { 635 struct pci_dev *pdev = mrioc->pdev; 636 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 637 int retval = 0; 638 639 intr_info->mrioc = mrioc; 640 intr_info->msix_index = index; 641 intr_info->op_reply_q = NULL; 642 643 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 644 mrioc->driver_name, mrioc->id, index); 645 646 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 647 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 648 if (retval) { 649 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 650 intr_info->name, pci_irq_vector(pdev, index)); 651 return retval; 652 } 653 654 return retval; 655 } 656 657 /** 658 * mpi3mr_setup_isr - Setup ISR for the controller 659 * @mrioc: Adapter instance reference 660 * @setup_one: Request one IRQ or more 661 * 662 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 663 * 664 * Return: 0 on success and non zero on failures. 665 */ 666 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 667 { 668 unsigned int irq_flags = PCI_IRQ_MSIX; 669 int max_vectors; 670 int retval; 671 int i; 672 struct irq_affinity desc = { .pre_vectors = 1}; 673 674 mpi3mr_cleanup_isr(mrioc); 675 676 if (setup_one || reset_devices) 677 max_vectors = 1; 678 else { 679 max_vectors = 680 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 681 682 ioc_info(mrioc, 683 "MSI-X vectors supported: %d, no of cores: %d,", 684 mrioc->msix_count, mrioc->cpu_count); 685 ioc_info(mrioc, 686 "MSI-x vectors requested: %d\n", max_vectors); 687 } 688 689 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 690 691 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 692 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 693 1, max_vectors, irq_flags, &desc); 694 if (retval < 0) { 695 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 696 goto out_failed; 697 } 698 if (retval != max_vectors) { 699 ioc_info(mrioc, 700 "allocated vectors (%d) are less than configured (%d)\n", 701 retval, max_vectors); 702 /* 703 * If only one MSI-x is allocated, then MSI-x 0 will be shared 704 * between Admin queue and operational queue 705 */ 706 if (retval == 1) 707 mrioc->op_reply_q_offset = 0; 708 709 max_vectors = retval; 710 } 711 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 712 GFP_KERNEL); 713 if (!mrioc->intr_info) { 714 retval = -ENOMEM; 715 pci_free_irq_vectors(mrioc->pdev); 716 goto out_failed; 717 } 718 for (i = 0; i < max_vectors; i++) { 719 retval = mpi3mr_request_irq(mrioc, i); 720 if (retval) { 721 mrioc->intr_info_count = i; 722 goto out_failed; 723 } 724 } 725 mrioc->intr_info_count = max_vectors; 726 mpi3mr_ioc_enable_intr(mrioc); 727 return 0; 728 729 out_failed: 730 mpi3mr_cleanup_isr(mrioc); 731 732 return retval; 733 } 734 735 static const struct { 736 enum mpi3mr_iocstate value; 737 char *name; 738 } mrioc_states[] = { 739 { MRIOC_STATE_READY, "ready" }, 740 { MRIOC_STATE_FAULT, "fault" }, 741 { MRIOC_STATE_RESET, "reset" }, 742 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 743 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 744 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 745 }; 746 747 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 748 { 749 int i; 750 char *name = NULL; 751 752 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 753 if (mrioc_states[i].value == mrioc_state) { 754 name = mrioc_states[i].name; 755 break; 756 } 757 } 758 return name; 759 } 760 761 /* Reset reason to name mapper structure*/ 762 static const struct { 763 enum mpi3mr_reset_reason value; 764 char *name; 765 } mpi3mr_reset_reason_codes[] = { 766 { 
MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 767 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 768 { MPI3MR_RESET_FROM_IOCTL, "application invocation" }, 769 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 770 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 771 { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" }, 772 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 773 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" }, 774 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 775 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 776 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 777 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 778 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 779 { 780 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 781 "create request queue timeout" 782 }, 783 { 784 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 785 "create reply queue timeout" 786 }, 787 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 788 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 789 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 790 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 791 { 792 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 793 "component image activation timeout" 794 }, 795 { 796 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 797 "get package version timeout" 798 }, 799 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 800 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 801 }; 802 803 /** 804 * mpi3mr_reset_rc_name - get reset reason code name 805 * @reason_code: reset reason code value 806 * 807 * Map reset reason to an NULL terminated ASCII string 808 * 809 * Return: name corresponding to reset reason value or NULL. 
810 */ 811 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 812 { 813 int i; 814 char *name = NULL; 815 816 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 817 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 818 name = mpi3mr_reset_reason_codes[i].name; 819 break; 820 } 821 } 822 return name; 823 } 824 825 /* Reset type to name mapper structure*/ 826 static const struct { 827 u16 reset_type; 828 char *name; 829 } mpi3mr_reset_types[] = { 830 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 831 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 832 }; 833 834 /** 835 * mpi3mr_reset_type_name - get reset type name 836 * @reset_type: reset type value 837 * 838 * Map reset type to an NULL terminated ASCII string 839 * 840 * Return: name corresponding to reset type value or NULL. 841 */ 842 static const char *mpi3mr_reset_type_name(u16 reset_type) 843 { 844 int i; 845 char *name = NULL; 846 847 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 848 if (mpi3mr_reset_types[i].reset_type == reset_type) { 849 name = mpi3mr_reset_types[i].name; 850 break; 851 } 852 } 853 return name; 854 } 855 856 /** 857 * mpi3mr_print_fault_info - Display fault information 858 * @mrioc: Adapter instance reference 859 * 860 * Display the controller fault information if there is a 861 * controller fault. 862 * 863 * Return: Nothing. 
864 */ 865 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 866 { 867 u32 ioc_status, code, code1, code2, code3; 868 869 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 870 871 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 872 code = readl(&mrioc->sysif_regs->fault); 873 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 874 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 875 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 876 877 ioc_info(mrioc, 878 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 879 code, code1, code2, code3); 880 } 881 } 882 883 /** 884 * mpi3mr_get_iocstate - Get IOC State 885 * @mrioc: Adapter instance reference 886 * 887 * Return a proper IOC state enum based on the IOC status and 888 * IOC configuration and unrcoverable state of the controller. 889 * 890 * Return: Current IOC state. 891 */ 892 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 893 { 894 u32 ioc_status, ioc_config; 895 u8 ready, enabled; 896 897 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 898 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 899 900 if (mrioc->unrecoverable) 901 return MRIOC_STATE_UNRECOVERABLE; 902 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 903 return MRIOC_STATE_FAULT; 904 905 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 906 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 907 908 if (ready && enabled) 909 return MRIOC_STATE_READY; 910 if ((!ready) && (!enabled)) 911 return MRIOC_STATE_RESET; 912 if ((!ready) && (enabled)) 913 return MRIOC_STATE_BECOMING_READY; 914 915 return MRIOC_STATE_RESET_REQUESTED; 916 } 917 918 /** 919 * mpi3mr_clear_reset_history - clear reset history 920 * @mrioc: Adapter instance reference 921 * 922 * Write the reset history bit in IOC status to clear the bit, 923 * if it is already set. 924 * 925 * Return: Nothing. 
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Bit is write-1-to-clear; writing the read-back value clears it. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad, then clear ENABLE_IOC to start MUR. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* ready_timeout is in seconds; poll every 100ms. */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			/* MUR done: not ready, not faulted, not enabled. */
			if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
			    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
			    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
				retval = 0;
				break;
			}
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, timeout;
	enum mpi3mr_iocstate current_state;

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* ready_timeout is in seconds; poll every 100ms. */
	timeout = mrioc->ready_timeout * 10;
	do {
		current_state = mpi3mr_get_iocstate(mrioc);
		if (current_state == MRIOC_STATE_READY)
			return 0;
		msleep(100);
	} while (--timeout);

	return -1;
}

/**
 * mpi3mr_soft_reset_success - Check softreset is success or not
 * @ioc_status: IOC status register value
 * @ioc_config: IOC config register value
 *
 * Check whether the soft reset is successful or not based on
 * IOC status and IOC config register values.
 *
 * Return: True when the soft reset is success, false otherwise.
 */
static inline bool
mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
{
	/* Success means: not ready, not faulted, and IOC not enabled. */
	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		return true;
	return false;
}

/**
 * mpi3mr_diagfault_success - Check diag fault is success or not
 * @mrioc: Adapter reference
 * @ioc_status: IOC status register value
 *
 * Check whether the controller hit diag reset fault code.
 *
 * Return: True when there is diag fault, false otherwise.
1045 */ 1046 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1047 u32 ioc_status) 1048 { 1049 u32 fault; 1050 1051 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1052 return false; 1053 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1054 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) 1055 return true; 1056 return false; 1057 } 1058 1059 /** 1060 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1061 * @mrioc: Adapter reference 1062 * 1063 * Set diag save bit in IOC configuration register to enable 1064 * snapdump. 1065 * 1066 * Return: Nothing. 1067 */ 1068 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1069 { 1070 u32 ioc_config; 1071 1072 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1073 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1074 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1075 } 1076 1077 /** 1078 * mpi3mr_issue_reset - Issue reset to the controller 1079 * @mrioc: Adapter reference 1080 * @reset_type: Reset type 1081 * @reset_reason: Reset reason code 1082 * 1083 * Unlock the host diagnostic registers and write the specific 1084 * reset type to that, wait for reset acknowledgment from the 1085 * controller, if the reset is not successful retry for the 1086 * predefined number of times. 1087 * 1088 * Return: 0 on success, non-zero on failure. 
1089 */ 1090 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1091 u32 reset_reason) 1092 { 1093 int retval = -1; 1094 u8 unlock_retry_count, reset_retry_count = 0; 1095 u32 host_diagnostic, timeout, ioc_status, ioc_config; 1096 1097 pci_cfg_access_lock(mrioc->pdev); 1098 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1099 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1100 goto out; 1101 if (mrioc->unrecoverable) 1102 goto out; 1103 retry_reset: 1104 unlock_retry_count = 0; 1105 mpi3mr_clear_reset_history(mrioc); 1106 do { 1107 ioc_info(mrioc, 1108 "Write magic sequence to unlock host diag register (retry=%d)\n", 1109 ++unlock_retry_count); 1110 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1111 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1112 mrioc->unrecoverable = 1; 1113 goto out; 1114 } 1115 1116 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1117 &mrioc->sysif_regs->write_sequence); 1118 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1119 &mrioc->sysif_regs->write_sequence); 1120 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1121 &mrioc->sysif_regs->write_sequence); 1122 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1123 &mrioc->sysif_regs->write_sequence); 1124 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1125 &mrioc->sysif_regs->write_sequence); 1126 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1127 &mrioc->sysif_regs->write_sequence); 1128 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1129 &mrioc->sysif_regs->write_sequence); 1130 usleep_range(1000, 1100); 1131 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1132 ioc_info(mrioc, 1133 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1134 unlock_retry_count, host_diagnostic); 1135 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1136 1137 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1138 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1139 
mpi3mr_reset_type_name(reset_type), 1140 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1141 writel(host_diagnostic | reset_type, 1142 &mrioc->sysif_regs->host_diagnostic); 1143 timeout = mrioc->ready_timeout * 10; 1144 if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) { 1145 do { 1146 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1147 if (ioc_status & 1148 MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 1149 mpi3mr_clear_reset_history(mrioc); 1150 ioc_config = 1151 readl(&mrioc->sysif_regs->ioc_configuration); 1152 if (mpi3mr_soft_reset_success(ioc_status, 1153 ioc_config)) { 1154 retval = 0; 1155 break; 1156 } 1157 } 1158 msleep(100); 1159 } while (--timeout); 1160 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1161 &mrioc->sysif_regs->write_sequence); 1162 } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) { 1163 do { 1164 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1165 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1166 retval = 0; 1167 break; 1168 } 1169 msleep(100); 1170 } while (--timeout); 1171 mpi3mr_clear_reset_history(mrioc); 1172 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1173 &mrioc->sysif_regs->write_sequence); 1174 } 1175 if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) { 1176 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1177 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1178 ioc_info(mrioc, 1179 "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n", 1180 reset_retry_count, ioc_status, ioc_config); 1181 goto retry_reset; 1182 } 1183 1184 out: 1185 pci_cfg_access_unlock(mrioc->pdev); 1186 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1187 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1188 1189 ioc_info(mrioc, 1190 "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n", 1191 (!retval) ? 
"successful" : "failed", ioc_status, 1192 ioc_config); 1193 return retval; 1194 } 1195 1196 /** 1197 * mpi3mr_admin_request_post - Post request to admin queue 1198 * @mrioc: Adapter reference 1199 * @admin_req: MPI3 request 1200 * @admin_req_sz: Request size 1201 * @ignore_reset: Ignore reset in process 1202 * 1203 * Post the MPI3 request into admin request queue and 1204 * inform the controller, if the queue is full return 1205 * appropriate error. 1206 * 1207 * Return: 0 on success, non-zero on failure. 1208 */ 1209 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1210 u16 admin_req_sz, u8 ignore_reset) 1211 { 1212 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1213 int retval = 0; 1214 unsigned long flags; 1215 u8 *areq_entry; 1216 1217 if (mrioc->unrecoverable) { 1218 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1219 return -EFAULT; 1220 } 1221 1222 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1223 areq_pi = mrioc->admin_req_pi; 1224 areq_ci = mrioc->admin_req_ci; 1225 max_entries = mrioc->num_admin_req; 1226 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1227 (areq_pi == (max_entries - 1)))) { 1228 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1229 retval = -EAGAIN; 1230 goto out; 1231 } 1232 if (!ignore_reset && mrioc->reset_in_progress) { 1233 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1234 retval = -EAGAIN; 1235 goto out; 1236 } 1237 areq_entry = (u8 *)mrioc->admin_req_base + 1238 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1239 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1240 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1241 1242 if (++areq_pi == max_entries) 1243 areq_pi = 0; 1244 mrioc->admin_req_pi = areq_pi; 1245 1246 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1247 1248 out: 1249 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1250 1251 return retval; 1252 } 1253 1254 /** 1255 * mpi3mr_free_op_req_q_segments - free request memory segments 
1256 * @mrioc: Adapter instance reference 1257 * @q_idx: operational request queue index 1258 * 1259 * Free memory segments allocated for operational request queue 1260 * 1261 * Return: Nothing. 1262 */ 1263 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1264 { 1265 u16 j; 1266 int size; 1267 struct segments *segments; 1268 1269 segments = mrioc->req_qinfo[q_idx].q_segments; 1270 if (!segments) 1271 return; 1272 1273 if (mrioc->enable_segqueue) { 1274 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1275 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1276 dma_free_coherent(&mrioc->pdev->dev, 1277 MPI3MR_MAX_SEG_LIST_SIZE, 1278 mrioc->req_qinfo[q_idx].q_segment_list, 1279 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1280 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1281 } 1282 } else 1283 size = mrioc->req_qinfo[q_idx].num_requests * 1284 mrioc->facts.op_req_sz; 1285 1286 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1287 if (!segments[j].segment) 1288 continue; 1289 dma_free_coherent(&mrioc->pdev->dev, 1290 size, segments[j].segment, segments[j].segment_dma); 1291 segments[j].segment = NULL; 1292 } 1293 kfree(mrioc->req_qinfo[q_idx].q_segments); 1294 mrioc->req_qinfo[q_idx].q_segments = NULL; 1295 mrioc->req_qinfo[q_idx].qid = 0; 1296 } 1297 1298 /** 1299 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1300 * @mrioc: Adapter instance reference 1301 * @q_idx: operational reply queue index 1302 * 1303 * Free memory segments allocated for operational reply queue 1304 * 1305 * Return: Nothing. 
1306 */ 1307 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1308 { 1309 u16 j; 1310 int size; 1311 struct segments *segments; 1312 1313 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 1314 if (!segments) 1315 return; 1316 1317 if (mrioc->enable_segqueue) { 1318 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1319 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 1320 dma_free_coherent(&mrioc->pdev->dev, 1321 MPI3MR_MAX_SEG_LIST_SIZE, 1322 mrioc->op_reply_qinfo[q_idx].q_segment_list, 1323 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 1324 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1325 } 1326 } else 1327 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 1328 mrioc->op_reply_desc_sz; 1329 1330 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 1331 if (!segments[j].segment) 1332 continue; 1333 dma_free_coherent(&mrioc->pdev->dev, 1334 size, segments[j].segment, segments[j].segment_dma); 1335 segments[j].segment = NULL; 1336 } 1337 1338 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 1339 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 1340 mrioc->op_reply_qinfo[q_idx].qid = 0; 1341 } 1342 1343 /** 1344 * mpi3mr_delete_op_reply_q - delete operational reply queue 1345 * @mrioc: Adapter instance reference 1346 * @qidx: operational reply queue index 1347 * 1348 * Delete operatinal reply queue by issuing MPI request 1349 * through admin queue. 1350 * 1351 * Return: 0 on success, non-zero on failure. 
1352 */ 1353 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1354 { 1355 struct mpi3_delete_reply_queue_request delq_req; 1356 int retval = 0; 1357 u16 reply_qid = 0, midx; 1358 1359 reply_qid = mrioc->op_reply_qinfo[qidx].qid; 1360 1361 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1362 1363 if (!reply_qid) { 1364 retval = -1; 1365 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 1366 goto out; 1367 } 1368 1369 memset(&delq_req, 0, sizeof(delq_req)); 1370 mutex_lock(&mrioc->init_cmds.mutex); 1371 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1372 retval = -1; 1373 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 1374 mutex_unlock(&mrioc->init_cmds.mutex); 1375 goto out; 1376 } 1377 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1378 mrioc->init_cmds.is_waiting = 1; 1379 mrioc->init_cmds.callback = NULL; 1380 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1381 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 1382 delq_req.queue_id = cpu_to_le16(reply_qid); 1383 1384 init_completion(&mrioc->init_cmds.done); 1385 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 1386 1); 1387 if (retval) { 1388 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 1389 goto out_unlock; 1390 } 1391 wait_for_completion_timeout(&mrioc->init_cmds.done, 1392 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1393 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1394 ioc_err(mrioc, "Issue DelRepQ: command timed out\n"); 1395 mpi3mr_set_diagsave(mrioc); 1396 mpi3mr_issue_reset(mrioc, 1397 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1398 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 1399 mrioc->unrecoverable = 1; 1400 1401 retval = -1; 1402 goto out_unlock; 1403 } 1404 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1405 != MPI3_IOCSTATUS_SUCCESS) { 1406 ioc_err(mrioc, 1407 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1408 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 1409 mrioc->init_cmds.ioc_loginfo); 1410 retval = -1; 1411 goto out_unlock; 1412 } 1413 mrioc->intr_info[midx].op_reply_q = NULL; 1414 1415 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1416 out_unlock: 1417 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1418 mutex_unlock(&mrioc->init_cmds.mutex); 1419 out: 1420 1421 return retval; 1422 } 1423 1424 /** 1425 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 1426 * @mrioc: Adapter instance reference 1427 * @qidx: request queue index 1428 * 1429 * Allocate segmented memory pools for operational reply 1430 * queue. 1431 * 1432 * Return: 0 on success, non-zero on failure. 1433 */ 1434 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1435 { 1436 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1437 int i, size; 1438 u64 *q_segment_list_entry = NULL; 1439 struct segments *segments; 1440 1441 if (mrioc->enable_segqueue) { 1442 op_reply_q->segment_qd = 1443 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 1444 1445 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1446 1447 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1448 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 1449 GFP_KERNEL); 1450 if (!op_reply_q->q_segment_list) 1451 return -ENOMEM; 1452 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 1453 } else { 1454 op_reply_q->segment_qd = op_reply_q->num_replies; 1455 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 1456 } 1457 1458 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 1459 op_reply_q->segment_qd); 1460 1461 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 1462 sizeof(struct segments), GFP_KERNEL); 1463 if (!op_reply_q->q_segments) 1464 return -ENOMEM; 1465 1466 segments = op_reply_q->q_segments; 1467 for (i = 0; i < op_reply_q->num_segments; i++) { 1468 segments[i].segment = 1469 dma_alloc_coherent(&mrioc->pdev->dev, 1470 size, &segments[i].segment_dma, 
GFP_KERNEL); 1471 if (!segments[i].segment) 1472 return -ENOMEM; 1473 if (mrioc->enable_segqueue) 1474 q_segment_list_entry[i] = 1475 (unsigned long)segments[i].segment_dma; 1476 } 1477 1478 return 0; 1479 } 1480 1481 /** 1482 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 1483 * @mrioc: Adapter instance reference 1484 * @qidx: request queue index 1485 * 1486 * Allocate segmented memory pools for operational request 1487 * queue. 1488 * 1489 * Return: 0 on success, non-zero on failure. 1490 */ 1491 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1492 { 1493 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 1494 int i, size; 1495 u64 *q_segment_list_entry = NULL; 1496 struct segments *segments; 1497 1498 if (mrioc->enable_segqueue) { 1499 op_req_q->segment_qd = 1500 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 1501 1502 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1503 1504 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1505 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 1506 GFP_KERNEL); 1507 if (!op_req_q->q_segment_list) 1508 return -ENOMEM; 1509 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 1510 1511 } else { 1512 op_req_q->segment_qd = op_req_q->num_requests; 1513 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 1514 } 1515 1516 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 1517 op_req_q->segment_qd); 1518 1519 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 1520 sizeof(struct segments), GFP_KERNEL); 1521 if (!op_req_q->q_segments) 1522 return -ENOMEM; 1523 1524 segments = op_req_q->q_segments; 1525 for (i = 0; i < op_req_q->num_segments; i++) { 1526 segments[i].segment = 1527 dma_alloc_coherent(&mrioc->pdev->dev, 1528 size, &segments[i].segment_dma, GFP_KERNEL); 1529 if (!segments[i].segment) 1530 return -ENOMEM; 1531 if (mrioc->enable_segqueue) 1532 q_segment_list_entry[i] = 1533 (unsigned long)segments[i].segment_dma; 1534 } 1535 1536 return 0; 
1537 } 1538 1539 /** 1540 * mpi3mr_create_op_reply_q - create operational reply queue 1541 * @mrioc: Adapter instance reference 1542 * @qidx: operational reply queue index 1543 * 1544 * Create operatinal reply queue by issuing MPI request 1545 * through admin queue. 1546 * 1547 * Return: 0 on success, non-zero on failure. 1548 */ 1549 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1550 { 1551 struct mpi3_create_reply_queue_request create_req; 1552 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1553 int retval = 0; 1554 u16 reply_qid = 0, midx; 1555 1556 reply_qid = op_reply_q->qid; 1557 1558 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1559 1560 if (reply_qid) { 1561 retval = -1; 1562 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1563 reply_qid); 1564 1565 return retval; 1566 } 1567 1568 reply_qid = qidx + 1; 1569 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1570 op_reply_q->ci = 0; 1571 op_reply_q->ephase = 1; 1572 atomic_set(&op_reply_q->pend_ios, 0); 1573 atomic_set(&op_reply_q->in_use, 0); 1574 op_reply_q->enable_irq_poll = false; 1575 1576 if (!op_reply_q->q_segments) { 1577 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1578 if (retval) { 1579 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1580 goto out; 1581 } 1582 } 1583 1584 memset(&create_req, 0, sizeof(create_req)); 1585 mutex_lock(&mrioc->init_cmds.mutex); 1586 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1587 retval = -1; 1588 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1589 goto out_unlock; 1590 } 1591 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1592 mrioc->init_cmds.is_waiting = 1; 1593 mrioc->init_cmds.callback = NULL; 1594 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1595 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1596 create_req.queue_id = cpu_to_le16(reply_qid); 1597 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1598 create_req.msix_index = 
cpu_to_le16(mrioc->intr_info[midx].msix_index); 1599 if (mrioc->enable_segqueue) { 1600 create_req.flags |= 1601 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1602 create_req.base_address = cpu_to_le64( 1603 op_reply_q->q_segment_list_dma); 1604 } else 1605 create_req.base_address = cpu_to_le64( 1606 op_reply_q->q_segments[0].segment_dma); 1607 1608 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1609 1610 init_completion(&mrioc->init_cmds.done); 1611 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1612 sizeof(create_req), 1); 1613 if (retval) { 1614 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1615 goto out_unlock; 1616 } 1617 wait_for_completion_timeout(&mrioc->init_cmds.done, 1618 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1619 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1620 ioc_err(mrioc, "CreateRepQ: command timed out\n"); 1621 mpi3mr_set_diagsave(mrioc); 1622 mpi3mr_issue_reset(mrioc, 1623 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1624 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1625 mrioc->unrecoverable = 1; 1626 retval = -1; 1627 goto out_unlock; 1628 } 1629 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1630 != MPI3_IOCSTATUS_SUCCESS) { 1631 ioc_err(mrioc, 1632 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1633 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1634 mrioc->init_cmds.ioc_loginfo); 1635 retval = -1; 1636 goto out_unlock; 1637 } 1638 op_reply_q->qid = reply_qid; 1639 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1640 1641 out_unlock: 1642 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1643 mutex_unlock(&mrioc->init_cmds.mutex); 1644 out: 1645 1646 return retval; 1647 } 1648 1649 /** 1650 * mpi3mr_create_op_req_q - create operational request queue 1651 * @mrioc: Adapter instance reference 1652 * @idx: operational request queue index 1653 * @reply_qid: Reply queue ID 1654 * 1655 * Create operatinal request queue by issuing MPI request 1656 * through admin queue. 
1657 * 1658 * Return: 0 on success, non-zero on failure. 1659 */ 1660 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1661 u16 reply_qid) 1662 { 1663 struct mpi3_create_request_queue_request create_req; 1664 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1665 int retval = 0; 1666 u16 req_qid = 0; 1667 1668 req_qid = op_req_q->qid; 1669 1670 if (req_qid) { 1671 retval = -1; 1672 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1673 req_qid); 1674 1675 return retval; 1676 } 1677 req_qid = idx + 1; 1678 1679 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1680 op_req_q->ci = 0; 1681 op_req_q->pi = 0; 1682 op_req_q->reply_qid = reply_qid; 1683 spin_lock_init(&op_req_q->q_lock); 1684 1685 if (!op_req_q->q_segments) { 1686 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1687 if (retval) { 1688 mpi3mr_free_op_req_q_segments(mrioc, idx); 1689 goto out; 1690 } 1691 } 1692 1693 memset(&create_req, 0, sizeof(create_req)); 1694 mutex_lock(&mrioc->init_cmds.mutex); 1695 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1696 retval = -1; 1697 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1698 goto out_unlock; 1699 } 1700 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1701 mrioc->init_cmds.is_waiting = 1; 1702 mrioc->init_cmds.callback = NULL; 1703 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1704 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1705 create_req.queue_id = cpu_to_le16(req_qid); 1706 if (mrioc->enable_segqueue) { 1707 create_req.flags = 1708 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1709 create_req.base_address = cpu_to_le64( 1710 op_req_q->q_segment_list_dma); 1711 } else 1712 create_req.base_address = cpu_to_le64( 1713 op_req_q->q_segments[0].segment_dma); 1714 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1715 create_req.size = cpu_to_le16(op_req_q->num_requests); 1716 1717 init_completion(&mrioc->init_cmds.done); 1718 retval = mpi3mr_admin_request_post(mrioc, 
&create_req, 1719 sizeof(create_req), 1); 1720 if (retval) { 1721 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1722 goto out_unlock; 1723 } 1724 wait_for_completion_timeout(&mrioc->init_cmds.done, 1725 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1726 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1727 ioc_err(mrioc, "CreateReqQ: command timed out\n"); 1728 mpi3mr_set_diagsave(mrioc); 1729 if (mpi3mr_issue_reset(mrioc, 1730 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 1731 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT)) 1732 mrioc->unrecoverable = 1; 1733 retval = -1; 1734 goto out_unlock; 1735 } 1736 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1737 != MPI3_IOCSTATUS_SUCCESS) { 1738 ioc_err(mrioc, 1739 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1740 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1741 mrioc->init_cmds.ioc_loginfo); 1742 retval = -1; 1743 goto out_unlock; 1744 } 1745 op_req_q->qid = req_qid; 1746 1747 out_unlock: 1748 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1749 mutex_unlock(&mrioc->init_cmds.mutex); 1750 out: 1751 1752 return retval; 1753 } 1754 1755 /** 1756 * mpi3mr_create_op_queues - create operational queue pairs 1757 * @mrioc: Adapter instance reference 1758 * 1759 * Allocate memory for operational queue meta data and call 1760 * create request and reply queue functions. 1761 * 1762 * Return: 0 on success, non-zero on failures. 
1763 */ 1764 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1765 { 1766 int retval = 0; 1767 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1768 1769 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1770 mrioc->facts.max_op_req_q); 1771 1772 msix_count_op_q = 1773 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1774 if (!mrioc->num_queues) 1775 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1776 num_queues = mrioc->num_queues; 1777 ioc_info(mrioc, "Trying to create %d Operational Q pairs\n", 1778 num_queues); 1779 1780 if (!mrioc->req_qinfo) { 1781 mrioc->req_qinfo = kcalloc(num_queues, 1782 sizeof(struct op_req_qinfo), GFP_KERNEL); 1783 if (!mrioc->req_qinfo) { 1784 retval = -1; 1785 goto out_failed; 1786 } 1787 1788 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1789 num_queues, GFP_KERNEL); 1790 if (!mrioc->op_reply_qinfo) { 1791 retval = -1; 1792 goto out_failed; 1793 } 1794 } 1795 1796 if (mrioc->enable_segqueue) 1797 ioc_info(mrioc, 1798 "allocating operational queues through segmented queues\n"); 1799 1800 for (i = 0; i < num_queues; i++) { 1801 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1802 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1803 break; 1804 } 1805 if (mpi3mr_create_op_req_q(mrioc, i, 1806 mrioc->op_reply_qinfo[i].qid)) { 1807 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1808 mpi3mr_delete_op_reply_q(mrioc, i); 1809 break; 1810 } 1811 } 1812 1813 if (i == 0) { 1814 /* Not even one queue is created successfully*/ 1815 retval = -1; 1816 goto out_failed; 1817 } 1818 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1819 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1820 mrioc->num_op_reply_q); 1821 1822 return retval; 1823 out_failed: 1824 kfree(mrioc->req_qinfo); 1825 mrioc->req_qinfo = NULL; 1826 1827 kfree(mrioc->op_reply_qinfo); 1828 mrioc->op_reply_qinfo = NULL; 1829 1830 return retval; 1831 } 1832 1833 /** 1834 * mpi3mr_op_request_post - Post request to operational 
queue 1835 * @mrioc: Adapter reference 1836 * @op_req_q: Operational request queue info 1837 * @req: MPI3 request 1838 * 1839 * Post the MPI3 request into operational request queue and 1840 * inform the controller, if the queue is full return 1841 * appropriate error. 1842 * 1843 * Return: 0 on success, non-zero on failure. 1844 */ 1845 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 1846 struct op_req_qinfo *op_req_q, u8 *req) 1847 { 1848 u16 pi = 0, max_entries, reply_qidx = 0, midx; 1849 int retval = 0; 1850 unsigned long flags; 1851 u8 *req_entry; 1852 void *segment_base_addr; 1853 u16 req_sz = mrioc->facts.op_req_sz; 1854 struct segments *segments = op_req_q->q_segments; 1855 1856 reply_qidx = op_req_q->reply_qid - 1; 1857 1858 if (mrioc->unrecoverable) 1859 return -EFAULT; 1860 1861 spin_lock_irqsave(&op_req_q->q_lock, flags); 1862 pi = op_req_q->pi; 1863 max_entries = op_req_q->num_requests; 1864 1865 if (mpi3mr_check_req_qfull(op_req_q)) { 1866 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 1867 reply_qidx, mrioc->op_reply_q_offset); 1868 mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); 1869 1870 if (mpi3mr_check_req_qfull(op_req_q)) { 1871 retval = -EAGAIN; 1872 goto out; 1873 } 1874 } 1875 1876 if (mrioc->reset_in_progress) { 1877 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 1878 retval = -EAGAIN; 1879 goto out; 1880 } 1881 1882 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 1883 req_entry = (u8 *)segment_base_addr + 1884 ((pi % op_req_q->segment_qd) * req_sz); 1885 1886 memset(req_entry, 0, req_sz); 1887 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 1888 1889 if (++pi == max_entries) 1890 pi = 0; 1891 op_req_q->pi = pi; 1892 1893 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 1894 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 1895 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 1896 1897 writel(op_req_q->pi, 1898 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 1899 1900 out: 1901 
spin_unlock_irqrestore(&op_req_q->q_lock, flags); 1902 return retval; 1903 } 1904 1905 /** 1906 * mpi3mr_sync_timestamp - Issue time stamp sync request 1907 * @mrioc: Adapter reference 1908 * 1909 * Issue IO unit control MPI request to synchornize firmware 1910 * timestamp with host time. 1911 * 1912 * Return: 0 on success, non-zero on failure. 1913 */ 1914 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc) 1915 { 1916 ktime_t current_time; 1917 struct mpi3_iounit_control_request iou_ctrl; 1918 int retval = 0; 1919 1920 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 1921 mutex_lock(&mrioc->init_cmds.mutex); 1922 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1923 retval = -1; 1924 ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n"); 1925 mutex_unlock(&mrioc->init_cmds.mutex); 1926 goto out; 1927 } 1928 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1929 mrioc->init_cmds.is_waiting = 1; 1930 mrioc->init_cmds.callback = NULL; 1931 iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1932 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 1933 iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP; 1934 current_time = ktime_get_real(); 1935 iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time)); 1936 1937 init_completion(&mrioc->init_cmds.done); 1938 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, 1939 sizeof(iou_ctrl), 0); 1940 if (retval) { 1941 ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n"); 1942 goto out_unlock; 1943 } 1944 1945 wait_for_completion_timeout(&mrioc->init_cmds.done, 1946 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1947 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1948 ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n"); 1949 mrioc->init_cmds.is_waiting = 0; 1950 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 1951 mpi3mr_soft_reset_handler(mrioc, 1952 MPI3MR_RESET_FROM_TSU_TIMEOUT, 1); 1953 retval = -1; 1954 goto out_unlock; 1955 } 1956 if ((mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK) 1957 != MPI3_IOCSTATUS_SUCCESS) { 1958 ioc_err(mrioc, 1959 "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1960 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1961 mrioc->init_cmds.ioc_loginfo); 1962 retval = -1; 1963 goto out_unlock; 1964 } 1965 1966 out_unlock: 1967 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1968 mutex_unlock(&mrioc->init_cmds.mutex); 1969 1970 out: 1971 return retval; 1972 } 1973 1974 /** 1975 * mpi3mr_print_pkg_ver - display controller fw package version 1976 * @mrioc: Adapter reference 1977 * 1978 * Retrieve firmware package version from the component image 1979 * header of the controller flash and display it. 1980 * 1981 * Return: 0 on success and non-zero on failure. 1982 */ 1983 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc) 1984 { 1985 struct mpi3_ci_upload_request ci_upload; 1986 int retval = -1; 1987 void *data = NULL; 1988 dma_addr_t data_dma; 1989 struct mpi3_ci_manifest_mpi *manifest; 1990 u32 data_len = sizeof(struct mpi3_ci_manifest_mpi); 1991 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 1992 1993 data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 1994 GFP_KERNEL); 1995 if (!data) 1996 return -ENOMEM; 1997 1998 memset(&ci_upload, 0, sizeof(ci_upload)); 1999 mutex_lock(&mrioc->init_cmds.mutex); 2000 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 2001 ioc_err(mrioc, "sending get package version failed due to command in use\n"); 2002 mutex_unlock(&mrioc->init_cmds.mutex); 2003 goto out; 2004 } 2005 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 2006 mrioc->init_cmds.is_waiting = 1; 2007 mrioc->init_cmds.callback = NULL; 2008 ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 2009 ci_upload.function = MPI3_FUNCTION_CI_UPLOAD; 2010 ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY; 2011 ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST); 2012 ci_upload.image_offset = 
cpu_to_le32(MPI3_IMAGE_HEADER_SIZE); 2013 ci_upload.segment_size = cpu_to_le32(data_len); 2014 2015 mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len, 2016 data_dma); 2017 init_completion(&mrioc->init_cmds.done); 2018 retval = mpi3mr_admin_request_post(mrioc, &ci_upload, 2019 sizeof(ci_upload), 1); 2020 if (retval) { 2021 ioc_err(mrioc, "posting get package version failed\n"); 2022 goto out_unlock; 2023 } 2024 wait_for_completion_timeout(&mrioc->init_cmds.done, 2025 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 2026 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 2027 ioc_err(mrioc, "get package version timed out\n"); 2028 retval = -1; 2029 goto out_unlock; 2030 } 2031 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 2032 == MPI3_IOCSTATUS_SUCCESS) { 2033 manifest = (struct mpi3_ci_manifest_mpi *) data; 2034 if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) { 2035 ioc_info(mrioc, 2036 "firmware package version(%d.%d.%d.%d.%05d-%05d)\n", 2037 manifest->package_version.gen_major, 2038 manifest->package_version.gen_minor, 2039 manifest->package_version.phase_major, 2040 manifest->package_version.phase_minor, 2041 manifest->package_version.customer_id, 2042 manifest->package_version.build_num); 2043 } 2044 } 2045 retval = 0; 2046 out_unlock: 2047 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 2048 mutex_unlock(&mrioc->init_cmds.mutex); 2049 2050 out: 2051 if (data) 2052 dma_free_coherent(&mrioc->pdev->dev, data_len, data, 2053 data_dma); 2054 return retval; 2055 } 2056 2057 /** 2058 * mpi3mr_watchdog_work - watchdog thread to monitor faults 2059 * @work: work struct 2060 * 2061 * Watch dog work periodically executed (1 second interval) to 2062 * monitor firmware fault and to issue periodic timer sync to 2063 * the firmware. 2064 * 2065 * Return: Nothing. 
2066 */ 2067 static void mpi3mr_watchdog_work(struct work_struct *work) 2068 { 2069 struct mpi3mr_ioc *mrioc = 2070 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2071 unsigned long flags; 2072 enum mpi3mr_iocstate ioc_state; 2073 u32 fault, host_diagnostic; 2074 2075 if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2076 mrioc->ts_update_counter = 0; 2077 mpi3mr_sync_timestamp(mrioc); 2078 } 2079 2080 /*Check for fault state every one second and issue Soft reset*/ 2081 ioc_state = mpi3mr_get_iocstate(mrioc); 2082 if (ioc_state == MRIOC_STATE_FAULT) { 2083 fault = readl(&mrioc->sysif_regs->fault) & 2084 MPI3_SYSIF_FAULT_CODE_MASK; 2085 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2086 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2087 if (!mrioc->diagsave_timeout) { 2088 mpi3mr_print_fault_info(mrioc); 2089 ioc_warn(mrioc, "Diag save in progress\n"); 2090 } 2091 if ((mrioc->diagsave_timeout++) <= 2092 MPI3_SYSIF_DIAG_SAVE_TIMEOUT) 2093 goto schedule_work; 2094 } else 2095 mpi3mr_print_fault_info(mrioc); 2096 mrioc->diagsave_timeout = 0; 2097 2098 if (fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) { 2099 ioc_info(mrioc, 2100 "Factory Reset fault occurred marking controller as unrecoverable" 2101 ); 2102 mrioc->unrecoverable = 1; 2103 goto out; 2104 } 2105 2106 if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) || 2107 (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) || 2108 (mrioc->reset_in_progress)) 2109 goto out; 2110 if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET) 2111 mpi3mr_soft_reset_handler(mrioc, 2112 MPI3MR_RESET_FROM_CIACTIV_FAULT, 0); 2113 else 2114 mpi3mr_soft_reset_handler(mrioc, 2115 MPI3MR_RESET_FROM_FAULT_WATCH, 0); 2116 } 2117 2118 schedule_work: 2119 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2120 if (mrioc->watchdog_work_q) 2121 queue_delayed_work(mrioc->watchdog_work_q, 2122 &mrioc->watchdog_work, 2123 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2124 
spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2125 out: 2126 return; 2127 } 2128 2129 /** 2130 * mpi3mr_start_watchdog - Start watchdog 2131 * @mrioc: Adapter instance reference 2132 * 2133 * Create and start the watchdog thread to monitor controller 2134 * faults. 2135 * 2136 * Return: Nothing. 2137 */ 2138 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2139 { 2140 if (mrioc->watchdog_work_q) 2141 return; 2142 2143 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2144 snprintf(mrioc->watchdog_work_q_name, 2145 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2146 mrioc->id); 2147 mrioc->watchdog_work_q = 2148 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2149 if (!mrioc->watchdog_work_q) { 2150 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2151 return; 2152 } 2153 2154 if (mrioc->watchdog_work_q) 2155 queue_delayed_work(mrioc->watchdog_work_q, 2156 &mrioc->watchdog_work, 2157 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2158 } 2159 2160 /** 2161 * mpi3mr_stop_watchdog - Stop watchdog 2162 * @mrioc: Adapter instance reference 2163 * 2164 * Stop the watchdog thread created to monitor controller 2165 * faults. 2166 * 2167 * Return: Nothing. 2168 */ 2169 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) 2170 { 2171 unsigned long flags; 2172 struct workqueue_struct *wq; 2173 2174 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2175 wq = mrioc->watchdog_work_q; 2176 mrioc->watchdog_work_q = NULL; 2177 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2178 if (wq) { 2179 if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) 2180 flush_workqueue(wq); 2181 destroy_workqueue(wq); 2182 } 2183 } 2184 2185 /** 2186 * mpi3mr_kill_ioc - Kill the controller 2187 * @mrioc: Adapter instance reference 2188 * @reason: reason for the failure. 2189 * 2190 * If fault debug is enabled, display the fault info else issue 2191 * diag fault and freeze the system for controller debug 2192 * purpose. 
 *
 * Return: Nothing.
 */
static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
{
	enum mpi3mr_iocstate ioc_state;

	/* Nothing to do unless fault debugging was requested. */
	if (!mrioc->fault_dbg)
		return;

	dump_stack();

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT)
		mpi3mr_print_fault_info(mrioc);
	else {
		ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
		    reason);
		mpi3mr_diagfault_reset_handler(mrioc, reason);
	}
	/*
	 * fault_dbg == 2: spin forever so the controller state can be
	 * inspected live; any other non-zero value panics the system.
	 */
	if (mrioc->fault_dbg == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected-phase bit starts at 1 for a freshly initialized queue. */
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/*
	 * Program queue depths and DMA base addresses into the system
	 * interface registers, then publish the initial PI/CI values.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer to receive the facts payload. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single shared slot; serialize access to it. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
		/*
		 * Capture a diagnostic snapshot and diag-fault the IOC so the
		 * failure can be analyzed; the controller is then considered
		 * unrecoverable.
		 */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		mrioc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy the firmware's response out of the DMA buffer for the caller. */
	memcpy(facts_data, (u8 *)data, data_len);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Process IOC facts data
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it .
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask: skip if firmware asked for nothing or for a
	 * mask at least as wide as the current one.
	 */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver .
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* ioc_facts_data_length is in 4-byte dwords; warn on a size
	 * mismatch between driver and firmware structure definitions.
	 */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Cross-check the request frame size against the hardware-encoded
	 * operational request entry size (power of two, in bytes).
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Convert each firmware field to CPU endianness and cache it. */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	/* reply_frame_size is in dwords; cache it in bytes. */
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Clamp the driver's MSI-X usage to what the firmware supports. */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));

	/* Reserve a slice of the request pool for internal driver commands. */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	/* Shrink the I/O depth when booting a kdump/reset_devices kernel. */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	dma_addr_t phy_addr;

	/* Buffers survive a re-initialization; if they already exist just
	 * repost them to the firmware queues.
	 */
	if (mrioc->init_cmds.reply)
		goto post_reply_sbuf;

	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	/* One bit per device handle, rounded up to a whole byte. */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	/* Queue depths are one larger than the buffer counts so the
	 * firmware can distinguish a full queue from an empty one.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

post_reply_sbuf:
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
	return retval;

out_failed:
	/* NOTE(review): allocations made before the failure are not freed
	 * here; presumably the caller's teardown path releases them — confirm.
	 */
	retval = -1;
	return retval;
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able buffer carrying driver identification to the firmware. */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of what was reported to the firmware. */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single shared slot; serialize access to it. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	/* Hand the pre-allocated reply free and sense buffer queues over. */
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Seed the firmware clock with wall time in milliseconds. */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Diag-fault the IOC for analysis; it is unrecoverable now. */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "Issue IOCInit: command timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Un mask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* Only 128 event IDs fit in the four 32-bit mask words. */
	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));
	word = event / 32;

	/* Clearing the bit unmasks (enables) the event notification. */
	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single shared slot; serialize access to it. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Tell firmware which events the driver wants (mask bits cleared
	 * by mpi3mr_unmask_events()).
	 */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
		/* Diag-fault the IOC for analysis; it is unrecoverable now. */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		mrioc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/**
 * mpi3mr_send_event_ack - Send event acknowledgment
 * @mrioc: Adapter instance reference
 * @event: MPI3 event ID
 * @event_ctx: Event context
 *
 * Send event acknowledgment through admin queue and wait for
 * it to complete.
 *
 * Return: 0 on success, non-zero on failures.
2878 */ 2879 int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2880 u32 event_ctx) 2881 { 2882 struct mpi3_event_ack_request evtack_req; 2883 int retval = 0; 2884 2885 memset(&evtack_req, 0, sizeof(evtack_req)); 2886 mutex_lock(&mrioc->init_cmds.mutex); 2887 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 2888 retval = -1; 2889 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 2890 mutex_unlock(&mrioc->init_cmds.mutex); 2891 goto out; 2892 } 2893 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 2894 mrioc->init_cmds.is_waiting = 1; 2895 mrioc->init_cmds.callback = NULL; 2896 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 2897 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2898 evtack_req.event = event; 2899 evtack_req.event_context = cpu_to_le32(event_ctx); 2900 2901 init_completion(&mrioc->init_cmds.done); 2902 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2903 sizeof(evtack_req), 1); 2904 if (retval) { 2905 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 2906 goto out_unlock; 2907 } 2908 wait_for_completion_timeout(&mrioc->init_cmds.done, 2909 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 2910 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 2911 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 2912 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 2913 mpi3mr_soft_reset_handler(mrioc, 2914 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 2915 retval = -1; 2916 goto out_unlock; 2917 } 2918 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 2919 != MPI3_IOCSTATUS_SUCCESS) { 2920 ioc_err(mrioc, 2921 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 2922 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2923 mrioc->init_cmds.ioc_loginfo); 2924 retval = -1; 2925 goto out_unlock; 2926 } 2927 2928 out_unlock: 2929 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 2930 mutex_unlock(&mrioc->init_cmds.mutex); 2931 out: 2932 return retval; 2933 } 2934 2935 /** 2936 * mpi3mr_alloc_chain_bufs - Allocate chain 
buffers 2937 * @mrioc: Adapter instance reference 2938 * 2939 * Allocate chain buffers and set a bitmap to indicate free 2940 * chain buffers. Chain buffers are used to pass the SGE 2941 * information along with MPI3 SCSI IO requests for host I/O. 2942 * 2943 * Return: 0 on success, non-zero on failure 2944 */ 2945 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 2946 { 2947 int retval = 0; 2948 u32 sz, i; 2949 u16 num_chains; 2950 2951 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 2952 2953 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 2954 | SHOST_DIX_TYPE1_PROTECTION 2955 | SHOST_DIX_TYPE2_PROTECTION 2956 | SHOST_DIX_TYPE3_PROTECTION)) 2957 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 2958 2959 mrioc->chain_buf_count = num_chains; 2960 sz = sizeof(struct chain_element) * num_chains; 2961 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 2962 if (!mrioc->chain_sgl_list) 2963 goto out_failed; 2964 2965 sz = MPI3MR_PAGE_SIZE_4K; 2966 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 2967 &mrioc->pdev->dev, sz, 16, 0); 2968 if (!mrioc->chain_buf_pool) { 2969 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 2970 goto out_failed; 2971 } 2972 2973 for (i = 0; i < num_chains; i++) { 2974 mrioc->chain_sgl_list[i].addr = 2975 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 2976 &mrioc->chain_sgl_list[i].dma_addr); 2977 2978 if (!mrioc->chain_sgl_list[i].addr) 2979 goto out_failed; 2980 } 2981 mrioc->chain_bitmap_sz = num_chains / 8; 2982 if (num_chains % 8) 2983 mrioc->chain_bitmap_sz++; 2984 mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 2985 if (!mrioc->chain_bitmap) 2986 goto out_failed; 2987 return retval; 2988 out_failed: 2989 retval = -1; 2990 return retval; 2991 } 2992 2993 /** 2994 * mpi3mr_port_enable_complete - Mark port enable complete 2995 * @mrioc: Adapter instance reference 2996 * @drv_cmd: Internal command tracker 2997 * 2998 * Call back for asynchronous port enable request sets the 
 * driver command to indicate port enable request is complete.
 *
 * Return: Nothing
 */
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	/* Return the tracker to the pool for the next internal command */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	/* A non-zero IOC status from the reply marks the scan as failed */
	mrioc->scan_failed = drv_cmd->ioc_status;
	mrioc->scan_started = 0;
}

/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is shared; only one internal command may be in flight */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Completion is reported through the callback from reply path */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Synchronous caller blocks on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (!async) {
		wait_for_completion_timeout(&mrioc->init_cmds.done,
		    (pe_timeout * HZ));
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			ioc_err(mrioc, "Issue PortEnable: command timed out\n");
			retval = -1;
			mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
			/*
			 * Force the firmware into a diag fault so the failure
			 * context is preserved; the IOC is unusable from here.
			 */
			mpi3mr_set_diagsave(mrioc);
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
			    MPI3MR_RESET_FROM_PE_TIMEOUT);
			mrioc->unrecoverable = 1;
			goto out_unlock;
		}
		/* Synchronous path: run the completion handler inline */
		mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
	}
out_unlock:
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/* Protocol type to name mapper structure*/
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure*/
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
};

/**
 * mpi3mr_print_ioc_info - Display controller information
 * @mrioc: Adapter instance reference
 *
 * Display controller personality, capability, supported
 * protocols etc.
3100 * 3101 * Return: Nothing 3102 */ 3103 static void 3104 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3105 { 3106 int i = 0, bytes_written = 0; 3107 char personality[16]; 3108 char protocol[50] = {0}; 3109 char capabilities[100] = {0}; 3110 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3111 3112 switch (mrioc->facts.personality) { 3113 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3114 strncpy(personality, "Enhanced HBA", sizeof(personality)); 3115 break; 3116 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3117 strncpy(personality, "RAID", sizeof(personality)); 3118 break; 3119 default: 3120 strncpy(personality, "Unknown", sizeof(personality)); 3121 break; 3122 } 3123 3124 ioc_info(mrioc, "Running in %s Personality", personality); 3125 3126 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3127 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3128 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3129 3130 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3131 if (mrioc->facts.protocol_flags & 3132 mpi3mr_protocols[i].protocol) { 3133 bytes_written += scnprintf(protocol + bytes_written, 3134 sizeof(protocol) - bytes_written, "%s%s", 3135 bytes_written ? "," : "", 3136 mpi3mr_protocols[i].name); 3137 } 3138 } 3139 3140 bytes_written = 0; 3141 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3142 if (mrioc->facts.protocol_flags & 3143 mpi3mr_capabilities[i].capability) { 3144 bytes_written += scnprintf(capabilities + bytes_written, 3145 sizeof(capabilities) - bytes_written, "%s%s", 3146 bytes_written ? "," : "", 3147 mpi3mr_capabilities[i].name); 3148 } 3149 } 3150 3151 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3152 protocol, capabilities); 3153 } 3154 3155 /** 3156 * mpi3mr_cleanup_resources - Free PCI resources 3157 * @mrioc: Adapter instance reference 3158 * 3159 * Unmap PCI device memory and disable PCI device. 3160 * 3161 * Return: 0 on success and non-zero on failure. 
 */
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;

	mpi3mr_cleanup_isr(mrioc);

	/* Unmap the controller's system interface register space */
	if (mrioc->sysif_regs) {
		iounmap((void __iomem *)mrioc->sysif_regs);
		mrioc->sysif_regs = NULL;
	}

	if (pci_is_enabled(pdev)) {
		if (mrioc->bars)
			pci_release_selected_regions(pdev, mrioc->bars);
		pci_disable_device(pdev);
	}
}

/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Reuse an already negotiated mask (post reset); else prefer 64-bit
	 * when the platform requires/permits it */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
	    (sizeof(dma_addr_t) > 4)) ?
	    DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR; it holds the system interface registers */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		/* Fall back to a 32-bit DMA mask before giving up */
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* MSI-X Message Control: table size in bits 0-9, N-1 encoded */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}

/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 * @init_type: Flag to indicate is the init_type
 *
 * This the controller initialization routine, executed either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
{
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	u64 base_info;
	u32 timeout;
	u32 ioc_status, ioc_config, i;
	struct mpi3_ioc_facts_data facts_data;

	mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
	mrioc->change_count = 0;
	/* PCI resources already exist when re-initializing after a reset */
	if (init_type == MPI3MR_IT_INIT) {
		mrioc->cpu_count = num_online_cpus();
		retval = mpi3mr_setup_resources(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to setup resources:error %d\n",
			    retval);
			goto out_nocleanup;
		}
	}

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "SOD status %x configuration %x\n",
	    ioc_status, ioc_config);

	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
	ioc_info(mrioc, "SOD base_info %llx\n", base_info);

	/*The timeout value is in 2sec unit, changing it to seconds*/
	mrioc->ready_timeout =
	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);

	ioc_state = mpi3mr_get_iocstate(mrioc);
	ioc_info(mrioc, "IOC in %s state during detection\n",
	    mpi3mr_iocstate_name(ioc_state));

	/* Give an in-transition controller time to settle */
	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
		/*
		 * NOTE(review): if ready_timeout reads as 0, the u32
		 * --timeout below wraps and this loop runs ~4G iterations;
		 * confirm the register can never report 0 or add a floor.
		 */
		timeout = mrioc->ready_timeout * 10;
		do {
			msleep(100);
		} while (--timeout);

		ioc_state = mpi3mr_get_iocstate(mrioc);
		ioc_info(mrioc,
		    "IOC in %s state after waiting for reset time\n",
		    mpi3mr_iocstate_name(ioc_state));
	}

	/* A ready controller is taken to reset state via message unit reset */
	if (ioc_state == MRIOC_STATE_READY) {
		retval = mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
			    retval);
		}
		ioc_state = mpi3mr_get_iocstate(mrioc);
	}
	if (ioc_state != MRIOC_STATE_RESET) {
		mpi3mr_print_fault_info(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
		    MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to soft reset IOC error %d\n",
			    __func__, retval);
			goto out_failed;
		}
	}
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_RESET) {
		retval = -1;
		ioc_err(mrioc, "Cannot bring IOC to reset state\n");
		goto out_failed;
	}

	retval = mpi3mr_setup_admin_qpair(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed;
	}

	/* On the reset path the MSI-x vectors were already requested */
	if (init_type != MPI3MR_IT_RESET) {
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "Failed to setup ISR error %d\n",
			    retval);
			goto out_failed;
		}
	} else
		mpi3mr_ioc_enable_intr(mrioc);

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	mpi3mr_process_factsdata(mrioc, &facts_data);
	if (init_type == MPI3MR_IT_INIT) {
		retval = mpi3mr_check_reset_dma_mask(mrioc);
		if (retval) {
			ioc_err(mrioc, "Resetting dma mask failed %d\n",
			    retval);
			goto out_failed;
		}
	}

	mpi3mr_print_ioc_info(mrioc);

	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
	if (retval) {
		ioc_err(mrioc,
		    "%s :Failed to allocated reply sense buffers %d\n",
		    __func__, retval);
		goto out_failed;
	}

	/* Chain buffers are retained across resets; allocate only once */
	if (init_type == MPI3MR_IT_INIT) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}
	/* Hand all reply and sense buffers over to the firmware */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	if (init_type != MPI3MR_IT_RESET) {
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
			    retval);
			goto out_failed;
		}
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* After a reset the queue count must not shrink below what SCSI uses */
	if ((init_type != MPI3MR_IT_INIT) &&
	    (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
		retval = -1;
		ioc_err(mrioc,
		    "Cannot create minimum number of OpQueues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		goto out_failed;
	}

	/* Mask everything, then unmask only the events the driver handles */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mrioc->event_masks[i] = -1;

	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);

	retval = mpi3mr_issue_event_notification(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to issue event notification %d\n",
		    retval);
		goto out_failed;
	}

	/* On fresh init the probe path issues port enable asynchronously */
	if (init_type != MPI3MR_IT_INIT) {
		ioc_info(mrioc, "Issuing Port Enable\n");
		retval = mpi3mr_issue_port_enable(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "Failed to issue port enable %d\n",
			    retval);
			goto out_failed;
		}
	}
	return retval;

out_failed:
	if (init_type == MPI3MR_IT_INIT)
		mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
	else
		mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
out_nocleanup:
	return retval;
}

/**
 * 
mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 3524 * segments 3525 * @mrioc: Adapter instance reference 3526 * @qidx: Operational reply queue index 3527 * 3528 * Return: Nothing. 3529 */ 3530 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3531 { 3532 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 3533 struct segments *segments; 3534 int i, size; 3535 3536 if (!op_reply_q->q_segments) 3537 return; 3538 3539 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 3540 segments = op_reply_q->q_segments; 3541 for (i = 0; i < op_reply_q->num_segments; i++) 3542 memset(segments[i].segment, 0, size); 3543 } 3544 3545 /** 3546 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 3547 * segments 3548 * @mrioc: Adapter instance reference 3549 * @qidx: Operational request queue index 3550 * 3551 * Return: Nothing. 3552 */ 3553 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3554 { 3555 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 3556 struct segments *segments; 3557 int i, size; 3558 3559 if (!op_req_q->q_segments) 3560 return; 3561 3562 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 3563 segments = op_req_q->q_segments; 3564 for (i = 0; i < op_req_q->num_segments; i++) 3565 memset(segments[i].segment, 0, size); 3566 } 3567 3568 /** 3569 * mpi3mr_memset_buffers - memset memory for a controller 3570 * @mrioc: Adapter instance reference 3571 * 3572 * clear all the memory allocated for a controller, typically 3573 * called post reset to reuse the memory allocated during the 3574 * controller init. 3575 * 3576 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	/* Admin queue memory is retained across resets; just clear it */
	memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/* Clear reply buffers and bitmaps of all internal driver commands */
	memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
	memset(mrioc->host_tm_cmds.reply, 0,
	    sizeof(*mrioc->host_tm_cmds.reply));
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		memset(mrioc->dev_rmhs_cmds[i].reply, 0,
		    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);

	/* Reset per-queue bookkeeping and zero the queue segment memory */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/* Sense buffer pool and its circular free queue */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	/* Reply buffer pool and its free queue */
	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	/* Operational request/reply queue segments */
	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Drop stale reply queue references held by the interrupt info */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	kfree(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGE buffers allocated from the chain DMA pool */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	/* Admin request/reply queues use coherent DMA memory */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown through the config register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware advertised shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	/* Poll in 100ms steps until shutdown completes or times out */
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 * @reason: Cleanup reason
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller and the
 * associated memory resources are freed.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
{
	enum mpi3mr_iocstate ioc_state;

	if (reason == MPI3MR_COMPLETE_CLEANUP)
		mpi3mr_stop_watchdog(mrioc);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only a healthy, ready controller is reset and shut down */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);

		if (reason != MPI3MR_REINIT_FAILURE)
			mpi3mr_issue_ioc_shutdown(mrioc);
	}

	/* Memory and PCI resources are released only for a full cleanup */
	if (reason == MPI3MR_COMPLETE_CLEANUP) {
		mpi3mr_free_mem(mrioc);
		mpi3mr_cleanup_resources(mrioc);
	}
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		if (cmdptr->is_waiting) {
			/* Wake the synchronous waiter */
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			/* Asynchronous command: invoke its completion callback */
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
 */
static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_drv_cmd *cmdptr;
	u8 i;

	/* Flush the shared init and task-management command trackers */
	cmdptr = &mrioc->init_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	cmdptr = &mrioc->host_tm_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	/* Flush every device removal handshake command tracker */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		cmdptr = &mrioc->dev_rmhs_cmds[i];
		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_diagfault_reset_handler - Diag fault reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * This is a handler for issuing diag fault reset from the
 * applications through IOCTL path to stop the execution of the
 * controller
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	int retval = 0;

	ioc_info(mrioc, "Entry: reason code: %s\n",
	    mpi3mr_reset_rc_name(reset_reason));
	mrioc->reset_in_progress = 1;

	mpi3mr_ioc_disable_intr(mrioc);

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);

	if (retval) {
		ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
		    reset_reason);
		/*
		 * Interrupts are re-enabled only on failure; on success the
		 * controller intentionally remains faulted for diagnostics.
		 */
		mpi3mr_ioc_enable_intr(mrioc);
	}
	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
	mrioc->reset_in_progress = 0;
	return retval;
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is a handler for recovering controller by issuing soft
 * reset or diag fault reset. This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All IOCTLs/IO will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable
 *
 * If snapdump bit is set, the controller is issued with diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* In fault debug mode the IOC is killed instead of being recovered */
	if (mrioc->fault_dbg) {
		if (snapdump)
			mpi3mr_set_diagsave(mrioc);
		mpi3mr_kill_ioc(mrioc, reset_reason);
	}

	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc, "Another reset in progress\n");
		return -1;
	}
	mrioc->reset_in_progress = 1;

	/* Mask all events before the reset, except for fault-driven resets */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		retval = mpi3mr_issue_event_notification(mrioc);

		if (retval) {
			ioc_err(mrioc,
			    "Failed to turn off events prior to reset %d\n",
			    retval);
		}
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	if (snapdump) {
		/*
		 * Fault the firmware so it saves a snapdump, then poll until
		 * the diag-save completes before issuing the soft reset.
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Flush all driver state that refers to the pre-reset firmware */
	mpi3mr_flush_delayed_rmhs_list(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/*
	 * NOTE(review): the purpose of this 10 second delay after reinit is
	 * not evident from this code — presumably settle time before I/O is
	 * unblocked; confirm before changing.
	 */
	ssleep(10);

out:
	if (!retval) {
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mpi3mr_rfresh_tgtdevs(mrioc);
		mrioc->ts_update_counter = 0;
		/* Restart the watchdog that was stopped for the reset */
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	} else {
		/* Recovery failed: fault the firmware and give up on the IOC */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
	}

	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
	return retval;
}