1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2021 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/io-64-nonatomic-lo-hi.h> 12 13 static int 14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason); 15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc); 16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, 17 struct mpi3_ioc_facts_data *facts_data); 18 19 #if defined(writeq) && defined(CONFIG_64BIT) 20 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 21 { 22 writeq(b, addr); 23 } 24 #else 25 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr) 26 { 27 __u64 data_out = b; 28 29 writel((u32)(data_out), addr); 30 writel((u32)(data_out >> 32), (addr + 4)); 31 } 32 #endif 33 34 static inline bool 35 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q) 36 { 37 u16 pi, ci, max_entries; 38 bool is_qfull = false; 39 40 pi = op_req_q->pi; 41 ci = READ_ONCE(op_req_q->ci); 42 max_entries = op_req_q->num_requests; 43 44 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1)))) 45 is_qfull = true; 46 47 return is_qfull; 48 } 49 50 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc) 51 { 52 u16 i, max_vectors; 53 54 max_vectors = mrioc->intr_info_count; 55 56 for (i = 0; i < max_vectors; i++) 57 synchronize_irq(pci_irq_vector(mrioc->pdev, i)); 58 } 59 60 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc) 61 { 62 mrioc->intr_enabled = 0; 63 mpi3mr_sync_irqs(mrioc); 64 } 65 66 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc) 67 { 68 mrioc->intr_enabled = 1; 69 } 70 71 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc) 72 { 73 u16 i; 74 75 mpi3mr_ioc_disable_intr(mrioc); 76 77 if (!mrioc->intr_info) 78 return; 79 80 for (i = 0; i < mrioc->intr_info_count; i++) 81 free_irq(pci_irq_vector(mrioc->pdev, i), 82 (mrioc->intr_info + 
i)); 83 84 kfree(mrioc->intr_info); 85 mrioc->intr_info = NULL; 86 mrioc->intr_info_count = 0; 87 mrioc->is_intr_info_set = false; 88 pci_free_irq_vectors(mrioc->pdev); 89 } 90 91 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length, 92 dma_addr_t dma_addr) 93 { 94 struct mpi3_sge_common *sgel = paddr; 95 96 sgel->flags = flags; 97 sgel->length = cpu_to_le32(length); 98 sgel->address = cpu_to_le64(dma_addr); 99 } 100 101 void mpi3mr_build_zero_len_sge(void *paddr) 102 { 103 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 104 105 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1); 106 } 107 108 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc, 109 dma_addr_t phys_addr) 110 { 111 if (!phys_addr) 112 return NULL; 113 114 if ((phys_addr < mrioc->reply_buf_dma) || 115 (phys_addr > mrioc->reply_buf_dma_max_address)) 116 return NULL; 117 118 return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma); 119 } 120 121 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc, 122 dma_addr_t phys_addr) 123 { 124 if (!phys_addr) 125 return NULL; 126 127 return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma); 128 } 129 130 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc, 131 u64 reply_dma) 132 { 133 u32 old_idx = 0; 134 unsigned long flags; 135 136 spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags); 137 old_idx = mrioc->reply_free_queue_host_index; 138 mrioc->reply_free_queue_host_index = ( 139 (mrioc->reply_free_queue_host_index == 140 (mrioc->reply_free_qsz - 1)) ? 
0 : 141 (mrioc->reply_free_queue_host_index + 1)); 142 mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma); 143 writel(mrioc->reply_free_queue_host_index, 144 &mrioc->sysif_regs->reply_free_host_index); 145 spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags); 146 } 147 148 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc, 149 u64 sense_buf_dma) 150 { 151 u32 old_idx = 0; 152 unsigned long flags; 153 154 spin_lock_irqsave(&mrioc->sbq_lock, flags); 155 old_idx = mrioc->sbq_host_index; 156 mrioc->sbq_host_index = ((mrioc->sbq_host_index == 157 (mrioc->sense_buf_q_sz - 1)) ? 0 : 158 (mrioc->sbq_host_index + 1)); 159 mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma); 160 writel(mrioc->sbq_host_index, 161 &mrioc->sysif_regs->sense_buffer_free_host_index); 162 spin_unlock_irqrestore(&mrioc->sbq_lock, flags); 163 } 164 165 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, 166 struct mpi3_event_notification_reply *event_reply) 167 { 168 char *desc = NULL; 169 u16 event; 170 171 event = event_reply->event; 172 173 switch (event) { 174 case MPI3_EVENT_LOG_DATA: 175 desc = "Log Data"; 176 break; 177 case MPI3_EVENT_CHANGE: 178 desc = "Event Change"; 179 break; 180 case MPI3_EVENT_GPIO_INTERRUPT: 181 desc = "GPIO Interrupt"; 182 break; 183 case MPI3_EVENT_TEMP_THRESHOLD: 184 desc = "Temperature Threshold"; 185 break; 186 case MPI3_EVENT_CABLE_MGMT: 187 desc = "Cable Management"; 188 break; 189 case MPI3_EVENT_ENERGY_PACK_CHANGE: 190 desc = "Energy Pack Change"; 191 break; 192 case MPI3_EVENT_DEVICE_ADDED: 193 { 194 struct mpi3_device_page0 *event_data = 195 (struct mpi3_device_page0 *)event_reply->event_data; 196 ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n", 197 event_data->dev_handle, event_data->device_form); 198 return; 199 } 200 case MPI3_EVENT_DEVICE_INFO_CHANGED: 201 { 202 struct mpi3_device_page0 *event_data = 203 (struct mpi3_device_page0 *)event_reply->event_data; 204 ioc_info(mrioc, "Device Info Changed: dev=0x%04x 
Form=0x%x\n", 205 event_data->dev_handle, event_data->device_form); 206 return; 207 } 208 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 209 { 210 struct mpi3_event_data_device_status_change *event_data = 211 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 212 ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n", 213 event_data->dev_handle, event_data->reason_code); 214 return; 215 } 216 case MPI3_EVENT_SAS_DISCOVERY: 217 { 218 struct mpi3_event_data_sas_discovery *event_data = 219 (struct mpi3_event_data_sas_discovery *)event_reply->event_data; 220 ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n", 221 (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ? 222 "start" : "stop", 223 le32_to_cpu(event_data->discovery_status)); 224 return; 225 } 226 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 227 desc = "SAS Broadcast Primitive"; 228 break; 229 case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE: 230 desc = "SAS Notify Primitive"; 231 break; 232 case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 233 desc = "SAS Init Device Status Change"; 234 break; 235 case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW: 236 desc = "SAS Init Table Overflow"; 237 break; 238 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 239 desc = "SAS Topology Change List"; 240 break; 241 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 242 desc = "Enclosure Device Status Change"; 243 break; 244 case MPI3_EVENT_HARD_RESET_RECEIVED: 245 desc = "Hard Reset Received"; 246 break; 247 case MPI3_EVENT_SAS_PHY_COUNTER: 248 desc = "SAS PHY Counter"; 249 break; 250 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 251 desc = "SAS Device Discovery Error"; 252 break; 253 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 254 desc = "PCIE Topology Change List"; 255 break; 256 case MPI3_EVENT_PCIE_ENUMERATION: 257 { 258 struct mpi3_event_data_pcie_enumeration *event_data = 259 (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data; 260 ioc_info(mrioc, "PCIE Enumeration: (%s)", 261 (event_data->reason_code == 262 
MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop"); 263 if (event_data->enumeration_status) 264 ioc_info(mrioc, "enumeration_status(0x%08x)\n", 265 le32_to_cpu(event_data->enumeration_status)); 266 return; 267 } 268 case MPI3_EVENT_PREPARE_FOR_RESET: 269 desc = "Prepare For Reset"; 270 break; 271 } 272 273 if (!desc) 274 return; 275 276 ioc_info(mrioc, "%s\n", desc); 277 } 278 279 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc, 280 struct mpi3_default_reply *def_reply) 281 { 282 struct mpi3_event_notification_reply *event_reply = 283 (struct mpi3_event_notification_reply *)def_reply; 284 285 mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count); 286 mpi3mr_print_event_data(mrioc, event_reply); 287 mpi3mr_os_handle_events(mrioc, event_reply); 288 } 289 290 static struct mpi3mr_drv_cmd * 291 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag, 292 struct mpi3_default_reply *def_reply) 293 { 294 u16 idx; 295 296 switch (host_tag) { 297 case MPI3MR_HOSTTAG_INITCMDS: 298 return &mrioc->init_cmds; 299 case MPI3MR_HOSTTAG_BLK_TMS: 300 return &mrioc->host_tm_cmds; 301 case MPI3MR_HOSTTAG_INVALID: 302 if (def_reply && def_reply->function == 303 MPI3_FUNCTION_EVENT_NOTIFICATION) 304 mpi3mr_handle_events(mrioc, def_reply); 305 return NULL; 306 default: 307 break; 308 } 309 if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN && 310 host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) { 311 idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 312 return &mrioc->dev_rmhs_cmds[idx]; 313 } 314 315 return NULL; 316 } 317 318 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, 319 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma) 320 { 321 u16 reply_desc_type, host_tag = 0; 322 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 323 u32 ioc_loginfo = 0; 324 struct mpi3_status_reply_descriptor *status_desc; 325 struct mpi3_address_reply_descriptor *addr_desc; 326 struct mpi3_success_reply_descriptor *success_desc; 327 struct mpi3_default_reply *def_reply = 
	NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Status descriptor: everything is inline, no reply frame */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Address descriptor: status lives in a DMA'ed reply frame */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			/* SCSI IO replies may also carry a sense buffer */
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: implied MPI3_IOCSTATUS_SUCCESS */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			/* Mark complete, stash status, copy reply frame */
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			/* Wake a synchronous waiter or fire the callback */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set when scsi_reply was assigned above */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * Drain the admin reply queue: process descriptors until the phase bit
 * no longer matches the expected phase, then publish the new consumer
 * index to the controller. Returns the number of replies processed.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch means the controller has not posted anything new */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Wrap the consumer index; phase flips on each wrap */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/* Queue is segmented: locate the segment, then index within it */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/*
 * Drain one operational reply queue (phase-bit protocol as in the admin
 * queue). in_use guards against concurrent processing of the same queue;
 * the loop is capped at max_host_ios to avoid CPU lockup, with the
 * remainder deferred to the threaded ISR via enable_irq_poll.
 * Returns the number of replies processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may process this queue at a time */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish request-queue CI so submitters see freed slots */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/*
 * Hard-IRQ handler: MSI-X vector 0 additionally services the admin reply
 * queue; every vector services its operational reply queue if it has one.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/*
 * Top-level IRQ handler: run the primary handler, then decide whether to
 * hand off to the threaded poll handler (IRQ_WAKE_THREAD) when more I/O
 * completions are expected.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
573 */ 574 if (!intr_info->op_reply_q) 575 return ret; 576 577 if (!intr_info->op_reply_q->enable_irq_poll || 578 !atomic_read(&intr_info->op_reply_q->pend_ios)) 579 return ret; 580 581 disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx)); 582 583 return IRQ_WAKE_THREAD; 584 } 585 586 /** 587 * mpi3mr_isr_poll - Reply queue polling routine 588 * @irq: IRQ 589 * @privdata: Interrupt info 590 * 591 * poll for pending I/O completions in a loop until pending I/Os 592 * present or controller queue depth I/Os are processed. 593 * 594 * Return: IRQ_NONE or IRQ_HANDLED 595 */ 596 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) 597 { 598 struct mpi3mr_intr_info *intr_info = privdata; 599 struct mpi3mr_ioc *mrioc; 600 u16 midx; 601 u32 num_op_reply = 0; 602 603 if (!intr_info || !intr_info->op_reply_q) 604 return IRQ_NONE; 605 606 mrioc = intr_info->mrioc; 607 midx = intr_info->msix_index; 608 609 /* Poll for pending IOs completions */ 610 do { 611 if (!mrioc->intr_enabled) 612 break; 613 614 if (!midx) 615 mpi3mr_process_admin_reply_q(mrioc); 616 if (intr_info->op_reply_q) 617 num_op_reply += 618 mpi3mr_process_op_reply_q(mrioc, intr_info); 619 620 usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep); 621 622 } while (atomic_read(&intr_info->op_reply_q->pend_ios) && 623 (num_op_reply < mrioc->max_host_ios)); 624 625 intr_info->op_reply_q->enable_irq_poll = false; 626 enable_irq(pci_irq_vector(mrioc->pdev, midx)); 627 628 return IRQ_HANDLED; 629 } 630 631 /** 632 * mpi3mr_request_irq - Request IRQ and register ISR 633 * @mrioc: Adapter instance reference 634 * @index: IRQ vector index 635 * 636 * Request threaded ISR with primary ISR and secondary 637 * 638 * Return: 0 on success and non zero on failures. 
639 */ 640 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 641 { 642 struct pci_dev *pdev = mrioc->pdev; 643 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 644 int retval = 0; 645 646 intr_info->mrioc = mrioc; 647 intr_info->msix_index = index; 648 intr_info->op_reply_q = NULL; 649 650 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 651 mrioc->driver_name, mrioc->id, index); 652 653 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 654 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 655 if (retval) { 656 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 657 intr_info->name, pci_irq_vector(pdev, index)); 658 return retval; 659 } 660 661 return retval; 662 } 663 664 /** 665 * mpi3mr_setup_isr - Setup ISR for the controller 666 * @mrioc: Adapter instance reference 667 * @setup_one: Request one IRQ or more 668 * 669 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 670 * 671 * Return: 0 on success and non zero on failures. 672 */ 673 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 674 { 675 unsigned int irq_flags = PCI_IRQ_MSIX; 676 int max_vectors; 677 int retval; 678 int i; 679 struct irq_affinity desc = { .pre_vectors = 1}; 680 681 if (mrioc->is_intr_info_set) 682 return 0; 683 684 mpi3mr_cleanup_isr(mrioc); 685 686 if (setup_one || reset_devices) 687 max_vectors = 1; 688 else { 689 max_vectors = 690 min_t(int, mrioc->cpu_count + 1, mrioc->msix_count); 691 692 ioc_info(mrioc, 693 "MSI-X vectors supported: %d, no of cores: %d,", 694 mrioc->msix_count, mrioc->cpu_count); 695 ioc_info(mrioc, 696 "MSI-x vectors requested: %d\n", max_vectors); 697 } 698 699 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 700 701 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 702 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 703 1, max_vectors, irq_flags, &desc); 704 if (retval < 0) { 705 ioc_err(mrioc, "Cannot alloc irq vectors\n"); 706 goto out_failed; 707 } 708 if (retval != max_vectors) { 709 ioc_info(mrioc, 710 "allocated vectors (%d) are less than configured (%d)\n", 711 retval, max_vectors); 712 /* 713 * If only one MSI-x is allocated, then MSI-x 0 will be shared 714 * between Admin queue and operational queue 715 */ 716 if (retval == 1) 717 mrioc->op_reply_q_offset = 0; 718 719 max_vectors = retval; 720 } 721 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 722 GFP_KERNEL); 723 if (!mrioc->intr_info) { 724 retval = -ENOMEM; 725 pci_free_irq_vectors(mrioc->pdev); 726 goto out_failed; 727 } 728 for (i = 0; i < max_vectors; i++) { 729 retval = mpi3mr_request_irq(mrioc, i); 730 if (retval) { 731 mrioc->intr_info_count = i; 732 goto out_failed; 733 } 734 } 735 if (reset_devices || !setup_one) 736 mrioc->is_intr_info_set = true; 737 mrioc->intr_info_count = max_vectors; 738 mpi3mr_ioc_enable_intr(mrioc); 739 return 0; 740 741 out_failed: 742 mpi3mr_cleanup_isr(mrioc); 743 744 return retval; 745 } 746 747 static const struct { 748 enum mpi3mr_iocstate value; 749 char *name; 750 } mrioc_states[] = { 751 { MRIOC_STATE_READY, "ready" }, 752 { MRIOC_STATE_FAULT, "fault" }, 753 { MRIOC_STATE_RESET, "reset" }, 754 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 755 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 756 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 757 }; 758 759 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 760 { 761 int i; 762 char *name = NULL; 763 764 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 765 if (mrioc_states[i].value == mrioc_state) { 766 name = mrioc_states[i].name; 767 break; 768 } 769 } 770 return name; 771 } 772 773 /* Reset reason to name mapper structure*/ 774 static const struct { 775 enum mpi3mr_reset_reason value; 776 char 
*name; 777 } mpi3mr_reset_reason_codes[] = { 778 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 779 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 780 { MPI3MR_RESET_FROM_IOCTL, "application invocation" }, 781 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 782 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 783 { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" }, 784 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 785 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" }, 786 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 787 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 788 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 789 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 790 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 791 { 792 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 793 "create request queue timeout" 794 }, 795 { 796 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 797 "create reply queue timeout" 798 }, 799 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 800 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 801 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 802 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 803 { 804 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 805 "component image activation timeout" 806 }, 807 { 808 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 809 "get package version timeout" 810 }, 811 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 812 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 813 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" }, 814 }; 815 816 /** 817 * mpi3mr_reset_rc_name - get reset reason code name 818 * @reason_code: reset reason code value 819 * 820 * Map reset reason to an NULL terminated ASCII string 821 * 822 * Return: name corresponding to reset reason value or NULL. 
823 */ 824 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 825 { 826 int i; 827 char *name = NULL; 828 829 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 830 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 831 name = mpi3mr_reset_reason_codes[i].name; 832 break; 833 } 834 } 835 return name; 836 } 837 838 /* Reset type to name mapper structure*/ 839 static const struct { 840 u16 reset_type; 841 char *name; 842 } mpi3mr_reset_types[] = { 843 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 844 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 845 }; 846 847 /** 848 * mpi3mr_reset_type_name - get reset type name 849 * @reset_type: reset type value 850 * 851 * Map reset type to an NULL terminated ASCII string 852 * 853 * Return: name corresponding to reset type value or NULL. 854 */ 855 static const char *mpi3mr_reset_type_name(u16 reset_type) 856 { 857 int i; 858 char *name = NULL; 859 860 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 861 if (mpi3mr_reset_types[i].reset_type == reset_type) { 862 name = mpi3mr_reset_types[i].name; 863 break; 864 } 865 } 866 return name; 867 } 868 869 /** 870 * mpi3mr_print_fault_info - Display fault information 871 * @mrioc: Adapter instance reference 872 * 873 * Display the controller fault information if there is a 874 * controller fault. 875 * 876 * Return: Nothing. 
877 */ 878 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 879 { 880 u32 ioc_status, code, code1, code2, code3; 881 882 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 883 884 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 885 code = readl(&mrioc->sysif_regs->fault); 886 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 887 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 888 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 889 890 ioc_info(mrioc, 891 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 892 code, code1, code2, code3); 893 } 894 } 895 896 /** 897 * mpi3mr_get_iocstate - Get IOC State 898 * @mrioc: Adapter instance reference 899 * 900 * Return a proper IOC state enum based on the IOC status and 901 * IOC configuration and unrcoverable state of the controller. 902 * 903 * Return: Current IOC state. 904 */ 905 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 906 { 907 u32 ioc_status, ioc_config; 908 u8 ready, enabled; 909 910 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 911 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 912 913 if (mrioc->unrecoverable) 914 return MRIOC_STATE_UNRECOVERABLE; 915 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 916 return MRIOC_STATE_FAULT; 917 918 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 919 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 920 921 if (ready && enabled) 922 return MRIOC_STATE_READY; 923 if ((!ready) && (!enabled)) 924 return MRIOC_STATE_RESET; 925 if ((!ready) && (enabled)) 926 return MRIOC_STATE_BECOMING_READY; 927 928 return MRIOC_STATE_RESET_REQUESTED; 929 } 930 931 /** 932 * mpi3mr_clear_reset_history - clear reset history 933 * @mrioc: Adapter instance reference 934 * 935 * Write the reset history bit in IOC status to clear the bit, 936 * if it is already set. 937 * 938 * Return: Nothing. 
939 */ 940 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc) 941 { 942 u32 ioc_status; 943 944 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 945 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 946 writel(ioc_status, &mrioc->sysif_regs->ioc_status); 947 } 948 949 /** 950 * mpi3mr_issue_and_process_mur - Message unit Reset handler 951 * @mrioc: Adapter instance reference 952 * @reset_reason: Reset reason code 953 * 954 * Issue Message unit Reset to the controller and wait for it to 955 * be complete. 956 * 957 * Return: 0 on success, -1 on failure. 958 */ 959 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, 960 u32 reset_reason) 961 { 962 u32 ioc_config, timeout, ioc_status; 963 int retval = -1; 964 965 ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n"); 966 if (mrioc->unrecoverable) { 967 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n"); 968 return retval; 969 } 970 mpi3mr_clear_reset_history(mrioc); 971 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 972 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 973 ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 974 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 975 976 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 977 do { 978 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 979 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) { 980 mpi3mr_clear_reset_history(mrioc); 981 break; 982 } 983 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 984 mpi3mr_print_fault_info(mrioc); 985 break; 986 } 987 msleep(100); 988 } while (--timeout); 989 990 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 991 if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 992 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) || 993 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 994 retval = 0; 995 996 ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n", 997 (!retval) ? 
"successful" : "failed", ioc_status, ioc_config); 998 return retval; 999 } 1000 1001 /** 1002 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters 1003 * during reset/resume 1004 * @mrioc: Adapter instance reference 1005 * 1006 * Return zero if the new IOCFacts parameters value is compatible with 1007 * older values else return -EPERM 1008 */ 1009 static int 1010 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) 1011 { 1012 u16 dev_handle_bitmap_sz; 1013 void *removepend_bitmap; 1014 1015 if (mrioc->facts.reply_sz > mrioc->reply_sz) { 1016 ioc_err(mrioc, 1017 "cannot increase reply size from %d to %d\n", 1018 mrioc->reply_sz, mrioc->facts.reply_sz); 1019 return -EPERM; 1020 } 1021 1022 if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) { 1023 ioc_err(mrioc, 1024 "cannot reduce number of operational reply queues from %d to %d\n", 1025 mrioc->num_op_reply_q, 1026 mrioc->facts.max_op_reply_q); 1027 return -EPERM; 1028 } 1029 1030 if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) { 1031 ioc_err(mrioc, 1032 "cannot reduce number of operational request queues from %d to %d\n", 1033 mrioc->num_op_req_q, mrioc->facts.max_op_req_q); 1034 return -EPERM; 1035 } 1036 1037 dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8; 1038 if (mrioc->facts.max_devhandle % 8) 1039 dev_handle_bitmap_sz++; 1040 if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) { 1041 removepend_bitmap = krealloc(mrioc->removepend_bitmap, 1042 dev_handle_bitmap_sz, GFP_KERNEL); 1043 if (!removepend_bitmap) { 1044 ioc_err(mrioc, 1045 "failed to increase removepend_bitmap sz from: %d to %d\n", 1046 mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); 1047 return -EPERM; 1048 } 1049 memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0, 1050 dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz); 1051 mrioc->removepend_bitmap = removepend_bitmap; 1052 ioc_info(mrioc, 1053 "increased dev_handle_bitmap_sz from %d to %d\n", 1054 mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); 1055 
mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz; 1056 } 1057 1058 return 0; 1059 } 1060 1061 /** 1062 * mpi3mr_bring_ioc_ready - Bring controller to ready state 1063 * @mrioc: Adapter instance reference 1064 * 1065 * Set Enable IOC bit in IOC configuration register and wait for 1066 * the controller to become ready. 1067 * 1068 * Return: 0 on success, appropriate error on failure. 1069 */ 1070 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 1071 { 1072 u32 ioc_config, ioc_status, timeout; 1073 int retval = 0; 1074 enum mpi3mr_iocstate ioc_state; 1075 u64 base_info; 1076 1077 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1078 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1079 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 1080 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", 1081 ioc_status, ioc_config, base_info); 1082 1083 /*The timeout value is in 2sec unit, changing it to seconds*/ 1084 mrioc->ready_timeout = 1085 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 1086 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 1087 1088 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); 1089 1090 ioc_state = mpi3mr_get_iocstate(mrioc); 1091 ioc_info(mrioc, "controller is in %s state during detection\n", 1092 mpi3mr_iocstate_name(ioc_state)); 1093 1094 if (ioc_state == MRIOC_STATE_BECOMING_READY || 1095 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 1096 timeout = mrioc->ready_timeout * 10; 1097 do { 1098 msleep(100); 1099 } while (--timeout); 1100 1101 ioc_state = mpi3mr_get_iocstate(mrioc); 1102 ioc_info(mrioc, 1103 "controller is in %s state after waiting to reset\n", 1104 mpi3mr_iocstate_name(ioc_state)); 1105 } 1106 1107 if (ioc_state == MRIOC_STATE_READY) { 1108 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); 1109 retval = mpi3mr_issue_and_process_mur(mrioc, 1110 MPI3MR_RESET_FROM_BRINGUP); 1111 ioc_state = 
mpi3mr_get_iocstate(mrioc); 1112 if (retval) 1113 ioc_err(mrioc, 1114 "message unit reset failed with error %d current state %s\n", 1115 retval, mpi3mr_iocstate_name(ioc_state)); 1116 } 1117 if (ioc_state != MRIOC_STATE_RESET) { 1118 mpi3mr_print_fault_info(mrioc); 1119 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1120 retval = mpi3mr_issue_reset(mrioc, 1121 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1122 MPI3MR_RESET_FROM_BRINGUP); 1123 if (retval) { 1124 ioc_err(mrioc, 1125 "soft reset failed with error %d\n", retval); 1126 goto out_failed; 1127 } 1128 } 1129 ioc_state = mpi3mr_get_iocstate(mrioc); 1130 if (ioc_state != MRIOC_STATE_RESET) { 1131 ioc_err(mrioc, 1132 "cannot bring controller to reset state, current state: %s\n", 1133 mpi3mr_iocstate_name(ioc_state)); 1134 goto out_failed; 1135 } 1136 mpi3mr_clear_reset_history(mrioc); 1137 retval = mpi3mr_setup_admin_qpair(mrioc); 1138 if (retval) { 1139 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1140 retval); 1141 goto out_failed; 1142 } 1143 1144 ioc_info(mrioc, "bringing controller to ready state\n"); 1145 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1146 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1147 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1148 1149 timeout = mrioc->ready_timeout * 10; 1150 do { 1151 ioc_state = mpi3mr_get_iocstate(mrioc); 1152 if (ioc_state == MRIOC_STATE_READY) { 1153 ioc_info(mrioc, 1154 "successfully transistioned to %s state\n", 1155 mpi3mr_iocstate_name(ioc_state)); 1156 return 0; 1157 } 1158 msleep(100); 1159 } while (--timeout); 1160 1161 out_failed: 1162 ioc_state = mpi3mr_get_iocstate(mrioc); 1163 ioc_err(mrioc, 1164 "failed to bring to ready state, current state: %s\n", 1165 mpi3mr_iocstate_name(ioc_state)); 1166 return retval; 1167 } 1168 1169 /** 1170 * mpi3mr_soft_reset_success - Check softreset is success or not 1171 * @ioc_status: IOC status register value 1172 * @ioc_config: IOC config register value 
1173 * 1174 * Check whether the soft reset is successful or not based on 1175 * IOC status and IOC config register values. 1176 * 1177 * Return: True when the soft reset is success, false otherwise. 1178 */ 1179 static inline bool 1180 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1181 { 1182 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1183 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1184 return true; 1185 return false; 1186 } 1187 1188 /** 1189 * mpi3mr_diagfault_success - Check diag fault is success or not 1190 * @mrioc: Adapter reference 1191 * @ioc_status: IOC status register value 1192 * 1193 * Check whether the controller hit diag reset fault code. 1194 * 1195 * Return: True when there is diag fault, false otherwise. 1196 */ 1197 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1198 u32 ioc_status) 1199 { 1200 u32 fault; 1201 1202 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1203 return false; 1204 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1205 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { 1206 mpi3mr_print_fault_info(mrioc); 1207 return true; 1208 } 1209 return false; 1210 } 1211 1212 /** 1213 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1214 * @mrioc: Adapter reference 1215 * 1216 * Set diag save bit in IOC configuration register to enable 1217 * snapdump. 1218 * 1219 * Return: Nothing. 
1220 */ 1221 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1222 { 1223 u32 ioc_config; 1224 1225 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1226 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1227 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1228 } 1229 1230 /** 1231 * mpi3mr_issue_reset - Issue reset to the controller 1232 * @mrioc: Adapter reference 1233 * @reset_type: Reset type 1234 * @reset_reason: Reset reason code 1235 * 1236 * Unlock the host diagnostic registers and write the specific 1237 * reset type to that, wait for reset acknowledgment from the 1238 * controller, if the reset is not successful retry for the 1239 * predefined number of times. 1240 * 1241 * Return: 0 on success, non-zero on failure. 1242 */ 1243 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1244 u32 reset_reason) 1245 { 1246 int retval = -1; 1247 u8 unlock_retry_count = 0; 1248 u32 host_diagnostic, ioc_status, ioc_config; 1249 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 1250 1251 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1252 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1253 return retval; 1254 if (mrioc->unrecoverable) 1255 return retval; 1256 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { 1257 retval = 0; 1258 return retval; 1259 } 1260 1261 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1262 mpi3mr_reset_type_name(reset_type), 1263 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1264 1265 mpi3mr_clear_reset_history(mrioc); 1266 do { 1267 ioc_info(mrioc, 1268 "Write magic sequence to unlock host diag register (retry=%d)\n", 1269 ++unlock_retry_count); 1270 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1271 ioc_err(mrioc, 1272 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", 1273 mpi3mr_reset_type_name(reset_type), 1274 host_diagnostic); 1275 mrioc->unrecoverable = 1; 1276 return retval; 1277 } 1278 1279 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1280 &mrioc->sysif_regs->write_sequence); 1281 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1282 &mrioc->sysif_regs->write_sequence); 1283 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1284 &mrioc->sysif_regs->write_sequence); 1285 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1286 &mrioc->sysif_regs->write_sequence); 1287 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1288 &mrioc->sysif_regs->write_sequence); 1289 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1290 &mrioc->sysif_regs->write_sequence); 1291 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1292 &mrioc->sysif_regs->write_sequence); 1293 usleep_range(1000, 1100); 1294 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1295 ioc_info(mrioc, 1296 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1297 unlock_retry_count, host_diagnostic); 1298 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1299 1300 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1301 writel(host_diagnostic | reset_type, 1302 &mrioc->sysif_regs->host_diagnostic); 1303 switch (reset_type) { 1304 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: 1305 do { 1306 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1307 ioc_config = 1308 readl(&mrioc->sysif_regs->ioc_configuration); 1309 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 1310 && mpi3mr_soft_reset_success(ioc_status, ioc_config) 1311 ) { 1312 mpi3mr_clear_reset_history(mrioc); 1313 retval = 0; 1314 break; 1315 } 1316 msleep(100); 1317 } while (--timeout); 1318 mpi3mr_print_fault_info(mrioc); 1319 break; 1320 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: 1321 do { 1322 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1323 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1324 retval = 0; 1325 break; 1326 } 1327 msleep(100); 1328 } while (--timeout); 1329 break; 1330 default: 1331 break; 1332 } 1333 1334 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1335 &mrioc->sysif_regs->write_sequence); 1336 1337 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1338 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1339 ioc_info(mrioc, 1340 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1341 (!retval)?"successful":"failed", ioc_status, 1342 ioc_config); 1343 if (retval) 1344 mrioc->unrecoverable = 1; 1345 return retval; 1346 } 1347 1348 /** 1349 * mpi3mr_admin_request_post - Post request to admin queue 1350 * @mrioc: Adapter reference 1351 * @admin_req: MPI3 request 1352 * @admin_req_sz: Request size 1353 * @ignore_reset: Ignore reset in process 1354 * 1355 * Post the MPI3 request into admin request queue and 1356 * inform the controller, if the queue is full return 1357 * appropriate error. 1358 * 1359 * Return: 0 on success, non-zero on failure. 1360 */ 1361 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1362 u16 admin_req_sz, u8 ignore_reset) 1363 { 1364 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1365 int retval = 0; 1366 unsigned long flags; 1367 u8 *areq_entry; 1368 1369 if (mrioc->unrecoverable) { 1370 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1371 return -EFAULT; 1372 } 1373 1374 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1375 areq_pi = mrioc->admin_req_pi; 1376 areq_ci = mrioc->admin_req_ci; 1377 max_entries = mrioc->num_admin_req; 1378 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1379 (areq_pi == (max_entries - 1)))) { 1380 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1381 retval = -EAGAIN; 1382 goto out; 1383 } 1384 if (!ignore_reset && mrioc->reset_in_progress) { 1385 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1386 retval = -EAGAIN; 1387 goto out; 1388 } 1389 areq_entry = (u8 *)mrioc->admin_req_base + 1390 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1391 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1392 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1393 
1394 if (++areq_pi == max_entries) 1395 areq_pi = 0; 1396 mrioc->admin_req_pi = areq_pi; 1397 1398 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1399 1400 out: 1401 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1402 1403 return retval; 1404 } 1405 1406 /** 1407 * mpi3mr_free_op_req_q_segments - free request memory segments 1408 * @mrioc: Adapter instance reference 1409 * @q_idx: operational request queue index 1410 * 1411 * Free memory segments allocated for operational request queue 1412 * 1413 * Return: Nothing. 1414 */ 1415 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1416 { 1417 u16 j; 1418 int size; 1419 struct segments *segments; 1420 1421 segments = mrioc->req_qinfo[q_idx].q_segments; 1422 if (!segments) 1423 return; 1424 1425 if (mrioc->enable_segqueue) { 1426 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1427 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1428 dma_free_coherent(&mrioc->pdev->dev, 1429 MPI3MR_MAX_SEG_LIST_SIZE, 1430 mrioc->req_qinfo[q_idx].q_segment_list, 1431 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1432 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1433 } 1434 } else 1435 size = mrioc->req_qinfo[q_idx].num_requests * 1436 mrioc->facts.op_req_sz; 1437 1438 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1439 if (!segments[j].segment) 1440 continue; 1441 dma_free_coherent(&mrioc->pdev->dev, 1442 size, segments[j].segment, segments[j].segment_dma); 1443 segments[j].segment = NULL; 1444 } 1445 kfree(mrioc->req_qinfo[q_idx].q_segments); 1446 mrioc->req_qinfo[q_idx].q_segments = NULL; 1447 mrioc->req_qinfo[q_idx].qid = 0; 1448 } 1449 1450 /** 1451 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1452 * @mrioc: Adapter instance reference 1453 * @q_idx: operational reply queue index 1454 * 1455 * Free memory segments allocated for operational reply queue 1456 * 1457 * Return: Nothing. 
1458 */ 1459 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1460 { 1461 u16 j; 1462 int size; 1463 struct segments *segments; 1464 1465 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 1466 if (!segments) 1467 return; 1468 1469 if (mrioc->enable_segqueue) { 1470 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1471 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 1472 dma_free_coherent(&mrioc->pdev->dev, 1473 MPI3MR_MAX_SEG_LIST_SIZE, 1474 mrioc->op_reply_qinfo[q_idx].q_segment_list, 1475 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 1476 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1477 } 1478 } else 1479 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 1480 mrioc->op_reply_desc_sz; 1481 1482 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 1483 if (!segments[j].segment) 1484 continue; 1485 dma_free_coherent(&mrioc->pdev->dev, 1486 size, segments[j].segment, segments[j].segment_dma); 1487 segments[j].segment = NULL; 1488 } 1489 1490 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 1491 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 1492 mrioc->op_reply_qinfo[q_idx].qid = 0; 1493 } 1494 1495 /** 1496 * mpi3mr_delete_op_reply_q - delete operational reply queue 1497 * @mrioc: Adapter instance reference 1498 * @qidx: operational reply queue index 1499 * 1500 * Delete operatinal reply queue by issuing MPI request 1501 * through admin queue. 1502 * 1503 * Return: 0 on success, non-zero on failure. 
1504 */ 1505 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1506 { 1507 struct mpi3_delete_reply_queue_request delq_req; 1508 int retval = 0; 1509 u16 reply_qid = 0, midx; 1510 1511 reply_qid = mrioc->op_reply_qinfo[qidx].qid; 1512 1513 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1514 1515 if (!reply_qid) { 1516 retval = -1; 1517 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 1518 goto out; 1519 } 1520 1521 memset(&delq_req, 0, sizeof(delq_req)); 1522 mutex_lock(&mrioc->init_cmds.mutex); 1523 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1524 retval = -1; 1525 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 1526 mutex_unlock(&mrioc->init_cmds.mutex); 1527 goto out; 1528 } 1529 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1530 mrioc->init_cmds.is_waiting = 1; 1531 mrioc->init_cmds.callback = NULL; 1532 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1533 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 1534 delq_req.queue_id = cpu_to_le16(reply_qid); 1535 1536 init_completion(&mrioc->init_cmds.done); 1537 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 1538 1); 1539 if (retval) { 1540 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 1541 goto out_unlock; 1542 } 1543 wait_for_completion_timeout(&mrioc->init_cmds.done, 1544 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1545 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1546 ioc_err(mrioc, "delete reply queue timed out\n"); 1547 mpi3mr_check_rh_fault_ioc(mrioc, 1548 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 1549 retval = -1; 1550 goto out_unlock; 1551 } 1552 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1553 != MPI3_IOCSTATUS_SUCCESS) { 1554 ioc_err(mrioc, 1555 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1556 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1557 mrioc->init_cmds.ioc_loginfo); 1558 retval = -1; 1559 goto out_unlock; 1560 } 1561 
mrioc->intr_info[midx].op_reply_q = NULL; 1562 1563 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1564 out_unlock: 1565 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1566 mutex_unlock(&mrioc->init_cmds.mutex); 1567 out: 1568 1569 return retval; 1570 } 1571 1572 /** 1573 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 1574 * @mrioc: Adapter instance reference 1575 * @qidx: request queue index 1576 * 1577 * Allocate segmented memory pools for operational reply 1578 * queue. 1579 * 1580 * Return: 0 on success, non-zero on failure. 1581 */ 1582 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1583 { 1584 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1585 int i, size; 1586 u64 *q_segment_list_entry = NULL; 1587 struct segments *segments; 1588 1589 if (mrioc->enable_segqueue) { 1590 op_reply_q->segment_qd = 1591 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 1592 1593 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1594 1595 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1596 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 1597 GFP_KERNEL); 1598 if (!op_reply_q->q_segment_list) 1599 return -ENOMEM; 1600 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 1601 } else { 1602 op_reply_q->segment_qd = op_reply_q->num_replies; 1603 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 1604 } 1605 1606 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 1607 op_reply_q->segment_qd); 1608 1609 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 1610 sizeof(struct segments), GFP_KERNEL); 1611 if (!op_reply_q->q_segments) 1612 return -ENOMEM; 1613 1614 segments = op_reply_q->q_segments; 1615 for (i = 0; i < op_reply_q->num_segments; i++) { 1616 segments[i].segment = 1617 dma_alloc_coherent(&mrioc->pdev->dev, 1618 size, &segments[i].segment_dma, GFP_KERNEL); 1619 if (!segments[i].segment) 1620 return -ENOMEM; 1621 if (mrioc->enable_segqueue) 1622 
q_segment_list_entry[i] = 1623 (unsigned long)segments[i].segment_dma; 1624 } 1625 1626 return 0; 1627 } 1628 1629 /** 1630 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 1631 * @mrioc: Adapter instance reference 1632 * @qidx: request queue index 1633 * 1634 * Allocate segmented memory pools for operational request 1635 * queue. 1636 * 1637 * Return: 0 on success, non-zero on failure. 1638 */ 1639 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1640 { 1641 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 1642 int i, size; 1643 u64 *q_segment_list_entry = NULL; 1644 struct segments *segments; 1645 1646 if (mrioc->enable_segqueue) { 1647 op_req_q->segment_qd = 1648 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 1649 1650 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1651 1652 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1653 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 1654 GFP_KERNEL); 1655 if (!op_req_q->q_segment_list) 1656 return -ENOMEM; 1657 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 1658 1659 } else { 1660 op_req_q->segment_qd = op_req_q->num_requests; 1661 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 1662 } 1663 1664 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 1665 op_req_q->segment_qd); 1666 1667 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 1668 sizeof(struct segments), GFP_KERNEL); 1669 if (!op_req_q->q_segments) 1670 return -ENOMEM; 1671 1672 segments = op_req_q->q_segments; 1673 for (i = 0; i < op_req_q->num_segments; i++) { 1674 segments[i].segment = 1675 dma_alloc_coherent(&mrioc->pdev->dev, 1676 size, &segments[i].segment_dma, GFP_KERNEL); 1677 if (!segments[i].segment) 1678 return -ENOMEM; 1679 if (mrioc->enable_segqueue) 1680 q_segment_list_entry[i] = 1681 (unsigned long)segments[i].segment_dma; 1682 } 1683 1684 return 0; 1685 } 1686 1687 /** 1688 * mpi3mr_create_op_reply_q - create operational reply queue 1689 * @mrioc: 
Adapter instance reference 1690 * @qidx: operational reply queue index 1691 * 1692 * Create operatinal reply queue by issuing MPI request 1693 * through admin queue. 1694 * 1695 * Return: 0 on success, non-zero on failure. 1696 */ 1697 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1698 { 1699 struct mpi3_create_reply_queue_request create_req; 1700 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1701 int retval = 0; 1702 u16 reply_qid = 0, midx; 1703 1704 reply_qid = op_reply_q->qid; 1705 1706 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1707 1708 if (reply_qid) { 1709 retval = -1; 1710 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1711 reply_qid); 1712 1713 return retval; 1714 } 1715 1716 reply_qid = qidx + 1; 1717 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1718 op_reply_q->ci = 0; 1719 op_reply_q->ephase = 1; 1720 atomic_set(&op_reply_q->pend_ios, 0); 1721 atomic_set(&op_reply_q->in_use, 0); 1722 op_reply_q->enable_irq_poll = false; 1723 1724 if (!op_reply_q->q_segments) { 1725 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1726 if (retval) { 1727 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1728 goto out; 1729 } 1730 } 1731 1732 memset(&create_req, 0, sizeof(create_req)); 1733 mutex_lock(&mrioc->init_cmds.mutex); 1734 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1735 retval = -1; 1736 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1737 goto out_unlock; 1738 } 1739 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1740 mrioc->init_cmds.is_waiting = 1; 1741 mrioc->init_cmds.callback = NULL; 1742 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1743 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1744 create_req.queue_id = cpu_to_le16(reply_qid); 1745 create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1746 create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index); 1747 if (mrioc->enable_segqueue) { 1748 
create_req.flags |= 1749 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1750 create_req.base_address = cpu_to_le64( 1751 op_reply_q->q_segment_list_dma); 1752 } else 1753 create_req.base_address = cpu_to_le64( 1754 op_reply_q->q_segments[0].segment_dma); 1755 1756 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1757 1758 init_completion(&mrioc->init_cmds.done); 1759 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1760 sizeof(create_req), 1); 1761 if (retval) { 1762 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1763 goto out_unlock; 1764 } 1765 wait_for_completion_timeout(&mrioc->init_cmds.done, 1766 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1767 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1768 ioc_err(mrioc, "create reply queue timed out\n"); 1769 mpi3mr_check_rh_fault_ioc(mrioc, 1770 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1771 retval = -1; 1772 goto out_unlock; 1773 } 1774 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1775 != MPI3_IOCSTATUS_SUCCESS) { 1776 ioc_err(mrioc, 1777 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1778 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1779 mrioc->init_cmds.ioc_loginfo); 1780 retval = -1; 1781 goto out_unlock; 1782 } 1783 op_reply_q->qid = reply_qid; 1784 if (midx < mrioc->intr_info_count) 1785 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1786 1787 out_unlock: 1788 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1789 mutex_unlock(&mrioc->init_cmds.mutex); 1790 out: 1791 1792 return retval; 1793 } 1794 1795 /** 1796 * mpi3mr_create_op_req_q - create operational request queue 1797 * @mrioc: Adapter instance reference 1798 * @idx: operational request queue index 1799 * @reply_qid: Reply queue ID 1800 * 1801 * Create operatinal request queue by issuing MPI request 1802 * through admin queue. 1803 * 1804 * Return: 0 on success, non-zero on failure. 
1805 */ 1806 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1807 u16 reply_qid) 1808 { 1809 struct mpi3_create_request_queue_request create_req; 1810 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1811 int retval = 0; 1812 u16 req_qid = 0; 1813 1814 req_qid = op_req_q->qid; 1815 1816 if (req_qid) { 1817 retval = -1; 1818 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1819 req_qid); 1820 1821 return retval; 1822 } 1823 req_qid = idx + 1; 1824 1825 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1826 op_req_q->ci = 0; 1827 op_req_q->pi = 0; 1828 op_req_q->reply_qid = reply_qid; 1829 spin_lock_init(&op_req_q->q_lock); 1830 1831 if (!op_req_q->q_segments) { 1832 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1833 if (retval) { 1834 mpi3mr_free_op_req_q_segments(mrioc, idx); 1835 goto out; 1836 } 1837 } 1838 1839 memset(&create_req, 0, sizeof(create_req)); 1840 mutex_lock(&mrioc->init_cmds.mutex); 1841 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1842 retval = -1; 1843 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1844 goto out_unlock; 1845 } 1846 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1847 mrioc->init_cmds.is_waiting = 1; 1848 mrioc->init_cmds.callback = NULL; 1849 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1850 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1851 create_req.queue_id = cpu_to_le16(req_qid); 1852 if (mrioc->enable_segqueue) { 1853 create_req.flags = 1854 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1855 create_req.base_address = cpu_to_le64( 1856 op_req_q->q_segment_list_dma); 1857 } else 1858 create_req.base_address = cpu_to_le64( 1859 op_req_q->q_segments[0].segment_dma); 1860 create_req.reply_queue_id = cpu_to_le16(reply_qid); 1861 create_req.size = cpu_to_le16(op_req_q->num_requests); 1862 1863 init_completion(&mrioc->init_cmds.done); 1864 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1865 sizeof(create_req), 1); 1866 if (retval) { 1867 
ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 1868 goto out_unlock; 1869 } 1870 wait_for_completion_timeout(&mrioc->init_cmds.done, 1871 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1872 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1873 ioc_err(mrioc, "create request queue timed out\n"); 1874 mpi3mr_check_rh_fault_ioc(mrioc, 1875 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); 1876 retval = -1; 1877 goto out_unlock; 1878 } 1879 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1880 != MPI3_IOCSTATUS_SUCCESS) { 1881 ioc_err(mrioc, 1882 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1883 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1884 mrioc->init_cmds.ioc_loginfo); 1885 retval = -1; 1886 goto out_unlock; 1887 } 1888 op_req_q->qid = req_qid; 1889 1890 out_unlock: 1891 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1892 mutex_unlock(&mrioc->init_cmds.mutex); 1893 out: 1894 1895 return retval; 1896 } 1897 1898 /** 1899 * mpi3mr_create_op_queues - create operational queue pairs 1900 * @mrioc: Adapter instance reference 1901 * 1902 * Allocate memory for operational queue meta data and call 1903 * create request and reply queue functions. 1904 * 1905 * Return: 0 on success, non-zero on failures. 1906 */ 1907 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 1908 { 1909 int retval = 0; 1910 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 1911 1912 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 1913 mrioc->facts.max_op_req_q); 1914 1915 msix_count_op_q = 1916 mrioc->intr_info_count - mrioc->op_reply_q_offset; 1917 if (!mrioc->num_queues) 1918 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 1919 /* 1920 * During reset set the num_queues to the number of queues 1921 * that was set before the reset. 1922 */ 1923 num_queues = mrioc->num_op_reply_q ? 
1924 mrioc->num_op_reply_q : mrioc->num_queues; 1925 ioc_info(mrioc, "trying to create %d operational queue pairs\n", 1926 num_queues); 1927 1928 if (!mrioc->req_qinfo) { 1929 mrioc->req_qinfo = kcalloc(num_queues, 1930 sizeof(struct op_req_qinfo), GFP_KERNEL); 1931 if (!mrioc->req_qinfo) { 1932 retval = -1; 1933 goto out_failed; 1934 } 1935 1936 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 1937 num_queues, GFP_KERNEL); 1938 if (!mrioc->op_reply_qinfo) { 1939 retval = -1; 1940 goto out_failed; 1941 } 1942 } 1943 1944 if (mrioc->enable_segqueue) 1945 ioc_info(mrioc, 1946 "allocating operational queues through segmented queues\n"); 1947 1948 for (i = 0; i < num_queues; i++) { 1949 if (mpi3mr_create_op_reply_q(mrioc, i)) { 1950 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 1951 break; 1952 } 1953 if (mpi3mr_create_op_req_q(mrioc, i, 1954 mrioc->op_reply_qinfo[i].qid)) { 1955 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 1956 mpi3mr_delete_op_reply_q(mrioc, i); 1957 break; 1958 } 1959 } 1960 1961 if (i == 0) { 1962 /* Not even one queue is created successfully*/ 1963 retval = -1; 1964 goto out_failed; 1965 } 1966 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 1967 ioc_info(mrioc, "Successfully created %d Operational Q pairs\n", 1968 mrioc->num_op_reply_q); 1969 1970 return retval; 1971 out_failed: 1972 kfree(mrioc->req_qinfo); 1973 mrioc->req_qinfo = NULL; 1974 1975 kfree(mrioc->op_reply_qinfo); 1976 mrioc->op_reply_qinfo = NULL; 1977 1978 return retval; 1979 } 1980 1981 /** 1982 * mpi3mr_op_request_post - Post request to operational queue 1983 * @mrioc: Adapter reference 1984 * @op_req_q: Operational request queue info 1985 * @req: MPI3 request 1986 * 1987 * Post the MPI3 request into operational request queue and 1988 * inform the controller, if the queue is full return 1989 * appropriate error. 1990 * 1991 * Return: 0 on success, non-zero on failure. 
1992 */ 1993 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 1994 struct op_req_qinfo *op_req_q, u8 *req) 1995 { 1996 u16 pi = 0, max_entries, reply_qidx = 0, midx; 1997 int retval = 0; 1998 unsigned long flags; 1999 u8 *req_entry; 2000 void *segment_base_addr; 2001 u16 req_sz = mrioc->facts.op_req_sz; 2002 struct segments *segments = op_req_q->q_segments; 2003 2004 reply_qidx = op_req_q->reply_qid - 1; 2005 2006 if (mrioc->unrecoverable) 2007 return -EFAULT; 2008 2009 spin_lock_irqsave(&op_req_q->q_lock, flags); 2010 pi = op_req_q->pi; 2011 max_entries = op_req_q->num_requests; 2012 2013 if (mpi3mr_check_req_qfull(op_req_q)) { 2014 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 2015 reply_qidx, mrioc->op_reply_q_offset); 2016 mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]); 2017 2018 if (mpi3mr_check_req_qfull(op_req_q)) { 2019 retval = -EAGAIN; 2020 goto out; 2021 } 2022 } 2023 2024 if (mrioc->reset_in_progress) { 2025 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 2026 retval = -EAGAIN; 2027 goto out; 2028 } 2029 2030 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 2031 req_entry = (u8 *)segment_base_addr + 2032 ((pi % op_req_q->segment_qd) * req_sz); 2033 2034 memset(req_entry, 0, req_sz); 2035 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 2036 2037 if (++pi == max_entries) 2038 pi = 0; 2039 op_req_q->pi = pi; 2040 2041 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 2042 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 2043 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 2044 2045 writel(op_req_q->pi, 2046 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 2047 2048 out: 2049 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 2050 return retval; 2051 } 2052 2053 /** 2054 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2055 * controller 2056 * @mrioc: Adapter instance reference 2057 * @reason_code, reason code for the fault. 
2058 * 2059 * This routine will save snapdump and fault the controller with 2060 * the given reason code if it is not already in the fault or 2061 * not asynchronosuly reset. This will be used to handle 2062 * initilaization time faults/resets/timeout as in those cases 2063 * immediate soft reset invocation is not required. 2064 * 2065 * Return: None. 2066 */ 2067 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2068 { 2069 u32 ioc_status, host_diagnostic, timeout; 2070 2071 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2072 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2073 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2074 mpi3mr_print_fault_info(mrioc); 2075 return; 2076 } 2077 mpi3mr_set_diagsave(mrioc); 2078 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2079 reason_code); 2080 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2081 do { 2082 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2083 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 2084 break; 2085 msleep(100); 2086 } while (--timeout); 2087 } 2088 2089 /** 2090 * mpi3mr_sync_timestamp - Issue time stamp sync request 2091 * @mrioc: Adapter reference 2092 * 2093 * Issue IO unit control MPI request to synchornize firmware 2094 * timestamp with host time. 2095 * 2096 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is a single shared slot; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware expects the host wall-clock time in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Escalate to a soft reset unless a reset already
		 * terminated this command.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer that receives the manifest upload */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Manifest immediately follows the image header in flash */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Version print is best-effort: a non-success IOC status only
	 * skips the print, it is not treated as a failure.
	 */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
2252 */ 2253 static void mpi3mr_watchdog_work(struct work_struct *work) 2254 { 2255 struct mpi3mr_ioc *mrioc = 2256 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2257 unsigned long flags; 2258 enum mpi3mr_iocstate ioc_state; 2259 u32 fault, host_diagnostic; 2260 2261 if (mrioc->reset_in_progress || mrioc->unrecoverable) 2262 return; 2263 2264 if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2265 mrioc->ts_update_counter = 0; 2266 mpi3mr_sync_timestamp(mrioc); 2267 } 2268 2269 /*Check for fault state every one second and issue Soft reset*/ 2270 ioc_state = mpi3mr_get_iocstate(mrioc); 2271 if (ioc_state == MRIOC_STATE_FAULT) { 2272 fault = readl(&mrioc->sysif_regs->fault) & 2273 MPI3_SYSIF_FAULT_CODE_MASK; 2274 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2275 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2276 if (!mrioc->diagsave_timeout) { 2277 mpi3mr_print_fault_info(mrioc); 2278 ioc_warn(mrioc, "Diag save in progress\n"); 2279 } 2280 if ((mrioc->diagsave_timeout++) <= 2281 MPI3_SYSIF_DIAG_SAVE_TIMEOUT) 2282 goto schedule_work; 2283 } else 2284 mpi3mr_print_fault_info(mrioc); 2285 mrioc->diagsave_timeout = 0; 2286 2287 if (fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) { 2288 ioc_info(mrioc, 2289 "Factory Reset fault occurred marking controller as unrecoverable" 2290 ); 2291 mrioc->unrecoverable = 1; 2292 goto out; 2293 } 2294 2295 if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) || 2296 (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) || 2297 (mrioc->reset_in_progress)) 2298 goto out; 2299 if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET) 2300 mpi3mr_soft_reset_handler(mrioc, 2301 MPI3MR_RESET_FROM_CIACTIV_FAULT, 0); 2302 else 2303 mpi3mr_soft_reset_handler(mrioc, 2304 MPI3MR_RESET_FROM_FAULT_WATCH, 0); 2305 } 2306 2307 schedule_work: 2308 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2309 if (mrioc->watchdog_work_q) 2310 queue_delayed_work(mrioc->watchdog_work_q, 2311 
&mrioc->watchdog_work, 2312 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2313 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2314 out: 2315 return; 2316 } 2317 2318 /** 2319 * mpi3mr_start_watchdog - Start watchdog 2320 * @mrioc: Adapter instance reference 2321 * 2322 * Create and start the watchdog thread to monitor controller 2323 * faults. 2324 * 2325 * Return: Nothing. 2326 */ 2327 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2328 { 2329 if (mrioc->watchdog_work_q) 2330 return; 2331 2332 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2333 snprintf(mrioc->watchdog_work_q_name, 2334 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2335 mrioc->id); 2336 mrioc->watchdog_work_q = 2337 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2338 if (!mrioc->watchdog_work_q) { 2339 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2340 return; 2341 } 2342 2343 if (mrioc->watchdog_work_q) 2344 queue_delayed_work(mrioc->watchdog_work_q, 2345 &mrioc->watchdog_work, 2346 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2347 } 2348 2349 /** 2350 * mpi3mr_stop_watchdog - Stop watchdog 2351 * @mrioc: Adapter instance reference 2352 * 2353 * Stop the watchdog thread created to monitor controller 2354 * faults. 2355 * 2356 * Return: Nothing. 
 */
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/* Detach the workqueue pointer under the lock so the work item
	 * cannot re-arm itself once teardown begins (see the matching
	 * locked check in the watchdog work function).
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	wq = mrioc->watchdog_work_q;
	mrioc->watchdog_work_q = NULL;
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Fixed-size request queue; depth derives from frame size */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected phase bit starts at 1 for a freshly created queue */
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Register both queues with the controller: depths are packed
	 * as (replies << 16 | requests) in a single register.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer that receives the facts payload */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single shared slot; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Diag-fault the controller so the failure is captured */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Cache the raw facts and decode them into mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Reset the DMA mask if required
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it .
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask when firmware requests a smaller one */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver .
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity check: firmware reports the facts length in dwords */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size is encoded as a power of two
	 * in the IOC configuration register; cross-check against the
	 * frame size (in dwords) reported by firmware.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Decode all little-endian fields into the cached CPU-order copy */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Clamp the driver's MSI-x usage to what firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-initialization after reset) */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	/* One bit per device handle, rounded up to whole bytes */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	/* Queue depths are one larger than the buffer counts so the
	 * producer/consumer indices can never look identical when full.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used to validate reply addresses from firmware */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/* NOTE(review): partially-allocated pools are not freed here;
	 * presumably the caller's failure path releases them - confirm.
	 */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue: one DMA address per buffer,
	 * with a zero terminator in the extra queue slot
	 */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue the same way */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able buffer holding the driver information page that the
	 * IOC Init request points at.
	 */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Seed the firmware timestamp with host wall-clock time (ms) */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand all reply/sense buffers to the firmware by advancing the
	 * host indices past the freshly initialized queue entries.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Un mask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* Bitmap covers 128 events (4 x 32-bit words) */
	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));
	word = event / 32;

	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
2976 */ 2977 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) 2978 { 2979 struct mpi3_event_notification_request evtnotify_req; 2980 int retval = 0; 2981 u8 i; 2982 2983 memset(&evtnotify_req, 0, sizeof(evtnotify_req)); 2984 mutex_lock(&mrioc->init_cmds.mutex); 2985 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 2986 retval = -1; 2987 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); 2988 mutex_unlock(&mrioc->init_cmds.mutex); 2989 goto out; 2990 } 2991 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 2992 mrioc->init_cmds.is_waiting = 1; 2993 mrioc->init_cmds.callback = NULL; 2994 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 2995 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; 2996 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 2997 evtnotify_req.event_masks[i] = 2998 cpu_to_le32(mrioc->event_masks[i]); 2999 init_completion(&mrioc->init_cmds.done); 3000 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, 3001 sizeof(evtnotify_req), 1); 3002 if (retval) { 3003 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); 3004 goto out_unlock; 3005 } 3006 wait_for_completion_timeout(&mrioc->init_cmds.done, 3007 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3008 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3009 ioc_err(mrioc, "event notification timed out\n"); 3010 mpi3mr_check_rh_fault_ioc(mrioc, 3011 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); 3012 retval = -1; 3013 goto out_unlock; 3014 } 3015 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3016 != MPI3_IOCSTATUS_SUCCESS) { 3017 ioc_err(mrioc, 3018 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3019 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3020 mrioc->init_cmds.ioc_loginfo); 3021 retval = -1; 3022 goto out_unlock; 3023 } 3024 3025 out_unlock: 3026 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3027 mutex_unlock(&mrioc->init_cmds.mutex); 3028 out: 3029 return retval; 3030 } 3031 3032 /** 3033 * 
mpi3mr_send_event_ack - Send event acknowledgment 3034 * @mrioc: Adapter instance reference 3035 * @event: MPI3 event ID 3036 * @event_ctx: Event context 3037 * 3038 * Send event acknowledgment through admin queue and wait for 3039 * it to complete. 3040 * 3041 * Return: 0 on success, non-zero on failures. 3042 */ 3043 int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3044 u32 event_ctx) 3045 { 3046 struct mpi3_event_ack_request evtack_req; 3047 int retval = 0; 3048 3049 memset(&evtack_req, 0, sizeof(evtack_req)); 3050 mutex_lock(&mrioc->init_cmds.mutex); 3051 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3052 retval = -1; 3053 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3054 mutex_unlock(&mrioc->init_cmds.mutex); 3055 goto out; 3056 } 3057 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3058 mrioc->init_cmds.is_waiting = 1; 3059 mrioc->init_cmds.callback = NULL; 3060 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3061 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3062 evtack_req.event = event; 3063 evtack_req.event_context = cpu_to_le32(event_ctx); 3064 3065 init_completion(&mrioc->init_cmds.done); 3066 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3067 sizeof(evtack_req), 1); 3068 if (retval) { 3069 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3070 goto out_unlock; 3071 } 3072 wait_for_completion_timeout(&mrioc->init_cmds.done, 3073 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3074 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3075 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3076 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3077 mpi3mr_soft_reset_handler(mrioc, 3078 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 3079 retval = -1; 3080 goto out_unlock; 3081 } 3082 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3083 != MPI3_IOCSTATUS_SUCCESS) { 3084 ioc_err(mrioc, 3085 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3086 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3087 mrioc->init_cmds.ioc_loginfo); 3088 retval = -1; 3089 goto out_unlock; 3090 } 3091 3092 out_unlock: 3093 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3094 mutex_unlock(&mrioc->init_cmds.mutex); 3095 out: 3096 return retval; 3097 } 3098 3099 /** 3100 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3101 * @mrioc: Adapter instance reference 3102 * 3103 * Allocate chain buffers and set a bitmap to indicate free 3104 * chain buffers. Chain buffers are used to pass the SGE 3105 * information along with MPI3 SCSI IO requests for host I/O. 3106 * 3107 * Return: 0 on success, non-zero on failure 3108 */ 3109 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3110 { 3111 int retval = 0; 3112 u32 sz, i; 3113 u16 num_chains; 3114 3115 if (mrioc->chain_sgl_list) 3116 return retval; 3117 3118 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3119 3120 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3121 | SHOST_DIX_TYPE1_PROTECTION 3122 | SHOST_DIX_TYPE2_PROTECTION 3123 | SHOST_DIX_TYPE3_PROTECTION)) 3124 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3125 3126 mrioc->chain_buf_count = num_chains; 3127 sz = sizeof(struct chain_element) * num_chains; 3128 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3129 if (!mrioc->chain_sgl_list) 3130 goto out_failed; 3131 3132 sz = MPI3MR_PAGE_SIZE_4K; 3133 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3134 &mrioc->pdev->dev, sz, 16, 0); 3135 if (!mrioc->chain_buf_pool) { 3136 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3137 goto out_failed; 3138 } 3139 3140 for (i = 0; i < num_chains; i++) { 3141 mrioc->chain_sgl_list[i].addr = 3142 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3143 &mrioc->chain_sgl_list[i].dma_addr); 3144 3145 if (!mrioc->chain_sgl_list[i].addr) 3146 goto out_failed; 3147 } 3148 mrioc->chain_bitmap_sz = num_chains / 8; 3149 if (num_chains % 8) 3150 mrioc->chain_bitmap_sz++; 3151 mrioc->chain_bitmap = 
kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 3152 if (!mrioc->chain_bitmap) 3153 goto out_failed; 3154 return retval; 3155 out_failed: 3156 retval = -1; 3157 return retval; 3158 } 3159 3160 /** 3161 * mpi3mr_port_enable_complete - Mark port enable complete 3162 * @mrioc: Adapter instance reference 3163 * @drv_cmd: Internal command tracker 3164 * 3165 * Call back for asynchronous port enable request sets the 3166 * driver command to indicate port enable request is complete. 3167 * 3168 * Return: Nothing 3169 */ 3170 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 3171 struct mpi3mr_drv_cmd *drv_cmd) 3172 { 3173 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3174 drv_cmd->callback = NULL; 3175 mrioc->scan_failed = drv_cmd->ioc_status; 3176 mrioc->scan_started = 0; 3177 } 3178 3179 /** 3180 * mpi3mr_issue_port_enable - Issue Port Enable 3181 * @mrioc: Adapter instance reference 3182 * @async: Flag to wait for completion or not 3183 * 3184 * Issue Port Enable MPI request through admin queue and if the 3185 * async flag is not set wait for the completion of the port 3186 * enable or time out. 3187 * 3188 * Return: 0 on success, non-zero on failures. 
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Asynchronous: completion arrives through the callback,
		 * nothing blocks here.
		 */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Synchronous path reuses the async completion helper to record
	 * scan status.
	 */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
};

/**
 * mpi3mr_print_ioc_info - Display controller information
 * @mrioc: Adapter instance reference
 *
 * Display controller personality, capability, supported
 * protocols etc.
 *
 * Return: Nothing
 */
static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
{
	int i = 0, bytes_written = 0;
	char personality[16];
	char protocol[50] = {0};
	char capabilities[100] = {0};
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	switch (mrioc->facts.personality) {
	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
		/* NOTE(review): strncpy does not NUL-terminate when the
		 * source fills the buffer; the literals used here are all
		 * shorter than sizeof(personality), so termination holds.
		 */
		strncpy(personality, "Enhanced HBA", sizeof(personality));
		break;
	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
		strncpy(personality, "RAID", sizeof(personality));
		break;
	default:
		strncpy(personality, "Unknown", sizeof(personality));
		break;
	}

	ioc_info(mrioc, "Running in %s Personality", personality);

	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);

	/* Build a comma separated list of the advertised protocols */
	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
		if (mrioc->facts.protocol_flags &
		    mpi3mr_protocols[i].protocol) {
			bytes_written += scnprintf(protocol + bytes_written,
			    sizeof(protocol) - bytes_written, "%s%s",
			    bytes_written ? "," : "",
			    mpi3mr_protocols[i].name);
		}
	}

	bytes_written = 0;
	/* NOTE(review): this tests protocol_flags against capability bits;
	 * presumably the intent is a capabilities field - confirm against
	 * mpi3mr.h before changing.
	 */
	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
		if (mrioc->facts.protocol_flags &
		    mpi3mr_capabilities[i].capability) {
			bytes_written += scnprintf(capabilities + bytes_written,
			    sizeof(capabilities) - bytes_written, "%s%s",
			    bytes_written ? "," : "",
			    mpi3mr_capabilities[i].name);
		}
	}

	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
	    protocol, capabilities);
}

/**
 * mpi3mr_cleanup_resources - Free PCI resources
 * @mrioc: Adapter instance reference
 *
 * Unmap PCI device memory and disable PCI device.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;

	mpi3mr_cleanup_isr(mrioc);

	if (mrioc->sysif_regs) {
		iounmap((void __iomem *)mrioc->sysif_regs);
		mrioc->sysif_regs = NULL;
	}

	if (pci_is_enabled(pdev)) {
		if (mrioc->bars)
			pci_release_selected_regions(pdev, mrioc->bars);
		pci_disable_device(pdev);
	}
}

/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer a previously negotiated mask (reset path); otherwise pick
	 * 64-bit only when the platform actually needs/handles it.
	 */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR; it holds the system interface regs */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			/* Fall back to a 32-bit mask before giving up */
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* capb + 2 is the MSI-X Message Control register; low 11 bits
	 * encode (table size - 1).
	 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}

/**
 * mpi3mr_enable_events - Enable required events
 * @mrioc: Adapter instance reference
 *
 * This routine unmasks the events required by the driver by
 * sending the appropriate event mask bitmap through an event
 * notification request.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 i;

	/* Start with every event masked, then clear bits of interest */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mrioc->event_masks[i] = -1;

	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);

	retval = mpi3mr_issue_event_notification(mrioc);
	if (retval)
		ioc_err(mrioc, "failed to issue event notification %d\n",
		    retval);
	return retval;
}

/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed
either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;

retry_init:
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Single-vector ISR first; full MSI-X set up after IOC facts */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Reserve a slice of the firmware request pool for internal cmds */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	/* kdump kernel: keep the footprint small */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_print_ioc_info(mrioc);

	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
	if (retval) {
		ioc_err(mrioc,
		    "%s :Failed to allocated reply sense buffers %d\n",
		    __func__, retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_alloc_chain_bufs(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Now switch to the full MSI-X vector configuration */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Recoverable failures are retried twice with buffers re-zeroed;
	 * out_failed_noretry failures mark the controller unrecoverable.
	 */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}

/**
 * mpi3mr_reinit_ioc - Re-Initialize the controller
 * @mrioc: Adapter instance reference
 * @is_resume: Called from resume or reset path
 *
 * This is the controller re-initialization routine, executed from
 * the soft reset handler or resume callback. Creates
 * operational reply queue pairs, allocate required memory for
 * reply pool, sense buffer pool, issue IOC init request to the
 * firmware, unmask the events and issue port enable to discover
 * SAS/SATA/NVMe devices and RAID volumes.
3619 * 3620 * Return: 0 on success and non-zero on failure. 3621 */ 3622 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3623 { 3624 int retval = 0; 3625 u8 retry = 0; 3626 struct mpi3_ioc_facts_data facts_data; 3627 3628 retry_init: 3629 dprint_reset(mrioc, "bringing up the controller to ready state\n"); 3630 retval = mpi3mr_bring_ioc_ready(mrioc); 3631 if (retval) { 3632 ioc_err(mrioc, "failed to bring to ready state\n"); 3633 goto out_failed_noretry; 3634 } 3635 3636 if (is_resume) { 3637 dprint_reset(mrioc, "setting up single ISR\n"); 3638 retval = mpi3mr_setup_isr(mrioc, 1); 3639 if (retval) { 3640 ioc_err(mrioc, "failed to setup ISR\n"); 3641 goto out_failed_noretry; 3642 } 3643 } else 3644 mpi3mr_ioc_enable_intr(mrioc); 3645 3646 dprint_reset(mrioc, "getting ioc_facts\n"); 3647 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3648 if (retval) { 3649 ioc_err(mrioc, "failed to get ioc_facts\n"); 3650 goto out_failed; 3651 } 3652 3653 dprint_reset(mrioc, "validating ioc_facts\n"); 3654 retval = mpi3mr_revalidate_factsdata(mrioc); 3655 if (retval) { 3656 ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); 3657 goto out_failed_noretry; 3658 } 3659 3660 mpi3mr_print_ioc_info(mrioc); 3661 3662 dprint_reset(mrioc, "sending ioc_init\n"); 3663 retval = mpi3mr_issue_iocinit(mrioc); 3664 if (retval) { 3665 ioc_err(mrioc, "failed to send ioc_init\n"); 3666 goto out_failed; 3667 } 3668 3669 dprint_reset(mrioc, "getting package version\n"); 3670 retval = mpi3mr_print_pkg_ver(mrioc); 3671 if (retval) { 3672 ioc_err(mrioc, "failed to get package version\n"); 3673 goto out_failed; 3674 } 3675 3676 if (is_resume) { 3677 dprint_reset(mrioc, "setting up multiple ISR\n"); 3678 retval = mpi3mr_setup_isr(mrioc, 0); 3679 if (retval) { 3680 ioc_err(mrioc, "failed to re-setup ISR\n"); 3681 goto out_failed_noretry; 3682 } 3683 } 3684 3685 dprint_reset(mrioc, "creating operational queue pairs\n"); 3686 retval = mpi3mr_create_op_queues(mrioc); 3687 if (retval) { 
3688 ioc_err(mrioc, "failed to create operational queue pairs\n"); 3689 goto out_failed; 3690 } 3691 3692 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3693 ioc_err(mrioc, 3694 "cannot create minimum number of operatioanl queues expected:%d created:%d\n", 3695 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3696 goto out_failed_noretry; 3697 } 3698 3699 dprint_reset(mrioc, "enabling events\n"); 3700 retval = mpi3mr_enable_events(mrioc); 3701 if (retval) { 3702 ioc_err(mrioc, "failed to enable events\n"); 3703 goto out_failed; 3704 } 3705 3706 ioc_info(mrioc, "sending port enable\n"); 3707 retval = mpi3mr_issue_port_enable(mrioc, 0); 3708 if (retval) { 3709 ioc_err(mrioc, "failed to issue port enable\n"); 3710 goto out_failed; 3711 } 3712 3713 ioc_info(mrioc, "controller %s completed successfully\n", 3714 (is_resume)?"resume":"re-initialization"); 3715 return retval; 3716 out_failed: 3717 if (retry < 2) { 3718 retry++; 3719 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", 3720 (is_resume)?"resume":"re-initialization", retry); 3721 mpi3mr_memset_buffers(mrioc); 3722 goto retry_init; 3723 } 3724 out_failed_noretry: 3725 ioc_err(mrioc, "controller %s is failed\n", 3726 (is_resume)?"resume":"re-initialization"); 3727 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3728 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3729 mrioc->unrecoverable = 1; 3730 return retval; 3731 } 3732 3733 /** 3734 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 3735 * segments 3736 * @mrioc: Adapter instance reference 3737 * @qidx: Operational reply queue index 3738 * 3739 * Return: Nothing. 
3740 */ 3741 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3742 { 3743 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 3744 struct segments *segments; 3745 int i, size; 3746 3747 if (!op_reply_q->q_segments) 3748 return; 3749 3750 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 3751 segments = op_reply_q->q_segments; 3752 for (i = 0; i < op_reply_q->num_segments; i++) 3753 memset(segments[i].segment, 0, size); 3754 } 3755 3756 /** 3757 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 3758 * segments 3759 * @mrioc: Adapter instance reference 3760 * @qidx: Operational request queue index 3761 * 3762 * Return: Nothing. 3763 */ 3764 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3765 { 3766 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 3767 struct segments *segments; 3768 int i, size; 3769 3770 if (!op_req_q->q_segments) 3771 return; 3772 3773 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 3774 segments = op_req_q->q_segments; 3775 for (i = 0; i < op_req_q->num_segments; i++) 3776 memset(segments[i].segment, 0, size); 3777 } 3778 3779 /** 3780 * mpi3mr_memset_buffers - memset memory for a controller 3781 * @mrioc: Adapter instance reference 3782 * 3783 * clear all the memory allocated for a controller, typically 3784 * called post reset to reuse the memory allocated during the 3785 * controller init. 3786 * 3787 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mrioc->change_count = 0;
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/* init_cmds.reply is used as a proxy: the other internal-command
	 * reply buffers and bitmaps are allocated together with it.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	}

	/* Reset the per-queue bookkeeping; the segment memory itself is
	 * zeroed by the helpers so it can be reused after reset.
	 */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/* DMA pool elements must be returned before the pool is destroyed */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Drop dangling back-references from the interrupt vectors to the
	 * reply queues freed above.
	 */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	kfree(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	/* Timeout in 100ms polling ticks (value * 10 = seconds * 10) */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal shutdown through the IOC configuration reg */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-advertised shutdown timeout when present */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only quiesce a healthy, idle controller: MUR first, soft reset
	 * as fallback, then the shutdown notification.
	 */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver commands with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		/* Wake a synchronous waiter, or invoke the async callback */
		if (cmdptr->is_waiting) {
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
4067 */ 4068 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) 4069 { 4070 struct mpi3mr_drv_cmd *cmdptr; 4071 u8 i; 4072 4073 cmdptr = &mrioc->init_cmds; 4074 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4075 cmdptr = &mrioc->host_tm_cmds; 4076 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4077 4078 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 4079 cmdptr = &mrioc->dev_rmhs_cmds[i]; 4080 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4081 } 4082 } 4083 4084 /** 4085 * mpi3mr_soft_reset_handler - Reset the controller 4086 * @mrioc: Adapter instance reference 4087 * @reset_reason: Reset reason code 4088 * @snapdump: Flag to generate snapdump in firmware or not 4089 * 4090 * This is an handler for recovering controller by issuing soft 4091 * reset are diag fault reset. This is a blocking function and 4092 * when one reset is executed if any other resets they will be 4093 * blocked. All IOCTLs/IO will be blocked during the reset. If 4094 * controller reset is successful then the controller will be 4095 * reinitalized, otherwise the controller will be marked as not 4096 * recoverable 4097 * 4098 * In snapdump bit is set, the controller is issued with diag 4099 * fault reset so that the firmware can create a snap dump and 4100 * post that the firmware will result in F000 fault and the 4101 * driver will issue soft reset to recover from that. 4102 * 4103 * Return: 0 on success, non-zero on failure. 
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	/* Diag-save poll budget in 100ms ticks */
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->reset_in_progress = 1;
	mrioc->prev_reset_result = -1;

	/* Mask all events before resetting, except when the firmware itself
	 * triggered the reset (it may already be unresponsive).
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	if (snapdump) {
		/* Force a diag fault so firmware captures a snapdump, then
		 * poll until the save-in-progress bit clears.
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Flush all driver-internal state that referenced the old firmware
	 * instance before re-initializing.
	 */
	mpi3mr_flush_delayed_rmhs_list(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* NOTE(review): fixed settle delay after reinit; presumably gives
	 * the firmware time to report initial device events - confirm.
	 */
	ssleep(10);

out:
	if (!retval) {
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mpi3mr_rfresh_tgtdevs(mrioc);
		mrioc->ts_update_counter = 0;
		/* Restart the watchdog that was idle during the reset */
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	} else {
		/* Recovery failed: fault the controller and mark it dead */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}