// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2021 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");

#if defined(writeq) && defined(CONFIG_64BIT)
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without a native 64-bit MMIO write: issue two
 * 32-bit writes, low dword first. Not atomic with respect to the device.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/*
 * mpi3mr_check_req_qfull - check whether an operational request queue
 * is full. The circular queue is considered full when the producer
 * index is exactly one slot behind the consumer index (including the
 * wrap-around case).
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/* Wait for in-flight handlers on every allocated interrupt vector. */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/*
 * Mark interrupts disabled and wait for running ISRs to drain; ISRs
 * check intr_enabled and bail out once it is cleared.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/* Free registered IRQs, the intr_info array and the PCI IRQ vectors. */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/* Populate a simple SGE at @paddr with the given flags, length and address. */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/* Build a zero-length end-of-list SGE for requests without data transfer. */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/*
 * Translate a reply buffer DMA address posted by the controller into
 * its host virtual address. Returns NULL for a zero address or an
 * address outside the reply buffer pool.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/*
 * Translate a sense buffer DMA address into its host virtual address,
 * or NULL when no sense buffer address was supplied.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/*
 * Return a consumed reply buffer to the reply free queue and publish
 * the new host index to the controller. The buffer address is stored
 * before the index register write so the controller never sees a
 * stale entry; the whole sequence is serialized by reply_free_queue_lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/*
 * Return a consumed sense buffer to the sense buffer free queue and
 * publish the new host index to the controller (same ordering scheme
 * as mpi3mr_repost_reply_buf, serialized by sbq_lock).
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/*
 * Log a human-readable description of a received event notification.
 * Events carrying useful payload print details and return directly;
 * the rest map to a static description string.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/*
 * Log an event notification reply, cache the IOC change count from it
 * and forward the event to the OS-level event handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/*
 * Map a host tag from a reply back to the driver command tracker that
 * issued the request. Replies tagged MPI3MR_HOSTTAG_INVALID are
 * unsolicited; event notifications among them are dispatched here and
 * NULL is returned (also for any unrecognized tag).
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}

/*
 * Decode one admin reply descriptor: extract host tag, IOC status and
 * log info according to the descriptor type, copy any full reply frame
 * into the matching driver command tracker and complete/callback it.
 * For address replies, *reply_dma is set so the caller can repost the
 * reply buffer; any SCSI sense buffer is reposted here.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * Drain the admin reply queue: process descriptors until the phase bit
 * no longer matches the expected phase (queue empty), then publish the
 * new consumer index to the controller. Returns the number of replies
 * processed (0 when the queue was already empty).
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Wrap the consumer index and flip the expected phase bit */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/*
	 * The reply queue is segmented: select the segment holding the
	 * index, then offset to the descriptor within that segment.
	 */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Single-drainer guard: bail out if another context owns the queue */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request queue CI reported by the controller */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* Wrap the consumer index and flip the expected phase bit */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_blk_mq_poll - Operational reply queue handler
 * @shost: SCSI Host reference
 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct mpi3mr_ioc *mrioc;

	mrioc = (struct mpi3mr_ioc *)shost->hostdata;

	/* Do not touch the queues while a controller reset is running */
	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
		return 0;

	num_entries = mpi3mr_process_op_reply_q(mrioc,
	    &mrioc->op_reply_qinfo[queue_num]);

	return num_entries;
}

/*
 * Primary (hard) interrupt handler: MSI-X vector 0 additionally drains
 * the admin reply queue; every vector drains its associated
 * operational reply queue when one is mapped.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
		    intr_info->op_reply_q);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/*
 * Hard-IRQ entry point for request_threaded_irq: run the primary
 * handler and, when polling was enabled for the queue and I/Os are
 * still pending, disable the vector and wake the threaded handler.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Re-enabled by mpi3mr_isr_poll once polling finishes */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	intr_info->op_reply_q->enable_irq_poll = false;
	/* Pairs with disable_irq_nosync() in mpi3mr_isr() */
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter
instance reference 685 * @index: IRQ vector index 686 * 687 * Request threaded ISR with primary ISR and secondary 688 * 689 * Return: 0 on success and non zero on failures. 690 */ 691 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 692 { 693 struct pci_dev *pdev = mrioc->pdev; 694 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 695 int retval = 0; 696 697 intr_info->mrioc = mrioc; 698 intr_info->msix_index = index; 699 intr_info->op_reply_q = NULL; 700 701 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 702 mrioc->driver_name, mrioc->id, index); 703 704 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 705 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 706 if (retval) { 707 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 708 intr_info->name, pci_irq_vector(pdev, index)); 709 return retval; 710 } 711 712 return retval; 713 } 714 715 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors) 716 { 717 if (!mrioc->requested_poll_qcount) 718 return; 719 720 /* Reserved for Admin and Default Queue */ 721 if (max_vectors > 2 && 722 (mrioc->requested_poll_qcount < max_vectors - 2)) { 723 ioc_info(mrioc, 724 "enabled polled queues (%d) msix (%d)\n", 725 mrioc->requested_poll_qcount, max_vectors); 726 } else { 727 ioc_info(mrioc, 728 "disabled polled queues (%d) msix (%d) because of no resources for default queue\n", 729 mrioc->requested_poll_qcount, max_vectors); 730 mrioc->requested_poll_qcount = 0; 731 } 732 } 733 734 /** 735 * mpi3mr_setup_isr - Setup ISR for the controller 736 * @mrioc: Adapter instance reference 737 * @setup_one: Request one IRQ or more 738 * 739 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 740 * 741 * Return: 0 on success and non zero on failures. 
742 */ 743 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 744 { 745 unsigned int irq_flags = PCI_IRQ_MSIX; 746 int max_vectors, min_vec; 747 int retval; 748 int i; 749 struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; 750 751 if (mrioc->is_intr_info_set) 752 return 0; 753 754 mpi3mr_cleanup_isr(mrioc); 755 756 if (setup_one || reset_devices) { 757 max_vectors = 1; 758 retval = pci_alloc_irq_vectors(mrioc->pdev, 759 1, max_vectors, irq_flags); 760 if (retval < 0) { 761 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 762 retval); 763 goto out_failed; 764 } 765 } else { 766 max_vectors = 767 min_t(int, mrioc->cpu_count + 1 + 768 mrioc->requested_poll_qcount, mrioc->msix_count); 769 770 mpi3mr_calc_poll_queues(mrioc, max_vectors); 771 772 ioc_info(mrioc, 773 "MSI-X vectors supported: %d, no of cores: %d,", 774 mrioc->msix_count, mrioc->cpu_count); 775 ioc_info(mrioc, 776 "MSI-x vectors requested: %d poll_queues %d\n", 777 max_vectors, mrioc->requested_poll_qcount); 778 779 desc.post_vectors = mrioc->requested_poll_qcount; 780 min_vec = desc.pre_vectors + desc.post_vectors; 781 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 782 783 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 784 min_vec, max_vectors, irq_flags, &desc); 785 786 if (retval < 0) { 787 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 788 retval); 789 goto out_failed; 790 } 791 792 793 /* 794 * If only one MSI-x is allocated, then MSI-x 0 will be shared 795 * between Admin queue and operational queue 796 */ 797 if (retval == min_vec) 798 mrioc->op_reply_q_offset = 0; 799 else if (retval != (max_vectors)) { 800 ioc_info(mrioc, 801 "allocated vectors (%d) are less than configured (%d)\n", 802 retval, max_vectors); 803 } 804 805 max_vectors = retval; 806 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 807 808 mpi3mr_calc_poll_queues(mrioc, max_vectors); 809 810 } 811 812 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 813 GFP_KERNEL); 814 if (!mrioc->intr_info) { 815 retval = -ENOMEM; 816 pci_free_irq_vectors(mrioc->pdev); 817 goto out_failed; 818 } 819 for (i = 0; i < max_vectors; i++) { 820 retval = mpi3mr_request_irq(mrioc, i); 821 if (retval) { 822 mrioc->intr_info_count = i; 823 goto out_failed; 824 } 825 } 826 if (reset_devices || !setup_one) 827 mrioc->is_intr_info_set = true; 828 mrioc->intr_info_count = max_vectors; 829 mpi3mr_ioc_enable_intr(mrioc); 830 return 0; 831 832 out_failed: 833 mpi3mr_cleanup_isr(mrioc); 834 835 return retval; 836 } 837 838 static const struct { 839 enum mpi3mr_iocstate value; 840 char *name; 841 } mrioc_states[] = { 842 { MRIOC_STATE_READY, "ready" }, 843 { MRIOC_STATE_FAULT, "fault" }, 844 { MRIOC_STATE_RESET, "reset" }, 845 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 846 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 847 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 848 }; 849 850 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 851 { 852 int i; 853 char *name = NULL; 854 855 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 856 if (mrioc_states[i].value == mrioc_state) { 857 name = mrioc_states[i].name; 858 break; 859 } 860 } 861 return name; 862 } 863 864 /* Reset reason to name mapper structure*/ 865 static const struct { 866 enum mpi3mr_reset_reason value; 867 char *name; 868 } mpi3mr_reset_reason_codes[] = { 869 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 870 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 871 { MPI3MR_RESET_FROM_IOCTL, "application invocation" }, 872 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 873 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 874 { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" }, 875 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 876 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller 
cleanup" }, 877 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 878 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 879 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 880 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 881 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 882 { 883 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 884 "create request queue timeout" 885 }, 886 { 887 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 888 "create reply queue timeout" 889 }, 890 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 891 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 892 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 893 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 894 { 895 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 896 "component image activation timeout" 897 }, 898 { 899 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 900 "get package version timeout" 901 }, 902 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 903 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 904 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 905 }; 906 907 /** 908 * mpi3mr_reset_rc_name - get reset reason code name 909 * @reason_code: reset reason code value 910 * 911 * Map reset reason to an NULL terminated ASCII string 912 * 913 * Return: name corresponding to reset reason value or NULL. 
914 */ 915 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 916 { 917 int i; 918 char *name = NULL; 919 920 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 921 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 922 name = mpi3mr_reset_reason_codes[i].name; 923 break; 924 } 925 } 926 return name; 927 } 928 929 /* Reset type to name mapper structure*/ 930 static const struct { 931 u16 reset_type; 932 char *name; 933 } mpi3mr_reset_types[] = { 934 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 935 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 936 }; 937 938 /** 939 * mpi3mr_reset_type_name - get reset type name 940 * @reset_type: reset type value 941 * 942 * Map reset type to an NULL terminated ASCII string 943 * 944 * Return: name corresponding to reset type value or NULL. 945 */ 946 static const char *mpi3mr_reset_type_name(u16 reset_type) 947 { 948 int i; 949 char *name = NULL; 950 951 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 952 if (mpi3mr_reset_types[i].reset_type == reset_type) { 953 name = mpi3mr_reset_types[i].name; 954 break; 955 } 956 } 957 return name; 958 } 959 960 /** 961 * mpi3mr_print_fault_info - Display fault information 962 * @mrioc: Adapter instance reference 963 * 964 * Display the controller fault information if there is a 965 * controller fault. 966 * 967 * Return: Nothing. 
968 */ 969 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 970 { 971 u32 ioc_status, code, code1, code2, code3; 972 973 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 974 975 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 976 code = readl(&mrioc->sysif_regs->fault); 977 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 978 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 979 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 980 981 ioc_info(mrioc, 982 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 983 code, code1, code2, code3); 984 } 985 } 986 987 /** 988 * mpi3mr_get_iocstate - Get IOC State 989 * @mrioc: Adapter instance reference 990 * 991 * Return a proper IOC state enum based on the IOC status and 992 * IOC configuration and unrcoverable state of the controller. 993 * 994 * Return: Current IOC state. 995 */ 996 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 997 { 998 u32 ioc_status, ioc_config; 999 u8 ready, enabled; 1000 1001 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1002 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1003 1004 if (mrioc->unrecoverable) 1005 return MRIOC_STATE_UNRECOVERABLE; 1006 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 1007 return MRIOC_STATE_FAULT; 1008 1009 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 1010 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 1011 1012 if (ready && enabled) 1013 return MRIOC_STATE_READY; 1014 if ((!ready) && (!enabled)) 1015 return MRIOC_STATE_RESET; 1016 if ((!ready) && (enabled)) 1017 return MRIOC_STATE_BECOMING_READY; 1018 1019 return MRIOC_STATE_RESET_REQUESTED; 1020 } 1021 1022 /** 1023 * mpi3mr_clear_reset_history - clear reset history 1024 * @mrioc: Adapter instance reference 1025 * 1026 * Write the reset history bit in IOC status to clear the bit, 1027 * if it is already set. 1028 * 1029 * Return: Nothing. 
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* The history bit is cleared by writing the status value back */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad, then clear ENABLE_IOC to reset */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms until reset-history is set, fault, or timeout */
	timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Success only if neither ready, faulted, nor still enabled */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Return zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	u16 dev_handle_bitmap_sz;
	void *removepend_bitmap;

	/* Reply size, reply queue and request queue counts must not shrink */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* One bit per device handle, rounded up to whole bytes */
	dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		dev_handle_bitmap_sz++;
	if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
		/* Grow the bitmap, zeroing only the newly added bytes */
		removepend_bitmap = krealloc(mrioc->removepend_bitmap,
		    dev_handle_bitmap_sz, GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap sz from: %d to %d\n",
			    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
			return -EPERM;
		}
		memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
		    dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
		    "increased dev_handle_bitmap_sz from %d to %d\n",
		    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
		mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
	}

	return 0;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, appropriate error on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status, timeout;
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	u64 base_info;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
	    ioc_status, ioc_config, base_info);

	/*The timeout value is in 2sec unit, changing it to seconds*/
	mrioc->ready_timeout =
	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);

	ioc_state = mpi3mr_get_iocstate(mrioc);
	ioc_info(mrioc, "controller is in %s state during detection\n",
	    mpi3mr_iocstate_name(ioc_state));

	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
		/* Wait out an in-flight transition before acting on the state */
		timeout = mrioc->ready_timeout * 10;
		do {
			msleep(100);
		} while (--timeout);

		ioc_state = mpi3mr_get_iocstate(mrioc);
		ioc_info(mrioc,
		    "controller is in %s state after waiting to reset\n",
		    mpi3mr_iocstate_name(ioc_state));
	}

	if (ioc_state == MRIOC_STATE_READY) {
		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
		retval = mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_BRINGUP);
		ioc_state =
mpi3mr_get_iocstate(mrioc); 1203 if (retval) 1204 ioc_err(mrioc, 1205 "message unit reset failed with error %d current state %s\n", 1206 retval, mpi3mr_iocstate_name(ioc_state)); 1207 } 1208 if (ioc_state != MRIOC_STATE_RESET) { 1209 mpi3mr_print_fault_info(mrioc); 1210 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1211 retval = mpi3mr_issue_reset(mrioc, 1212 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1213 MPI3MR_RESET_FROM_BRINGUP); 1214 if (retval) { 1215 ioc_err(mrioc, 1216 "soft reset failed with error %d\n", retval); 1217 goto out_failed; 1218 } 1219 } 1220 ioc_state = mpi3mr_get_iocstate(mrioc); 1221 if (ioc_state != MRIOC_STATE_RESET) { 1222 ioc_err(mrioc, 1223 "cannot bring controller to reset state, current state: %s\n", 1224 mpi3mr_iocstate_name(ioc_state)); 1225 goto out_failed; 1226 } 1227 mpi3mr_clear_reset_history(mrioc); 1228 retval = mpi3mr_setup_admin_qpair(mrioc); 1229 if (retval) { 1230 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1231 retval); 1232 goto out_failed; 1233 } 1234 1235 ioc_info(mrioc, "bringing controller to ready state\n"); 1236 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1237 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1238 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1239 1240 timeout = mrioc->ready_timeout * 10; 1241 do { 1242 ioc_state = mpi3mr_get_iocstate(mrioc); 1243 if (ioc_state == MRIOC_STATE_READY) { 1244 ioc_info(mrioc, 1245 "successfully transitioned to %s state\n", 1246 mpi3mr_iocstate_name(ioc_state)); 1247 return 0; 1248 } 1249 msleep(100); 1250 } while (--timeout); 1251 1252 out_failed: 1253 ioc_state = mpi3mr_get_iocstate(mrioc); 1254 ioc_err(mrioc, 1255 "failed to bring to ready state, current state: %s\n", 1256 mpi3mr_iocstate_name(ioc_state)); 1257 return retval; 1258 } 1259 1260 /** 1261 * mpi3mr_soft_reset_success - Check softreset is success or not 1262 * @ioc_status: IOC status register value 1263 * @ioc_config: IOC config register value 
 *
 * Check whether the soft reset is successful or not based on
 * IOC status and IOC config register values.
 *
 * Return: True when the soft reset is success, false otherwise.
 */
static inline bool
mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
{
	/* Reset completed iff the IOC is neither READY nor still enabled. */
	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		return true;
	return false;
}

/**
 * mpi3mr_diagfault_success - Check diag fault is success or not
 * @mrioc: Adapter reference
 * @ioc_status: IOC status register value
 *
 * Check whether the controller hit diag reset fault code.
 *
 * Return: True when there is diag fault, false otherwise.
 */
static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
	u32 ioc_status)
{
	u32 fault;

	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
		return false;
	/* A diag-fault reset is "successful" only when the fault code is
	 * the dedicated DIAG_FAULT_RESET code, not some unrelated fault.
	 */
	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
		mpi3mr_print_fault_info(mrioc);
		return true;
	}
	return false;
}

/**
 * mpi3mr_set_diagsave - Set diag save bit for snapdump
 * @mrioc: Adapter reference
 *
 * Set diag save bit in IOC configuration register to enable
 * snapdump.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config;

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
}

/**
 * mpi3mr_issue_reset - Issue reset to the controller
 * @mrioc: Adapter reference
 * @reset_type: Reset type
 * @reset_reason: Reset reason code
 *
 * Unlock the host diagnostic registers and write the specific
 * reset type to that, wait for reset acknowledgment from the
 * controller, if the reset is not successful retry for the
 * predefined number of times.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
	u32 reset_reason)
{
	int retval = -1;
	u8 unlock_retry_count = 0;
	u32 host_diagnostic, ioc_status, ioc_config;
	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; /* 100 ms ticks */

	/* Only soft-reset and diag-fault actions are supported here. */
	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
		return retval;
	if (mrioc->unrecoverable)
		return retval;
	/* Firmware-initiated reset: nothing for the host to drive. */
	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
		retval = 0;
		return retval;
	}

	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
	    mpi3mr_reset_type_name(reset_type),
	    mpi3mr_reset_rc_name(reset_reason), reset_reason);

	mpi3mr_clear_reset_history(mrioc);
	do {
		ioc_info(mrioc,
		    "Write magic sequence to unlock host diag register (retry=%d)\n",
		    ++unlock_retry_count);
		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
			/*
			 * NOTE(review): if the retry limit could be hit on
			 * the first pass, host_diagnostic is logged before
			 * it is ever read below - confirm the retry count
			 * macro is > 1.
			 */
			ioc_err(mrioc,
			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
			    mpi3mr_reset_type_name(reset_type),
			    host_diagnostic);
			mrioc->unrecoverable = 1;
			return retval;
		}

		/* The fixed 7-key magic sequence unlocks host_diagnostic;
		 * the write order is mandated by the interface.
		 */
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
		    &mrioc->sysif_regs->write_sequence);
		usleep_range(1000, 1100);
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		ioc_info(mrioc,
		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
		    unlock_retry_count, host_diagnostic);
	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));

	/* Record the reason, then trigger the reset action. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	writel(host_diagnostic | reset_type,
	    &mrioc->sysif_regs->host_diagnostic);
	switch (reset_type) {
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
		/* Wait for RESET_HISTORY plus not-READY/not-ENABLED. */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
			    ) {
				mpi3mr_clear_reset_history(mrioc);
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		mpi3mr_print_fault_info(mrioc);
		break;
	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
		/* Wait for the controller to report the diag-fault code. */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		break;
	default:
		break;
	}
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1426 &mrioc->sysif_regs->write_sequence); 1427 1428 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1429 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1430 ioc_info(mrioc, 1431 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1432 (!retval)?"successful":"failed", ioc_status, 1433 ioc_config); 1434 if (retval) 1435 mrioc->unrecoverable = 1; 1436 return retval; 1437 } 1438 1439 /** 1440 * mpi3mr_admin_request_post - Post request to admin queue 1441 * @mrioc: Adapter reference 1442 * @admin_req: MPI3 request 1443 * @admin_req_sz: Request size 1444 * @ignore_reset: Ignore reset in process 1445 * 1446 * Post the MPI3 request into admin request queue and 1447 * inform the controller, if the queue is full return 1448 * appropriate error. 1449 * 1450 * Return: 0 on success, non-zero on failure. 1451 */ 1452 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1453 u16 admin_req_sz, u8 ignore_reset) 1454 { 1455 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1456 int retval = 0; 1457 unsigned long flags; 1458 u8 *areq_entry; 1459 1460 if (mrioc->unrecoverable) { 1461 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1462 return -EFAULT; 1463 } 1464 1465 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1466 areq_pi = mrioc->admin_req_pi; 1467 areq_ci = mrioc->admin_req_ci; 1468 max_entries = mrioc->num_admin_req; 1469 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1470 (areq_pi == (max_entries - 1)))) { 1471 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1472 retval = -EAGAIN; 1473 goto out; 1474 } 1475 if (!ignore_reset && mrioc->reset_in_progress) { 1476 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1477 retval = -EAGAIN; 1478 goto out; 1479 } 1480 areq_entry = (u8 *)mrioc->admin_req_base + 1481 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1482 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1483 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1484 
1485 if (++areq_pi == max_entries) 1486 areq_pi = 0; 1487 mrioc->admin_req_pi = areq_pi; 1488 1489 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1490 1491 out: 1492 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1493 1494 return retval; 1495 } 1496 1497 /** 1498 * mpi3mr_free_op_req_q_segments - free request memory segments 1499 * @mrioc: Adapter instance reference 1500 * @q_idx: operational request queue index 1501 * 1502 * Free memory segments allocated for operational request queue 1503 * 1504 * Return: Nothing. 1505 */ 1506 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1507 { 1508 u16 j; 1509 int size; 1510 struct segments *segments; 1511 1512 segments = mrioc->req_qinfo[q_idx].q_segments; 1513 if (!segments) 1514 return; 1515 1516 if (mrioc->enable_segqueue) { 1517 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1518 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1519 dma_free_coherent(&mrioc->pdev->dev, 1520 MPI3MR_MAX_SEG_LIST_SIZE, 1521 mrioc->req_qinfo[q_idx].q_segment_list, 1522 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1523 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1524 } 1525 } else 1526 size = mrioc->req_qinfo[q_idx].segment_qd * 1527 mrioc->facts.op_req_sz; 1528 1529 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1530 if (!segments[j].segment) 1531 continue; 1532 dma_free_coherent(&mrioc->pdev->dev, 1533 size, segments[j].segment, segments[j].segment_dma); 1534 segments[j].segment = NULL; 1535 } 1536 kfree(mrioc->req_qinfo[q_idx].q_segments); 1537 mrioc->req_qinfo[q_idx].q_segments = NULL; 1538 mrioc->req_qinfo[q_idx].qid = 0; 1539 } 1540 1541 /** 1542 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1543 * @mrioc: Adapter instance reference 1544 * @q_idx: operational reply queue index 1545 * 1546 * Free memory segments allocated for operational reply queue 1547 * 1548 * Return: Nothing. 
 */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	u16 j;
	int size;
	struct segments *segments;

	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: every segment is a fixed-size DMA chunk. */
		size = MPI3MR_OP_REP_Q_SEG_SIZE;
		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
		}
	} else
		/* Contiguous mode: a single segment holds the whole queue. */
		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
		    mrioc->op_reply_desc_sz;

	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
		if (!segments[j].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev,
		    size, segments[j].segment, segments[j].segment_dma);
		segments[j].segment = NULL;
	}

	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
	mrioc->op_reply_qinfo[q_idx].qid = 0;
}

/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operatinal reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/*
	 * NOTE(review): the default/poll queue counter is decremented
	 * before the delete request is issued and is not restored on the
	 * failure paths below - confirm this is intended.
	 */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	/* init_cmds is a single shared slot; serialize via its mutex. */
	memset(&delq_req, 0, sizeof(delq_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue teardown must proceed during reset. */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset-handler fault processing. */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Descriptors per segment at the fixed segment size. */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Contiguous mode: one segment holds all replies. */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/* On partial-allocation failure the caller frees via
	 * mpi3mr_free_op_reply_q_segments() (see create_op_reply_q).
	 */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Requests per segment at the fixed segment size. */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/* On partial-allocation failure the caller frees via
	 * mpi3mr_free_op_req_q_segments() (see create_op_req_q).
	 */
	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operatinal reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based; 0 means "not created". */
	reply_qid = qidx + 1;
	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
	/* Revision-0 (A0) parts get the reduced 4K queue depth. */
	if (!mrioc->pdev->revision)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;

	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Release any partially allocated segments. */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* MSI-X vectors below the poll-queue region are interrupt driven;
	 * the rest are io_uring poll queues.
	 */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
		    cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues share the last vector, with its IRQ disabled
		 * once the first poll queue is created.
		 */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
		    reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): the REQUEST-queue flag macro is used here
		 * for a REPLY queue - verify the REPLY-queue SEGMENTED flag
		 * has the same encoding.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operatinal request queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based; 0 means "not created". */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments. */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue creation runs during controller init. */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_queues - create operational queue pairs
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for operational queue meta data and call
 * create request and reply queue functions.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u16 num_queues = 0, i = 0, msix_count_op_q = 1;

	/* Cap at what the firmware supports for both queue directions. */
	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
	    mrioc->facts.max_op_req_q);

	msix_count_op_q =
	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
	if (!mrioc->num_queues)
		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
	/*
	 * During reset set the num_queues to the number of queues
	 * that was set before the reset.
	 */
	num_queues = mrioc->num_op_reply_q ?
	    mrioc->num_op_reply_q : mrioc->num_queues;
	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
	    num_queues);

	if (!mrioc->req_qinfo) {
		mrioc->req_qinfo = kcalloc(num_queues,
		    sizeof(struct op_req_qinfo), GFP_KERNEL);
		if (!mrioc->req_qinfo) {
			retval = -1;
			goto out_failed;
		}

		/* NOTE(review): open-coded size multiplication; kcalloc
		 * (as used for req_qinfo above) would be overflow-safe.
		 */
		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
		    num_queues, GFP_KERNEL);
		if (!mrioc->op_reply_qinfo) {
			retval = -1;
			goto out_failed;
		}
	}

	if (mrioc->enable_segqueue)
		ioc_info(mrioc,
		    "allocating operational queues through segmented queues\n");

	for (i = 0; i < num_queues; i++) {
		if (mpi3mr_create_op_reply_q(mrioc, i)) {
			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
			break;
		}
		if (mpi3mr_create_op_req_q(mrioc, i,
		    mrioc->op_reply_qinfo[i].qid)) {
			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
			/* Tear down the orphaned reply queue of this pair. */
			mpi3mr_delete_op_reply_q(mrioc, i);
			break;
		}
	}

	if (i == 0) {
		/* Not even one queue is created successfully*/
		retval = -1;
		goto out_failed;
	}
	/* Operate with however many pairs were created successfully. */
	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
	ioc_info(mrioc,
	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
	    mrioc->num_op_reply_q, mrioc->default_qcount,
	    mrioc->active_poll_qcount);

	return retval;
out_failed:
	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;

	return retval;
}

/**
 * mpi3mr_op_request_post - Post request to operational queue
 * @mrioc: Adapter reference
 * @op_req_q: Operational request queue info
 * @req: MPI3 request
 *
 * Post the MPI3 request into operational request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;

	/* reply_qid is 1-based; convert to 0-based index. */
	reply_qidx = op_req_q->reply_qid - 1;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue full: drain the paired reply queue once to free
		 * request slots, then re-check before giving up.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the frame inside the segment that holds slot 'pi'. */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2157 if (++pi == max_entries) 2158 pi = 0; 2159 op_req_q->pi = pi; 2160 2161 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 2162 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 2163 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 2164 2165 writel(op_req_q->pi, 2166 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 2167 2168 out: 2169 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 2170 return retval; 2171 } 2172 2173 /** 2174 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2175 * controller 2176 * @mrioc: Adapter instance reference 2177 * @reason_code: reason code for the fault. 2178 * 2179 * This routine will save snapdump and fault the controller with 2180 * the given reason code if it is not already in the fault or 2181 * not asynchronosuly reset. This will be used to handle 2182 * initilaization time faults/resets/timeout as in those cases 2183 * immediate soft reset invocation is not required. 2184 * 2185 * Return: None. 2186 */ 2187 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2188 { 2189 u32 ioc_status, host_diagnostic, timeout; 2190 2191 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2192 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2193 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2194 mpi3mr_print_fault_info(mrioc); 2195 return; 2196 } 2197 mpi3mr_set_diagsave(mrioc); 2198 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2199 reason_code); 2200 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2201 do { 2202 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2203 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 2204 break; 2205 msleep(100); 2206 } while (--timeout); 2207 } 2208 2209 /** 2210 * mpi3mr_sync_timestamp - Issue time stamp sync request 2211 * @mrioc: Adapter reference 2212 * 2213 * Issue IO unit control MPI request to synchornize firmware 2214 * timestamp with host time. 
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Host wall-clock time in milliseconds is handed to firmware. */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Escalate to a soft reset unless a reset already reaped
		 * this command (MPI3MR_CMD_RESET set by reset path). */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Upload only the manifest section that follows the image header. */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Best effort: print only when the upload succeeded and the image
	 * actually carries an MPI manifest; a failed read is not fatal,
	 * hence retval is forced to 0 below regardless. */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
2372 */ 2373 static void mpi3mr_watchdog_work(struct work_struct *work) 2374 { 2375 struct mpi3mr_ioc *mrioc = 2376 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2377 unsigned long flags; 2378 enum mpi3mr_iocstate ioc_state; 2379 u32 fault, host_diagnostic, ioc_status; 2380 u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; 2381 2382 if (mrioc->reset_in_progress || mrioc->unrecoverable) 2383 return; 2384 2385 if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2386 mrioc->ts_update_counter = 0; 2387 mpi3mr_sync_timestamp(mrioc); 2388 } 2389 2390 if ((mrioc->prepare_for_reset) && 2391 ((mrioc->prepare_for_reset_timeout_counter++) >= 2392 MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { 2393 mpi3mr_soft_reset_handler(mrioc, 2394 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); 2395 return; 2396 } 2397 2398 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2399 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 2400 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); 2401 return; 2402 } 2403 2404 /*Check for fault state every one second and issue Soft reset*/ 2405 ioc_state = mpi3mr_get_iocstate(mrioc); 2406 if (ioc_state != MRIOC_STATE_FAULT) 2407 goto schedule_work; 2408 2409 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 2410 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2411 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2412 if (!mrioc->diagsave_timeout) { 2413 mpi3mr_print_fault_info(mrioc); 2414 ioc_warn(mrioc, "diag save in progress\n"); 2415 } 2416 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) 2417 goto schedule_work; 2418 } 2419 2420 mpi3mr_print_fault_info(mrioc); 2421 mrioc->diagsave_timeout = 0; 2422 2423 switch (fault) { 2424 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: 2425 ioc_info(mrioc, 2426 "controller requires system power cycle, marking controller as unrecoverable\n"); 2427 mrioc->unrecoverable = 1; 2428 return; 2429 case 
MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: 2430 return; 2431 case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: 2432 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; 2433 break; 2434 default: 2435 break; 2436 } 2437 mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); 2438 return; 2439 2440 schedule_work: 2441 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2442 if (mrioc->watchdog_work_q) 2443 queue_delayed_work(mrioc->watchdog_work_q, 2444 &mrioc->watchdog_work, 2445 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2446 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2447 return; 2448 } 2449 2450 /** 2451 * mpi3mr_start_watchdog - Start watchdog 2452 * @mrioc: Adapter instance reference 2453 * 2454 * Create and start the watchdog thread to monitor controller 2455 * faults. 2456 * 2457 * Return: Nothing. 2458 */ 2459 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2460 { 2461 if (mrioc->watchdog_work_q) 2462 return; 2463 2464 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2465 snprintf(mrioc->watchdog_work_q_name, 2466 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2467 mrioc->id); 2468 mrioc->watchdog_work_q = 2469 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2470 if (!mrioc->watchdog_work_q) { 2471 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2472 return; 2473 } 2474 2475 if (mrioc->watchdog_work_q) 2476 queue_delayed_work(mrioc->watchdog_work_q, 2477 &mrioc->watchdog_work, 2478 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2479 } 2480 2481 /** 2482 * mpi3mr_stop_watchdog - Stop watchdog 2483 * @mrioc: Adapter instance reference 2484 * 2485 * Stop the watchdog thread created to monitor controller 2486 * faults. 2487 * 2488 * Return: Nothing. 
 */
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/* Clear the queue pointer under watchdog_lock first so the work
	 * item (which re-checks the pointer under the same lock) cannot
	 * re-arm itself while we tear down. */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	wq = mrioc->watchdog_work_q;
	mrioc->watchdog_work_q = NULL;
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	if (wq) {
		/* If cancel missed (work already running), flush so the
		 * in-flight execution completes before destroy. */
		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	/* NOTE(review): this condition is always true because
	 * admin_req_base was just set to NULL above; the check looks
	 * like a leftover from a reuse-across-reset path — confirm. */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Program queue depths (replies in the high 16 bits, requests in
	 * the low 16) and DMA addresses into the system interface regs. */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* Firmware DMA-writes the facts payload into this bounce buffer. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Init-time timeout: fault the IOC for snapdump instead of
		 * issuing an immediate soft reset. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Set DMA mask requested by IOC facts
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask: nothing to do when firmware did not
	 * report a width or the current mask already fits within it. */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Firmware reports the facts length in 4-byte units; warn (but
	 * continue) on a driver/firmware structure size skew. */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size is encoded as a power-of-two
	 * exponent in the IOC configuration register. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Convert each little-endian firmware field to CPU order and
	 * cache it in mrioc->facts for the rest of the driver. */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never request more MSI-X vectors than the IOC supports. */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. across a reset) — nothing to do. */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	/* One bit per device handle, rounded up to whole bytes. */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
	if (MPI3MR_NUM_EVTACKCMD % 8)
		mrioc->evtack_cmds_bitmap_sz++;
	mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: one extra slot keeps the circular queues from
	 * ever appearing completely full. */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/* Per-allocation cleanup is deferred to the caller's teardown
	 * path; just report the failure. */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue: each entry holds the DMA address
	 * of one reply frame; the final entry stays zero (queue is one
	 * deeper than the number of buffers). */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue, same layout as above. */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* Driver info block is DMA-read by firmware at init time. */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand all reply/sense buffers to the firmware by publishing the
	 * host indices past the last populated queue entry. */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Unmask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	if (event >= 128)
		return;

	/* NOTE(review): for event % 32 == 31 this is 1 << 31 on a signed
	 * int literal before the u32 assignment — consider 1U << ... to
	 * avoid the signed-shift corner; confirm against project style. */
	desired_event = (1 << (event % 32));
	word = event / 32;

	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
3123 */ 3124 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) 3125 { 3126 struct mpi3_event_notification_request evtnotify_req; 3127 int retval = 0; 3128 u8 i; 3129 3130 memset(&evtnotify_req, 0, sizeof(evtnotify_req)); 3131 mutex_lock(&mrioc->init_cmds.mutex); 3132 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3133 retval = -1; 3134 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); 3135 mutex_unlock(&mrioc->init_cmds.mutex); 3136 goto out; 3137 } 3138 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3139 mrioc->init_cmds.is_waiting = 1; 3140 mrioc->init_cmds.callback = NULL; 3141 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3142 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; 3143 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3144 evtnotify_req.event_masks[i] = 3145 cpu_to_le32(mrioc->event_masks[i]); 3146 init_completion(&mrioc->init_cmds.done); 3147 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, 3148 sizeof(evtnotify_req), 1); 3149 if (retval) { 3150 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); 3151 goto out_unlock; 3152 } 3153 wait_for_completion_timeout(&mrioc->init_cmds.done, 3154 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3155 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3156 ioc_err(mrioc, "event notification timed out\n"); 3157 mpi3mr_check_rh_fault_ioc(mrioc, 3158 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); 3159 retval = -1; 3160 goto out_unlock; 3161 } 3162 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3163 != MPI3_IOCSTATUS_SUCCESS) { 3164 ioc_err(mrioc, 3165 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3166 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3167 mrioc->init_cmds.ioc_loginfo); 3168 retval = -1; 3169 goto out_unlock; 3170 } 3171 3172 out_unlock: 3173 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3174 mutex_unlock(&mrioc->init_cmds.mutex); 3175 out: 3176 return retval; 3177 } 3178 3179 /** 3180 * 
mpi3mr_process_event_ack - Process event acknowledgment 3181 * @mrioc: Adapter instance reference 3182 * @event: MPI3 event ID 3183 * @event_ctx: event context 3184 * 3185 * Send event acknowledgment through admin queue and wait for 3186 * it to complete. 3187 * 3188 * Return: 0 on success, non-zero on failures. 3189 */ 3190 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3191 u32 event_ctx) 3192 { 3193 struct mpi3_event_ack_request evtack_req; 3194 int retval = 0; 3195 3196 memset(&evtack_req, 0, sizeof(evtack_req)); 3197 mutex_lock(&mrioc->init_cmds.mutex); 3198 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3199 retval = -1; 3200 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3201 mutex_unlock(&mrioc->init_cmds.mutex); 3202 goto out; 3203 } 3204 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3205 mrioc->init_cmds.is_waiting = 1; 3206 mrioc->init_cmds.callback = NULL; 3207 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3208 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3209 evtack_req.event = event; 3210 evtack_req.event_context = cpu_to_le32(event_ctx); 3211 3212 init_completion(&mrioc->init_cmds.done); 3213 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3214 sizeof(evtack_req), 1); 3215 if (retval) { 3216 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3217 goto out_unlock; 3218 } 3219 wait_for_completion_timeout(&mrioc->init_cmds.done, 3220 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3221 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3222 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3223 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3224 mpi3mr_soft_reset_handler(mrioc, 3225 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 3226 retval = -1; 3227 goto out_unlock; 3228 } 3229 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3230 != MPI3_IOCSTATUS_SUCCESS) { 3231 ioc_err(mrioc, 3232 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3233 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3234 mrioc->init_cmds.ioc_loginfo); 3235 retval = -1; 3236 goto out_unlock; 3237 } 3238 3239 out_unlock: 3240 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3241 mutex_unlock(&mrioc->init_cmds.mutex); 3242 out: 3243 return retval; 3244 } 3245 3246 /** 3247 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3248 * @mrioc: Adapter instance reference 3249 * 3250 * Allocate chain buffers and set a bitmap to indicate free 3251 * chain buffers. Chain buffers are used to pass the SGE 3252 * information along with MPI3 SCSI IO requests for host I/O. 3253 * 3254 * Return: 0 on success, non-zero on failure 3255 */ 3256 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3257 { 3258 int retval = 0; 3259 u32 sz, i; 3260 u16 num_chains; 3261 3262 if (mrioc->chain_sgl_list) 3263 return retval; 3264 3265 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3266 3267 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3268 | SHOST_DIX_TYPE1_PROTECTION 3269 | SHOST_DIX_TYPE2_PROTECTION 3270 | SHOST_DIX_TYPE3_PROTECTION)) 3271 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3272 3273 mrioc->chain_buf_count = num_chains; 3274 sz = sizeof(struct chain_element) * num_chains; 3275 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3276 if (!mrioc->chain_sgl_list) 3277 goto out_failed; 3278 3279 sz = MPI3MR_PAGE_SIZE_4K; 3280 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3281 &mrioc->pdev->dev, sz, 16, 0); 3282 if (!mrioc->chain_buf_pool) { 3283 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3284 goto out_failed; 3285 } 3286 3287 for (i = 0; i < num_chains; i++) { 3288 mrioc->chain_sgl_list[i].addr = 3289 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3290 &mrioc->chain_sgl_list[i].dma_addr); 3291 3292 if (!mrioc->chain_sgl_list[i].addr) 3293 goto out_failed; 3294 } 3295 mrioc->chain_bitmap_sz = num_chains / 8; 3296 if (num_chains % 8) 3297 mrioc->chain_bitmap_sz++; 3298 mrioc->chain_bitmap = 
kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 3299 if (!mrioc->chain_bitmap) 3300 goto out_failed; 3301 return retval; 3302 out_failed: 3303 retval = -1; 3304 return retval; 3305 } 3306 3307 /** 3308 * mpi3mr_port_enable_complete - Mark port enable complete 3309 * @mrioc: Adapter instance reference 3310 * @drv_cmd: Internal command tracker 3311 * 3312 * Call back for asynchronous port enable request sets the 3313 * driver command to indicate port enable request is complete. 3314 * 3315 * Return: Nothing 3316 */ 3317 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 3318 struct mpi3mr_drv_cmd *drv_cmd) 3319 { 3320 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3321 drv_cmd->callback = NULL; 3322 mrioc->scan_failed = drv_cmd->ioc_status; 3323 mrioc->scan_started = 0; 3324 } 3325 3326 /** 3327 * mpi3mr_issue_port_enable - Issue Port Enable 3328 * @mrioc: Adapter instance reference 3329 * @async: Flag to wait for completion or not 3330 * 3331 * Issue Port Enable MPI request through admin queue and if the 3332 * async flag is not set wait for the completion of the port 3333 * enable or time out. 3334 * 3335 * Return: 0 on success, non-zero on failures. 
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	/* async: completion handled via callback; sync: wait on completion */
	if (async) {
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* sync path: mark scan state exactly as the async callback would */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure*/
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
};

/**
 * mpi3mr_print_ioc_info - Display controller information
 * @mrioc: Adapter instance reference
 *
 * Display controller personality, capability, supported
 * protocols etc.
 *
 * Return: Nothing
 */
static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
{
	int i = 0, bytes_written = 0;
	char personality[16];
	char protocol[50] = {0};
	char capabilities[100] = {0};
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	/*
	 * NOTE(review): strncpy() does not guarantee NUL termination; it is
	 * safe here only because every source literal is shorter than
	 * sizeof(personality). strscpy() would match the convention used
	 * elsewhere in this file (see the driver-info setup in IOC Init) —
	 * consider switching for consistency.
	 */
	switch (mrioc->facts.personality) {
	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
		strncpy(personality, "Enhanced HBA", sizeof(personality));
		break;
	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
		strncpy(personality, "RAID", sizeof(personality));
		break;
	default:
		strncpy(personality, "Unknown", sizeof(personality));
		break;
	}

	ioc_info(mrioc, "Running in %s Personality", personality);

	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);

	/* build a comma-separated list of supported protocols */
	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
		if (mrioc->facts.protocol_flags &
		    mpi3mr_protocols[i].protocol) {
			bytes_written += scnprintf(protocol + bytes_written,
			    sizeof(protocol) - bytes_written, "%s%s",
			    bytes_written ? "," : "",
			    mpi3mr_protocols[i].name);
		}
	}

	bytes_written = 0;
	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
		if (mrioc->facts.protocol_flags &
		    mpi3mr_capabilities[i].capability) {
			bytes_written += scnprintf(capabilities + bytes_written,
			    sizeof(capabilities) - bytes_written, "%s%s",
			    bytes_written ? "," : "",
			    mpi3mr_capabilities[i].name);
		}
	}

	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
	    protocol, capabilities);
}

/**
 * mpi3mr_cleanup_resources - Free PCI resources
 * @mrioc: Adapter instance reference
 *
 * Unmap PCI device memory and disable PCI device.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;

	mpi3mr_cleanup_isr(mrioc);

	if (mrioc->sysif_regs) {
		iounmap((void __iomem *)mrioc->sysif_regs);
		mrioc->sysif_regs = NULL;
	}

	if (pci_is_enabled(pdev)) {
		if (mrioc->bars)
			pci_release_selected_regions(pdev, mrioc->bars);
		pci_disable_device(pdev);
	}
}

/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* reuse a previously chosen mask (re-init), else prefer 64-bit DMA
	 * when the platform requires it and dma_addr_t is wide enough */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* map the first memory BAR as the system interface register space */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		/* fall back from 64-bit to 32-bit DMA before giving up */
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* MSI-X message control: low 10 bits hold (table size - 1) */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* reserve vectors for poll queues, keeping at least two for irqs */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
		    mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}

/**
 * mpi3mr_enable_events - Enable required events
 * @mrioc: Adapter instance reference
 *
 * This routine unmasks the events required by the driver by
 * sending the appropriate event mask bitmap through an event
 * notification request.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 i;

	/* start with everything masked, then unmask the events we handle */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mrioc->event_masks[i] = -1;

	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_TEMP_THRESHOLD);

	retval = mpi3mr_issue_event_notification(mrioc);
	if (retval)
		ioc_err(mrioc, "failed to issue event notification %d\n",
		    retval);
	return retval;
}

/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;

retry_init:
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* keep some request slots for internal (non-SCSI-IO) commands */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	/* kdump kernels run with a drastically reduced I/O depth */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_print_ioc_info(mrioc);

	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
	if (retval) {
		ioc_err(mrioc,
		    "%s :Failed to allocated reply sense buffers %d\n",
		    __func__, retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_alloc_chain_bufs(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* switch from the single bring-up vector to the full vector set */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* retry the whole init sequence (fresh buffers) up to twice */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}

/**
 * mpi3mr_reinit_ioc - Re-Initialize the controller
 * @mrioc: Adapter instance reference
 * @is_resume: Called from resume or reset path
 *
 * This is the controller re-initialization routine, executed from
 * the soft reset handler or resume callback.
Creates 3767 * operational reply queue pairs, allocate required memory for 3768 * reply pool, sense buffer pool, issue IOC init request to the 3769 * firmware, unmask the events and issue port enable to discover 3770 * SAS/SATA/NVMe devices and RAID volumes. 3771 * 3772 * Return: 0 on success and non-zero on failure. 3773 */ 3774 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3775 { 3776 int retval = 0; 3777 u8 retry = 0; 3778 struct mpi3_ioc_facts_data facts_data; 3779 3780 retry_init: 3781 dprint_reset(mrioc, "bringing up the controller to ready state\n"); 3782 retval = mpi3mr_bring_ioc_ready(mrioc); 3783 if (retval) { 3784 ioc_err(mrioc, "failed to bring to ready state\n"); 3785 goto out_failed_noretry; 3786 } 3787 3788 if (is_resume) { 3789 dprint_reset(mrioc, "setting up single ISR\n"); 3790 retval = mpi3mr_setup_isr(mrioc, 1); 3791 if (retval) { 3792 ioc_err(mrioc, "failed to setup ISR\n"); 3793 goto out_failed_noretry; 3794 } 3795 } else 3796 mpi3mr_ioc_enable_intr(mrioc); 3797 3798 dprint_reset(mrioc, "getting ioc_facts\n"); 3799 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3800 if (retval) { 3801 ioc_err(mrioc, "failed to get ioc_facts\n"); 3802 goto out_failed; 3803 } 3804 3805 dprint_reset(mrioc, "validating ioc_facts\n"); 3806 retval = mpi3mr_revalidate_factsdata(mrioc); 3807 if (retval) { 3808 ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); 3809 goto out_failed_noretry; 3810 } 3811 3812 mpi3mr_print_ioc_info(mrioc); 3813 3814 dprint_reset(mrioc, "sending ioc_init\n"); 3815 retval = mpi3mr_issue_iocinit(mrioc); 3816 if (retval) { 3817 ioc_err(mrioc, "failed to send ioc_init\n"); 3818 goto out_failed; 3819 } 3820 3821 dprint_reset(mrioc, "getting package version\n"); 3822 retval = mpi3mr_print_pkg_ver(mrioc); 3823 if (retval) { 3824 ioc_err(mrioc, "failed to get package version\n"); 3825 goto out_failed; 3826 } 3827 3828 if (is_resume) { 3829 dprint_reset(mrioc, "setting up multiple ISR\n"); 3830 retval = 
mpi3mr_setup_isr(mrioc, 0); 3831 if (retval) { 3832 ioc_err(mrioc, "failed to re-setup ISR\n"); 3833 goto out_failed_noretry; 3834 } 3835 } 3836 3837 dprint_reset(mrioc, "creating operational queue pairs\n"); 3838 retval = mpi3mr_create_op_queues(mrioc); 3839 if (retval) { 3840 ioc_err(mrioc, "failed to create operational queue pairs\n"); 3841 goto out_failed; 3842 } 3843 3844 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3845 ioc_err(mrioc, 3846 "cannot create minimum number of operational queues expected:%d created:%d\n", 3847 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3848 goto out_failed_noretry; 3849 } 3850 3851 dprint_reset(mrioc, "enabling events\n"); 3852 retval = mpi3mr_enable_events(mrioc); 3853 if (retval) { 3854 ioc_err(mrioc, "failed to enable events\n"); 3855 goto out_failed; 3856 } 3857 3858 ioc_info(mrioc, "sending port enable\n"); 3859 retval = mpi3mr_issue_port_enable(mrioc, 0); 3860 if (retval) { 3861 ioc_err(mrioc, "failed to issue port enable\n"); 3862 goto out_failed; 3863 } 3864 3865 ioc_info(mrioc, "controller %s completed successfully\n", 3866 (is_resume)?"resume":"re-initialization"); 3867 return retval; 3868 out_failed: 3869 if (retry < 2) { 3870 retry++; 3871 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", 3872 (is_resume)?"resume":"re-initialization", retry); 3873 mpi3mr_memset_buffers(mrioc); 3874 goto retry_init; 3875 } 3876 out_failed_noretry: 3877 ioc_err(mrioc, "controller %s is failed\n", 3878 (is_resume)?"resume":"re-initialization"); 3879 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3880 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3881 mrioc->unrecoverable = 1; 3882 return retval; 3883 } 3884 3885 /** 3886 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 3887 * segments 3888 * @mrioc: Adapter instance reference 3889 * @qidx: Operational reply queue index 3890 * 3891 * Return: Nothing. 
3892 */ 3893 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3894 { 3895 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 3896 struct segments *segments; 3897 int i, size; 3898 3899 if (!op_reply_q->q_segments) 3900 return; 3901 3902 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 3903 segments = op_reply_q->q_segments; 3904 for (i = 0; i < op_reply_q->num_segments; i++) 3905 memset(segments[i].segment, 0, size); 3906 } 3907 3908 /** 3909 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 3910 * segments 3911 * @mrioc: Adapter instance reference 3912 * @qidx: Operational request queue index 3913 * 3914 * Return: Nothing. 3915 */ 3916 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3917 { 3918 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 3919 struct segments *segments; 3920 int i, size; 3921 3922 if (!op_req_q->q_segments) 3923 return; 3924 3925 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 3926 segments = op_req_q->q_segments; 3927 for (i = 0; i < op_req_q->num_segments; i++) 3928 memset(segments[i].segment, 0, size); 3929 } 3930 3931 /** 3932 * mpi3mr_memset_buffers - memset memory for a controller 3933 * @mrioc: Adapter instance reference 3934 * 3935 * clear all the memory allocated for a controller, typically 3936 * called post reset to reuse the memory allocated during the 3937 * controller init. 3938 * 3939 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/*
	 * init_cmds.reply being set is used as the marker that the internal
	 * command reply buffers and bitmaps were allocated — presumably they
	 * are all allocated together; TODO confirm against the allocation
	 * path before relying on this elsewhere.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
		memset(mrioc->evtack_cmds_bitmap, 0,
		    mrioc->evtack_cmds_bitmap_sz);
	}

	/* reset every operational reply/request queue pair to pristine state */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/* sense buffers and their free queue */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	/* reply buffers and their free queue */
	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* drop dangling reply-queue references from the interrupt info */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	/* internal command reply buffers and tracking bitmaps */
	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	kfree(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	kfree(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	kfree(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* chain buffers: pool existing implies chain_sgl_list was allocated */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	/* admin queues are plain coherent allocations, not pools */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* Nothing to do if firmware already reports a shutdown in progress */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC configuration register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-advertised shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	/* Poll in 100ms steps until complete or the 100ms-unit timeout expires */
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * Controller cleanup handler: Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/*
	 * Only a ready, recoverable controller with no reset in flight gets a
	 * MUR (falling back to soft reset on MUR failure) followed by the
	 * shutdown notification.
	 */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		/* Wake a sleeping waiter, else run the completion callback */
		if (cmdptr->is_waiting) {
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
4234 */ 4235 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) 4236 { 4237 struct mpi3mr_drv_cmd *cmdptr; 4238 u8 i; 4239 4240 cmdptr = &mrioc->init_cmds; 4241 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4242 cmdptr = &mrioc->host_tm_cmds; 4243 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4244 4245 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 4246 cmdptr = &mrioc->dev_rmhs_cmds[i]; 4247 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4248 } 4249 4250 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { 4251 cmdptr = &mrioc->evtack_cmds[i]; 4252 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4253 } 4254 } 4255 4256 /** 4257 * mpi3mr_soft_reset_handler - Reset the controller 4258 * @mrioc: Adapter instance reference 4259 * @reset_reason: Reset reason code 4260 * @snapdump: Flag to generate snapdump in firmware or not 4261 * 4262 * This is an handler for recovering controller by issuing soft 4263 * reset are diag fault reset. This is a blocking function and 4264 * when one reset is executed if any other resets they will be 4265 * blocked. All IOCTLs/IO will be blocked during the reset. If 4266 * controller reset is successful then the controller will be 4267 * reinitalized, otherwise the controller will be marked as not 4268 * recoverable 4269 * 4270 * In snapdump bit is set, the controller is issued with diag 4271 * fault reset so that the firmware can create a snap dump and 4272 * post that the firmware will result in F000 fault and the 4273 * driver will issue soft reset to recover from that. 4274 * 4275 * Return: 0 on success, non-zero on failure. 
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->reset_in_progress = 1;
	mrioc->prev_reset_result = -1;

	/*
	 * NOTE(review): events are masked only for non-snapdump resets not
	 * triggered by fault-watch/firmware/CI-activation faults — presumably
	 * because a faulted controller cannot service the notification
	 * request; confirm against the firmware spec.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	if (snapdump) {
		/*
		 * Trigger a diag fault so firmware captures a snapdump, then
		 * poll until the diag save completes (or times out) before
		 * issuing the real soft reset below.
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Flush all outstanding driver state before reinitializing the IOC */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	ssleep(10);

out:
	if (!retval) {
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mpi3mr_rfresh_tgtdevs(mrioc);
		mrioc->ts_update_counter = 0;
		/* Re-arm the watchdog now that the controller is back up */
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	} else {
		/* Recovery failed: fault the controller and mark it unusable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
	}
	/* Blocked callers waiting on reset_in_progress return this result */
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}