// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);

/* Module parameter: number of io_uring poll-mode queues (read-only in sysfs). */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");

#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write, atomic when the platform provides writeq(). */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * 64-bit MMIO write fallback for platforms without a native writeq():
 * two 32-bit writes, low dword first.
 * NOTE(review): non-atomic — assumes the hardware tolerates the split
 * low/high update ordering for these registers; confirm against the
 * controller's register interface spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/*
 * mpi3mr_check_req_qfull - check whether an operational request queue is full
 * @op_req_q: operational request queue info
 *
 * The circular queue is full when the producer index is immediately
 * behind the consumer index (including the wrap-around case where
 * pi == num_requests - 1 and ci == 0).
 *
 * Return: true if the queue is full, false otherwise.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/*
 * mpi3mr_sync_irqs - wait for in-flight interrupt handlers to finish
 * @mrioc: Adapter instance reference
 *
 * Synchronizes against every allocated MSI-X vector's handler.
 */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/*
 * mpi3mr_ioc_disable_intr - mark interrupts disabled and drain handlers
 * @mrioc: Adapter instance reference
 *
 * Clears the software intr_enabled flag (checked by the ISRs) and then
 * waits for any handlers already running to complete.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/*
 * mpi3mr_ioc_enable_intr - mark interrupts as enabled
 * @mrioc: Adapter instance reference
 *
 * Only sets the software flag; the ISRs bail out early when it is clear.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/*
 * mpi3mr_cleanup_isr - free IRQs and interrupt bookkeeping
 * @mrioc: Adapter instance reference
 *
 * Disables interrupts, frees every registered IRQ handler, releases
 * the intr_info array and the PCI IRQ vectors.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/*
 * mpi3mr_add_sg_single - populate one simple SGE
 * @paddr: virtual address of the SGE to fill
 * @flags: SGE flags
 * @length: data length in bytes
 * @dma_addr: DMA address of the data buffer
 *
 * Writes the flags, length and address fields of a single
 * MPI3 common SGE in little-endian layout.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/*
 * mpi3mr_build_zero_len_sge - build a zero-length end-of-list SGE
 * @paddr: virtual address of the SGE to fill
 *
 * Used for requests that carry no data; address is set to all-ones (-1).
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/*
 * mpi3mr_get_reply_virt_addr - translate reply DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address inside the reply buffer pool, or NULL when
 * the address is zero or outside the pool's DMA range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/*
 * mpi3mr_get_sensebuf_virt_addr - translate sense-buffer DMA address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address in the sense buffer pool, or NULL for a zero
 * address. No upper-bound check is performed here (unlike the reply pool).
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/*
 * mpi3mr_repost_reply_buf - return a reply buffer to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply buffer
 *
 * Places the buffer back on the reply free queue and informs the
 * controller by writing the new host index, under the free-queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	/* Advance the host index with wrap-around at reply_free_qsz. */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/*
 * mpi3mr_repost_sense_buf - return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Same scheme as mpi3mr_repost_reply_buf() but for the sense buffer
 * free queue, guarded by the sbq lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/*
 * mpi3mr_print_event_data - log a received asynchronous event
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Prints a human-readable description for the event. Events that carry
 * interesting payload (device add/change, discovery, enumeration) are
 * decoded inline and return early; the rest get a one-line name.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Unknown/unhandled events print nothing. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/*
 * mpi3mr_handle_events - process an event notification reply
 * @mrioc: Adapter instance reference
 * @def_reply: default reply frame carrying the event notification
 *
 * Records the IOC change count, logs the event and forwards it to the
 * OS-specific event handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/*
 * mpi3mr_get_drv_cmd - map a host tag to its driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: host tag from the reply descriptor
 * @def_reply: associated default reply frame (may be NULL)
 *
 * Resolves well-known host tags to their dedicated command structures.
 * MPI3MR_HOSTTAG_INVALID tags that carry an event-notification reply
 * are dispatched to mpi3mr_handle_events() as a side effect.
 * Device-removal and event-ack tags index into their command arrays.
 *
 * Return: driver command tracker, or NULL when the tag has no tracker.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}

/*
 * mpi3mr_process_admin_reply_desc - handle one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor to process
 * @reply_dma: out parameter; set to the reply frame's DMA address for
 *             address-type replies (caller reposts it), 0 otherwise
 *
 * Decodes the descriptor type, extracts host tag / IOC status / log
 * info (and the sense buffer for SCSI IO replies), then completes the
 * matching driver command: copies the reply frame, wakes a waiter or
 * invokes the command's callback. Consumed sense buffers are reposted
 * before returning.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only non-NULL for SCSI IO replies; scsi_reply is valid then. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * mpi3mr_process_admin_reply_q - drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Walks reply descriptors starting at the cached consumer index until
 * the phase bit no longer matches the expected phase, processing each
 * descriptor and reposting consumed reply buffers. The consumer index
 * register is updated once at the end.
 *
 * Return: number of reply descriptors processed (0 if none pending).
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase-bit mismatch means the descriptor is not yet valid. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		if (mrioc->unrecoverable)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* The expected phase flips on every queue wrap. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base
		    + admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * The reply queue is split into segments; the consumer index is mapped
 * to a segment and an offset within it.
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed, or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Single-consumer guard: bail if another context is draining this queue. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_blk_mq_poll - Operational reply queue handler
 * @shost: SCSI Host reference
 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed, or number of reply
 * descriptors processed.
 */
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct mpi3mr_ioc *mrioc;

	mrioc = (struct mpi3mr_ioc *)shost->hostdata;

	/* Skip polling while the controller is resetting or dead. */
	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
	    mrioc->unrecoverable))
		return 0;

	num_entries = mpi3mr_process_op_reply_q(mrioc,
	    &mrioc->op_reply_qinfo[queue_num]);

	return num_entries;
}

/*
 * mpi3mr_isr_primary - primary (hard) interrupt handler
 * @irq: IRQ number
 * @privdata: mpi3mr_intr_info for this vector
 *
 * MSI-X vector 0 additionally services the admin reply queue; every
 * vector with an attached operational reply queue drains it.
 *
 * Return: IRQ_HANDLED if any replies were processed, IRQ_NONE otherwise.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
		    intr_info->op_reply_q);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#ifndef CONFIG_PREEMPT_RT

/*
 * mpi3mr_isr - hard IRQ handler with threaded-poll handoff
 * @irq: IRQ number
 * @privdata: mpi3mr_intr_info for this vector
 *
 * Runs the primary handler; if the queue has entered IRQ-poll mode and
 * I/Os are still pending, disables the line and wakes the threaded
 * handler (mpi3mr_isr_poll) to finish draining.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Line is re-enabled by mpi3mr_isr_poll() when draining completes. */
	disable_irq_nosync(intr_info->os_irq);

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
			    intr_info->op_reply_q);

		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Leave poll mode and restore the line disabled by mpi3mr_isr(). */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}

#endif

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 *
 * Return: 0 on success and non zero on failures.
 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	/* On PREEMPT_RT no threaded polling; the primary handler runs alone. */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}

/*
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-X vectors available
 *
 * Poll queues are only kept when at least two vectors remain for the
 * admin and default queues; otherwise the request is dropped to 0.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	/* Reserved for Admin and Default Queue */
	if (max_vectors > 2 &&
	    (mrioc->requested_poll_qcount < max_vectors - 2)) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}

/**
 * mpi3mr_setup_isr - Setup ISR for the controller
 * @mrioc: Adapter instance reference
 * @setup_one: Request one IRQ or more
 *
 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
 *
 * Return: 0 on success and non zero on failures.
 */
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
	unsigned int irq_flags = PCI_IRQ_MSIX;
	int max_vectors, min_vec;
	int retval;
	int i;
	struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };

	if (mrioc->is_intr_info_set)
		return 0;

	mpi3mr_cleanup_isr(mrioc);

	if (setup_one || reset_devices) {
		max_vectors = 1;
		retval = pci_alloc_irq_vectors(mrioc->pdev,
		    1, max_vectors, irq_flags);
		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}
	} else {
		max_vectors =
		    min_t(int, mrioc->cpu_count + 1 +
			mrioc->requested_poll_qcount, mrioc->msix_count);

		mpi3mr_calc_poll_queues(mrioc, max_vectors);

		ioc_info(mrioc,
		    "MSI-X vectors supported: %d, no of cores: %d,",
		    mrioc->msix_count, mrioc->cpu_count);
		ioc_info(mrioc,
		    "MSI-x vectors requested: %d poll_queues %d\n",
		    max_vectors, mrioc->requested_poll_qcount);

		desc.post_vectors = mrioc->requested_poll_qcount;
		min_vec = desc.pre_vectors + desc.post_vectors;
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;

		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
		    min_vec, max_vectors, irq_flags, &desc);

		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}


		/*
		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
		 * between Admin queue and operational queue
		 */
		if (retval == min_vec)
			mrioc->op_reply_q_offset = 0;
		else if (retval != (max_vectors)) {
			ioc_info(mrioc,
			    "allocated vectors (%d) are less than configured (%d)\n",
			    retval, max_vectors);
		}

		max_vectors = retval;
		mrioc->op_reply_q_offset = (max_vectors > 1) ?
		    1 : 0;

		/* Re-validate poll queues against the vectors actually granted. */
		mpi3mr_calc_poll_queues(mrioc, max_vectors);

	}

	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
	    GFP_KERNEL);
	if (!mrioc->intr_info) {
		retval = -ENOMEM;
		pci_free_irq_vectors(mrioc->pdev);
		goto out_failed;
	}
	for (i = 0; i < max_vectors; i++) {
		retval = mpi3mr_request_irq(mrioc, i);
		if (retval) {
			mrioc->intr_info_count = i;
			goto out_failed;
		}
	}
	if (reset_devices || !setup_one)
		mrioc->is_intr_info_set = true;
	mrioc->intr_info_count = max_vectors;
	mpi3mr_ioc_enable_intr(mrioc);
	return 0;

out_failed:
	mpi3mr_cleanup_isr(mrioc);

	return retval;
}

/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};

/*
 * mpi3mr_iocstate_name - map an IOC state to a printable name
 * @mrioc_state: IOC state enum value
 *
 * Return: name string, or NULL when the state is not in the table.
 */
static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
		if (mrioc_states[i].value == mrioc_state) {
			name = mrioc_states[i].name;
			break;
		}
	}
	return name;
}

/* Reset reason to name mapper structure*/
static const struct {
	enum mpi3mr_reset_reason value;
	char *name;
} mpi3mr_reset_reason_codes[] = {
	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
	{ MPI3MR_RESET_FROM_APP, "application invocation" },
	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
	/*
	 * NOTE(review): the two strings below look swapped relative to their
	 * codes — CREATEREPQ (reply queue) is labelled "create request queue
	 * timeout" and CREATEREQQ (request queue) is labelled "create reply
	 * queue timeout". Confirm against the enum definitions and fix.
	 */
	{
		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
		"create request queue timeout"
	},
	{
		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
		"create reply queue timeout"
	},
	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
	{
		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
		"component image activation timeout"
	},
	{
		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
		"get package version timeout"
	},
	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
};

/**
 * mpi3mr_reset_rc_name - get reset reason code name
 * @reason_code: reset reason code value
 *
 * Map reset reason to a NUL terminated ASCII string
 *
 * Return: name corresponding to reset reason value or NULL.
 */
static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
			name = mpi3mr_reset_reason_codes[i].name;
			break;
		}
	}
	return name;
}

/* Reset type to name mapper structure*/
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};

/**
 * mpi3mr_reset_type_name - get reset type name
 * @reset_type: reset type value
 *
 * Map reset type to a NUL terminated ASCII string
 *
 * Return: name corresponding to reset type value or NULL.
 */
static const char *mpi3mr_reset_type_name(u16 reset_type)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
		if (mpi3mr_reset_types[i].reset_type == reset_type) {
			name = mpi3mr_reset_types[i].name;
			break;
		}
	}
	return name;
}

/**
 * mpi3mr_print_fault_info - Display fault information
 * @mrioc: Adapter instance reference
 *
 * Display the controller fault information if there is a
 * controller fault.
 *
 * Return: Nothing.
 */
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, code, code1, code2, code3;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		code = readl(&mrioc->sysif_regs->fault);
		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
		code3 = readl(&mrioc->sysif_regs->fault_info[2]);

		ioc_info(mrioc,
		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
		    code, code1, code2, code3);
	}
}

/**
 * mpi3mr_get_iocstate - Get IOC State
 * @mrioc: Adapter instance reference
 *
 * Return a proper IOC state enum based on the IOC status and
 * IOC configuration and unrecoverable state of the controller.
 *
 * Return: Current IOC state.
 */
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, ioc_config;
	u8 ready, enabled;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (mrioc->unrecoverable)
		return MRIOC_STATE_UNRECOVERABLE;
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
		return MRIOC_STATE_FAULT;

	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);

	if (ready && enabled)
		return MRIOC_STATE_READY;
	if ((!ready) && (!enabled))
		return MRIOC_STATE_RESET;
	if ((!ready) && (enabled))
		return MRIOC_STATE_BECOMING_READY;

	/* ready && !enabled: the controller wants a reset. */
	return MRIOC_STATE_RESET_REQUESTED;
}

/**
 * mpi3mr_clear_reset_history - clear reset history
 * @mrioc: Adapter instance reference
 *
 * Write the reset history bit in IOC status to clear the bit,
 * if it is already set.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* The bit is write-1-to-clear; writing the read-back value clears it. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad, then clear ENABLE_IOC to trigger MUR. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Timeout is in 100ms polls: MPI3MR_MUR_TIMEOUT seconds total. */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Return zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	void *removepend_bitmap;

	/* The reply pool was sized from the old facts; it cannot grow in place. */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* Grow the device-removal-pending bitmap when max_devhandle increased. */
	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
		    GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap bits from %d to %d\n",
			    mrioc->dev_handle_bitmap_bits,
			    mrioc->facts.max_devhandle);
			return -EPERM;
		}
		bitmap_free(mrioc->removepend_bitmap);
		mrioc->removepend_bitmap = removepend_bitmap;
ioc_info(mrioc, 1175 "increased bits of dev_handle_bitmap from %d to %d\n", 1176 mrioc->dev_handle_bitmap_bits, 1177 mrioc->facts.max_devhandle); 1178 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; 1179 } 1180 1181 return 0; 1182 } 1183 1184 /** 1185 * mpi3mr_bring_ioc_ready - Bring controller to ready state 1186 * @mrioc: Adapter instance reference 1187 * 1188 * Set Enable IOC bit in IOC configuration register and wait for 1189 * the controller to become ready. 1190 * 1191 * Return: 0 on success, appropriate error on failure. 1192 */ 1193 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 1194 { 1195 u32 ioc_config, ioc_status, timeout; 1196 int retval = 0; 1197 enum mpi3mr_iocstate ioc_state; 1198 u64 base_info; 1199 1200 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1201 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1202 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 1203 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", 1204 ioc_status, ioc_config, base_info); 1205 1206 /*The timeout value is in 2sec unit, changing it to seconds*/ 1207 mrioc->ready_timeout = 1208 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 1209 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 1210 1211 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); 1212 1213 ioc_state = mpi3mr_get_iocstate(mrioc); 1214 ioc_info(mrioc, "controller is in %s state during detection\n", 1215 mpi3mr_iocstate_name(ioc_state)); 1216 1217 if (ioc_state == MRIOC_STATE_BECOMING_READY || 1218 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 1219 timeout = mrioc->ready_timeout * 10; 1220 do { 1221 msleep(100); 1222 } while (--timeout); 1223 1224 if (!pci_device_is_present(mrioc->pdev)) { 1225 mrioc->unrecoverable = 1; 1226 ioc_err(mrioc, 1227 "controller is not present while waiting to reset\n"); 1228 retval = -1; 1229 goto out_device_not_present; 1230 } 1231 1232 ioc_state = 
mpi3mr_get_iocstate(mrioc); 1233 ioc_info(mrioc, 1234 "controller is in %s state after waiting to reset\n", 1235 mpi3mr_iocstate_name(ioc_state)); 1236 } 1237 1238 if (ioc_state == MRIOC_STATE_READY) { 1239 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); 1240 retval = mpi3mr_issue_and_process_mur(mrioc, 1241 MPI3MR_RESET_FROM_BRINGUP); 1242 ioc_state = mpi3mr_get_iocstate(mrioc); 1243 if (retval) 1244 ioc_err(mrioc, 1245 "message unit reset failed with error %d current state %s\n", 1246 retval, mpi3mr_iocstate_name(ioc_state)); 1247 } 1248 if (ioc_state != MRIOC_STATE_RESET) { 1249 mpi3mr_print_fault_info(mrioc); 1250 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1251 retval = mpi3mr_issue_reset(mrioc, 1252 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1253 MPI3MR_RESET_FROM_BRINGUP); 1254 if (retval) { 1255 ioc_err(mrioc, 1256 "soft reset failed with error %d\n", retval); 1257 goto out_failed; 1258 } 1259 } 1260 ioc_state = mpi3mr_get_iocstate(mrioc); 1261 if (ioc_state != MRIOC_STATE_RESET) { 1262 ioc_err(mrioc, 1263 "cannot bring controller to reset state, current state: %s\n", 1264 mpi3mr_iocstate_name(ioc_state)); 1265 goto out_failed; 1266 } 1267 mpi3mr_clear_reset_history(mrioc); 1268 retval = mpi3mr_setup_admin_qpair(mrioc); 1269 if (retval) { 1270 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1271 retval); 1272 goto out_failed; 1273 } 1274 1275 ioc_info(mrioc, "bringing controller to ready state\n"); 1276 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1277 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1278 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1279 1280 timeout = mrioc->ready_timeout * 10; 1281 do { 1282 ioc_state = mpi3mr_get_iocstate(mrioc); 1283 if (ioc_state == MRIOC_STATE_READY) { 1284 ioc_info(mrioc, 1285 "successfully transitioned to %s state\n", 1286 mpi3mr_iocstate_name(ioc_state)); 1287 return 0; 1288 } 1289 if (!pci_device_is_present(mrioc->pdev)) { 
1290 mrioc->unrecoverable = 1; 1291 ioc_err(mrioc, 1292 "controller is not present at the bringup\n"); 1293 retval = -1; 1294 goto out_device_not_present; 1295 } 1296 msleep(100); 1297 } while (--timeout); 1298 1299 out_failed: 1300 ioc_state = mpi3mr_get_iocstate(mrioc); 1301 ioc_err(mrioc, 1302 "failed to bring to ready state, current state: %s\n", 1303 mpi3mr_iocstate_name(ioc_state)); 1304 out_device_not_present: 1305 return retval; 1306 } 1307 1308 /** 1309 * mpi3mr_soft_reset_success - Check softreset is success or not 1310 * @ioc_status: IOC status register value 1311 * @ioc_config: IOC config register value 1312 * 1313 * Check whether the soft reset is successful or not based on 1314 * IOC status and IOC config register values. 1315 * 1316 * Return: True when the soft reset is success, false otherwise. 1317 */ 1318 static inline bool 1319 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1320 { 1321 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1322 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1323 return true; 1324 return false; 1325 } 1326 1327 /** 1328 * mpi3mr_diagfault_success - Check diag fault is success or not 1329 * @mrioc: Adapter reference 1330 * @ioc_status: IOC status register value 1331 * 1332 * Check whether the controller hit diag reset fault code. 1333 * 1334 * Return: True when there is diag fault, false otherwise. 
1335 */ 1336 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1337 u32 ioc_status) 1338 { 1339 u32 fault; 1340 1341 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1342 return false; 1343 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1344 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { 1345 mpi3mr_print_fault_info(mrioc); 1346 return true; 1347 } 1348 return false; 1349 } 1350 1351 /** 1352 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1353 * @mrioc: Adapter reference 1354 * 1355 * Set diag save bit in IOC configuration register to enable 1356 * snapdump. 1357 * 1358 * Return: Nothing. 1359 */ 1360 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1361 { 1362 u32 ioc_config; 1363 1364 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1365 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1366 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1367 } 1368 1369 /** 1370 * mpi3mr_issue_reset - Issue reset to the controller 1371 * @mrioc: Adapter reference 1372 * @reset_type: Reset type 1373 * @reset_reason: Reset reason code 1374 * 1375 * Unlock the host diagnostic registers and write the specific 1376 * reset type to that, wait for reset acknowledgment from the 1377 * controller, if the reset is not successful retry for the 1378 * predefined number of times. 1379 * 1380 * Return: 0 on success, non-zero on failure. 
1381 */ 1382 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1383 u32 reset_reason) 1384 { 1385 int retval = -1; 1386 u8 unlock_retry_count = 0; 1387 u32 host_diagnostic, ioc_status, ioc_config; 1388 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 1389 1390 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1391 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1392 return retval; 1393 if (mrioc->unrecoverable) 1394 return retval; 1395 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { 1396 retval = 0; 1397 return retval; 1398 } 1399 1400 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1401 mpi3mr_reset_type_name(reset_type), 1402 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1403 1404 mpi3mr_clear_reset_history(mrioc); 1405 do { 1406 ioc_info(mrioc, 1407 "Write magic sequence to unlock host diag register (retry=%d)\n", 1408 ++unlock_retry_count); 1409 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1410 ioc_err(mrioc, 1411 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", 1412 mpi3mr_reset_type_name(reset_type), 1413 host_diagnostic); 1414 mrioc->unrecoverable = 1; 1415 return retval; 1416 } 1417 1418 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1419 &mrioc->sysif_regs->write_sequence); 1420 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1421 &mrioc->sysif_regs->write_sequence); 1422 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1423 &mrioc->sysif_regs->write_sequence); 1424 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1425 &mrioc->sysif_regs->write_sequence); 1426 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1427 &mrioc->sysif_regs->write_sequence); 1428 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1429 &mrioc->sysif_regs->write_sequence); 1430 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1431 &mrioc->sysif_regs->write_sequence); 1432 usleep_range(1000, 1100); 1433 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1434 ioc_info(mrioc, 1435 "wrote 
magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1436 unlock_retry_count, host_diagnostic); 1437 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1438 1439 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1440 writel(host_diagnostic | reset_type, 1441 &mrioc->sysif_regs->host_diagnostic); 1442 switch (reset_type) { 1443 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: 1444 do { 1445 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1446 ioc_config = 1447 readl(&mrioc->sysif_regs->ioc_configuration); 1448 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 1449 && mpi3mr_soft_reset_success(ioc_status, ioc_config) 1450 ) { 1451 mpi3mr_clear_reset_history(mrioc); 1452 retval = 0; 1453 break; 1454 } 1455 msleep(100); 1456 } while (--timeout); 1457 mpi3mr_print_fault_info(mrioc); 1458 break; 1459 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: 1460 do { 1461 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1462 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1463 retval = 0; 1464 break; 1465 } 1466 msleep(100); 1467 } while (--timeout); 1468 break; 1469 default: 1470 break; 1471 } 1472 1473 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1474 &mrioc->sysif_regs->write_sequence); 1475 1476 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1477 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1478 ioc_info(mrioc, 1479 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1480 (!retval)?"successful":"failed", ioc_status, 1481 ioc_config); 1482 if (retval) 1483 mrioc->unrecoverable = 1; 1484 return retval; 1485 } 1486 1487 /** 1488 * mpi3mr_admin_request_post - Post request to admin queue 1489 * @mrioc: Adapter reference 1490 * @admin_req: MPI3 request 1491 * @admin_req_sz: Request size 1492 * @ignore_reset: Ignore reset in process 1493 * 1494 * Post the MPI3 request into admin request queue and 1495 * inform the controller, if the queue is full return 1496 * appropriate error. 
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
	u16 admin_req_sz, u8 ignore_reset)
{
	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	unsigned long flags;
	u8 *areq_entry;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
		return -EFAULT;
	}

	/* Producer index and queue memory are protected by admin_req_lock */
	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
	areq_pi = mrioc->admin_req_pi;
	areq_ci = mrioc->admin_req_ci;
	max_entries = mrioc->num_admin_req;
	/* Full when the producer is one slot behind the consumer (wrapped) */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
	    (areq_pi == (max_entries - 1)))) {
		ioc_err(mrioc, "AdminReqQ full condition detected\n");
		retval = -EAGAIN;
		goto out;
	}
	if (!ignore_reset && mrioc->reset_in_progress) {
		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	/* Copy the request into the next free admin frame, zero-padded */
	areq_entry = (u8 *)mrioc->admin_req_base +
	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);

	if (++areq_pi == max_entries)
		areq_pi = 0;
	mrioc->admin_req_pi = areq_pi;

	/* Ring the doorbell: tell the controller about the new PI */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);

out:
	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);

	return retval;
}

/**
 * mpi3mr_free_op_req_q_segments - free request memory segments
 * @mrioc: Adapter instance reference
 * @q_idx: operational request queue index
 *
 * Free memory segments allocated for operational request queue
 *
 * Return: Nothing.
 */
static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	u16 j;
	int size;
	struct segments *segments;

	segments = mrioc->req_qinfo[q_idx].q_segments;
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed-size segments plus a segment list */
		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
		if (mrioc->req_qinfo[q_idx].q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    mrioc->req_qinfo[q_idx].q_segment_list,
			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
		}
	} else
		/* Contiguous mode: one segment holding the whole queue */
		size = mrioc->req_qinfo[q_idx].segment_qd *
		    mrioc->facts.op_req_sz;

	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
		if (!segments[j].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev,
		    size, segments[j].segment, segments[j].segment_dma);
		segments[j].segment = NULL;
	}
	kfree(mrioc->req_qinfo[q_idx].q_segments);
	mrioc->req_qinfo[q_idx].q_segments = NULL;
	mrioc->req_qinfo[q_idx].qid = 0;
}

/**
 * mpi3mr_free_op_reply_q_segments - free reply memory segments
 * @mrioc: Adapter instance reference
 * @q_idx: operational reply queue index
 *
 * Free memory segments allocated for operational reply queue
 *
 * Return: Nothing.
 */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	u16 j;
	int size;
	struct segments *segments;

	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed-size segments plus a segment list */
		size = MPI3MR_OP_REP_Q_SEG_SIZE;
		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
		}
	} else
		/* Contiguous mode: one segment holding the whole queue */
		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
		    mrioc->op_reply_desc_sz;

	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
		if (!segments[j].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev,
		    size, segments[j].segment, segments[j].segment_dma);
		segments[j].segment = NULL;
	}

	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
	mrioc->op_reply_qinfo[q_idx].qid = 0;
}

/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/*
	 * Drop the queue from the default/poll accounting up front.
	 * NOTE(review): this happens before the delete request is posted,
	 * so the counters stay decremented even if the request fails.
	 */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single shared slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue teardown must proceed even during reset */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "delete reply queue timed out\n");
		/* Timed-out admin command: escalate to reset handling */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Fixed-size segments; depth per segment derived from size */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Contiguous mode: single segment holds the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/*
	 * On failure partial allocations are left in place; the caller
	 * (mpi3mr_create_op_reply_q) frees them via
	 * mpi3mr_free_op_reply_q_segments().
	 */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Fixed-size segments; depth per segment derived from size */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		/* Contiguous mode: single segment holds the whole queue */
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/*
	 * On failure partial allocations are left in place; the caller
	 * (mpi3mr_create_op_req_q) frees them via
	 * mpi3mr_free_op_req_q_segments().
	 */
	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned
long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based on the controller side */
	reply_qid = qidx + 1;
	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
	/* Revision 0 parts get the reduced queue depth */
	if (!mrioc->pdev->revision)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;

	/* Reuse segments across reset; allocate only on first creation */
	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is a single shared slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* MSI-x vectors past the requested poll count serve poll queues */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
		    cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
		    reply_qid, midx);
		/* First poll queue: its interrupt vector is never serviced */
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): this uses the CREATE_REQUEST_QUEUE segmented
		 * flag for a reply queue - presumably the flag bit matches
		 * the reply-queue definition; confirm against mpi3 headers.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue setup is part of reset recovery itself */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based on the controller side */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	/* Reuse segments across reset; allocate only on first creation */
	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is a single shared slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		/* Segmented queue: controller walks the DMA segment list */
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		/* Contiguous queue: single segment is the queue itself */
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue setup is part of reset recovery itself */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		/* Timed-out admin command: escalate to reset handling */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Non-zero qid marks the queue as live */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_queues - create operational queue pairs
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for operational queue meta data and call
 * create request and reply
queue functions. 2070 * 2071 * Return: 0 on success, non-zero on failures. 2072 */ 2073 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 2074 { 2075 int retval = 0; 2076 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 2077 2078 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 2079 mrioc->facts.max_op_req_q); 2080 2081 msix_count_op_q = 2082 mrioc->intr_info_count - mrioc->op_reply_q_offset; 2083 if (!mrioc->num_queues) 2084 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 2085 /* 2086 * During reset set the num_queues to the number of queues 2087 * that was set before the reset. 2088 */ 2089 num_queues = mrioc->num_op_reply_q ? 2090 mrioc->num_op_reply_q : mrioc->num_queues; 2091 ioc_info(mrioc, "trying to create %d operational queue pairs\n", 2092 num_queues); 2093 2094 if (!mrioc->req_qinfo) { 2095 mrioc->req_qinfo = kcalloc(num_queues, 2096 sizeof(struct op_req_qinfo), GFP_KERNEL); 2097 if (!mrioc->req_qinfo) { 2098 retval = -1; 2099 goto out_failed; 2100 } 2101 2102 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 2103 num_queues, GFP_KERNEL); 2104 if (!mrioc->op_reply_qinfo) { 2105 retval = -1; 2106 goto out_failed; 2107 } 2108 } 2109 2110 if (mrioc->enable_segqueue) 2111 ioc_info(mrioc, 2112 "allocating operational queues through segmented queues\n"); 2113 2114 for (i = 0; i < num_queues; i++) { 2115 if (mpi3mr_create_op_reply_q(mrioc, i)) { 2116 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 2117 break; 2118 } 2119 if (mpi3mr_create_op_req_q(mrioc, i, 2120 mrioc->op_reply_qinfo[i].qid)) { 2121 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 2122 mpi3mr_delete_op_reply_q(mrioc, i); 2123 break; 2124 } 2125 } 2126 2127 if (i == 0) { 2128 /* Not even one queue is created successfully*/ 2129 retval = -1; 2130 goto out_failed; 2131 } 2132 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 2133 ioc_info(mrioc, 2134 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", 2135 
mrioc->num_op_reply_q, mrioc->default_qcount, 2136 mrioc->active_poll_qcount); 2137 2138 return retval; 2139 out_failed: 2140 kfree(mrioc->req_qinfo); 2141 mrioc->req_qinfo = NULL; 2142 2143 kfree(mrioc->op_reply_qinfo); 2144 mrioc->op_reply_qinfo = NULL; 2145 2146 return retval; 2147 } 2148 2149 /** 2150 * mpi3mr_op_request_post - Post request to operational queue 2151 * @mrioc: Adapter reference 2152 * @op_req_q: Operational request queue info 2153 * @req: MPI3 request 2154 * 2155 * Post the MPI3 request into operational request queue and 2156 * inform the controller, if the queue is full return 2157 * appropriate error. 2158 * 2159 * Return: 0 on success, non-zero on failure. 2160 */ 2161 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 2162 struct op_req_qinfo *op_req_q, u8 *req) 2163 { 2164 u16 pi = 0, max_entries, reply_qidx = 0, midx; 2165 int retval = 0; 2166 unsigned long flags; 2167 u8 *req_entry; 2168 void *segment_base_addr; 2169 u16 req_sz = mrioc->facts.op_req_sz; 2170 struct segments *segments = op_req_q->q_segments; 2171 2172 reply_qidx = op_req_q->reply_qid - 1; 2173 2174 if (mrioc->unrecoverable) 2175 return -EFAULT; 2176 2177 spin_lock_irqsave(&op_req_q->q_lock, flags); 2178 pi = op_req_q->pi; 2179 max_entries = op_req_q->num_requests; 2180 2181 if (mpi3mr_check_req_qfull(op_req_q)) { 2182 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 2183 reply_qidx, mrioc->op_reply_q_offset); 2184 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); 2185 2186 if (mpi3mr_check_req_qfull(op_req_q)) { 2187 retval = -EAGAIN; 2188 goto out; 2189 } 2190 } 2191 2192 if (mrioc->reset_in_progress) { 2193 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 2194 retval = -EAGAIN; 2195 goto out; 2196 } 2197 2198 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 2199 req_entry = (u8 *)segment_base_addr + 2200 ((pi % op_req_q->segment_qd) * req_sz); 2201 2202 memset(req_entry, 0, req_sz); 2203 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 2204 
	/* Advance the producer index with wrap-around */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/*
	 * Once enough I/Os are pending on this reply queue, allow its
	 * completion handling to switch to irq-poll mode.
	 */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Publish the new PI to the controller (doorbell) */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}

/**
 * mpi3mr_check_rh_fault_ioc - check reset history and fault
 * controller
 * @mrioc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will save snapdump and fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
 * immediate soft reset invocation is not required.
 *
 * Return: None.
 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	/* A vanished PCI device makes the controller unrecoverable */
	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}

	/* Already faulted or reset by the firmware - just report it */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		mpi3mr_print_fault_info(mrioc);
		return;
	}
	/* Request a diag-fault reset so the snapdump gets saved */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	/* Poll in 100 ms steps until the diag save completes or times out */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}

/**
 * mpi3mr_sync_timestamp - Issue time stamp sync request
 * @mrioc: Adapter reference
 *
 * Issue IO unit control MPI request to synchronize firmware
 * timestamp with host time.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single shared slot; bail if it is busy */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Host wall-clock time is passed to the firmware in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Escalate to soft reset unless a reset already flushed us */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state =
MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer to receive the manifest from controller flash */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* The manifest immediately follows the image header in flash */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version 
failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Printing the version is best effort; only log on full success */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 fault, host_diagnostic, ioc_status;
	u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	if (mrioc->reset_in_progress)
		return;

	/* A vanished PCI device makes the controller unrecoverable */
	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* Periodic (every MPI3MR_TSUPDATE_INTERVAL ticks) timestamp sync */
	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware missed the prepare-for-reset window; force a reset */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		/* Give the firmware time to finish saving the snapdump */
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto
schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	switch (fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A reset is already underway; stop re-arming the watchdog */
		return;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/*
	 * Re-arm under watchdog_lock so we cannot race with
	 * mpi3mr_stop_watchdog() clearing watchdog_work_q.
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}

/**
 * mpi3mr_start_watchdog - Start watchdog
 * @mrioc: Adapter instance reference
 *
 * Create and start the watchdog thread to monitor controller
 * faults.
 *
 * Return: Nothing.
2534 */ 2535 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2536 { 2537 if (mrioc->watchdog_work_q) 2538 return; 2539 2540 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2541 snprintf(mrioc->watchdog_work_q_name, 2542 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2543 mrioc->id); 2544 mrioc->watchdog_work_q = 2545 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2546 if (!mrioc->watchdog_work_q) { 2547 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2548 return; 2549 } 2550 2551 if (mrioc->watchdog_work_q) 2552 queue_delayed_work(mrioc->watchdog_work_q, 2553 &mrioc->watchdog_work, 2554 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2555 } 2556 2557 /** 2558 * mpi3mr_stop_watchdog - Stop watchdog 2559 * @mrioc: Adapter instance reference 2560 * 2561 * Stop the watchdog thread created to monitor controller 2562 * faults. 2563 * 2564 * Return: Nothing. 2565 */ 2566 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) 2567 { 2568 unsigned long flags; 2569 struct workqueue_struct *wq; 2570 2571 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2572 wq = mrioc->watchdog_work_q; 2573 mrioc->watchdog_work_q = NULL; 2574 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2575 if (wq) { 2576 if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) 2577 flush_workqueue(wq); 2578 destroy_workqueue(wq); 2579 } 2580 } 2581 2582 /** 2583 * mpi3mr_setup_admin_qpair - Setup admin queue pair 2584 * @mrioc: Adapter instance reference 2585 * 2586 * Allocate memory for admin queue pair if required and register 2587 * the admin queue with the controller. 2588 * 2589 * Return: 0 on success, non-zero on failures. 
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Entry counts are derived from fixed queue and frame sizes */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;

	/* Queue memory survives resets; allocate only on first-time init */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/*
	 * Program the controller: entry counts (replies in the high
	 * word, requests in the low word), base addresses and initial
	 * producer/consumer indices of both admin queues.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base
= NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer into which the firmware writes the facts */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single shared slot; bail if it is busy */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
	    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Cache the raw facts and convert/store them in mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Process IOC facts data
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask; never widen beyond the current one */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity-check the firmware-reported facts structure length */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size comes from the IOC config register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Copy each field, converting from little-endian where needed */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz =
le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never use more MSI-x vectors than the firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	mrioc->facts.max_dev_per_tg =
	    facts_data->max_devices_per_throttle_group;
	mrioc->facts.io_throttle_data_length =
	    le16_to_cpu(facts_data->io_throttle_data_length);
	mrioc->facts.max_io_throttle_group =
	    le16_to_cpu(facts_data->max_io_throttle_group);
	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
	mrioc->facts.io_throttle_high =
	    le16_to_cpu(facts_data->io_throttle_high);

	/* Store in 512b block count */
	if (mrioc->facts.io_throttle_data_length)
		/* facts value is in 4 KiB units (see info print below) */
		mrioc->io_throttle_data_length =
		    (mrioc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle */
		mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	/* Convert the MiB limits to 512b block counts (1 MiB = 2048 blocks) */
	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
	ioc_info(mrioc,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
	ioc_info(mrioc,
	    "io_throttle_data_len(%dKiB), 
io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    mrioc->facts.io_throttle_data_length * 4,
	    mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-initialization after a reset) */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	/* Per-slot reply buffers for device-removal handshake commands */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	/* Per-slot reply buffers for event-acknowledge commands */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* One bit per possible device handle */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/*
	 * NOTE(review): queue depth is buffer count + 1 — presumably so
	 * the last ring element can hold a zero terminator (see
	 * mpimr_initialize_reply_sbuf_queues()); confirm.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used by mpi3mr_get_reply_virt_addr() range checks */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): allocations made so far are not freed here —
	 * presumably released by the caller's failure/teardown path;
	 * confirm against the init code.
	 */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/*
	 * initialize Reply buffer Queue: one DMA address per reply
	 * buffer, then a zero entry in the extra (qsz = bufs + 1) slot.
	 */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue the same way */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion
of it or time out. 3092 * 3093 * Return: 0 on success, non-zero on failures. 3094 */ 3095 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) 3096 { 3097 struct mpi3_ioc_init_request iocinit_req; 3098 struct mpi3_driver_info_layout *drv_info; 3099 dma_addr_t data_dma; 3100 u32 data_len = sizeof(*drv_info); 3101 int retval = 0; 3102 ktime_t current_time; 3103 3104 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 3105 GFP_KERNEL); 3106 if (!drv_info) { 3107 retval = -1; 3108 goto out; 3109 } 3110 mpimr_initialize_reply_sbuf_queues(mrioc); 3111 3112 drv_info->information_length = cpu_to_le32(data_len); 3113 strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); 3114 strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); 3115 strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); 3116 strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); 3117 strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); 3118 strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, 3119 sizeof(drv_info->driver_release_date)); 3120 drv_info->driver_capabilities = 0; 3121 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, 3122 sizeof(mrioc->driver_info)); 3123 3124 memset(&iocinit_req, 0, sizeof(iocinit_req)); 3125 mutex_lock(&mrioc->init_cmds.mutex); 3126 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3127 retval = -1; 3128 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); 3129 mutex_unlock(&mrioc->init_cmds.mutex); 3130 goto out; 3131 } 3132 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3133 mrioc->init_cmds.is_waiting = 1; 3134 mrioc->init_cmds.callback = NULL; 3135 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3136 iocinit_req.function = MPI3_FUNCTION_IOC_INIT; 3137 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; 3138 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; 
3139 iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; 3140 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; 3141 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; 3142 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); 3143 iocinit_req.reply_free_queue_address = 3144 cpu_to_le64(mrioc->reply_free_q_dma); 3145 iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ); 3146 iocinit_req.sense_buffer_free_queue_depth = 3147 cpu_to_le16(mrioc->sense_buf_q_sz); 3148 iocinit_req.sense_buffer_free_queue_address = 3149 cpu_to_le64(mrioc->sense_buf_q_dma); 3150 iocinit_req.driver_information_address = cpu_to_le64(data_dma); 3151 3152 current_time = ktime_get_real(); 3153 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); 3154 3155 init_completion(&mrioc->init_cmds.done); 3156 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, 3157 sizeof(iocinit_req), 1); 3158 if (retval) { 3159 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); 3160 goto out_unlock; 3161 } 3162 wait_for_completion_timeout(&mrioc->init_cmds.done, 3163 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3164 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3165 mpi3mr_check_rh_fault_ioc(mrioc, 3166 MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); 3167 ioc_err(mrioc, "ioc_init timed out\n"); 3168 retval = -1; 3169 goto out_unlock; 3170 } 3171 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3172 != MPI3_IOCSTATUS_SUCCESS) { 3173 ioc_err(mrioc, 3174 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3175 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3176 mrioc->init_cmds.ioc_loginfo); 3177 retval = -1; 3178 goto out_unlock; 3179 } 3180 3181 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; 3182 writel(mrioc->reply_free_queue_host_index, 3183 &mrioc->sysif_regs->reply_free_host_index); 3184 3185 mrioc->sbq_host_index = mrioc->num_sense_bufs; 3186 writel(mrioc->sbq_host_index, 3187 
&mrioc->sysif_regs->sense_buffer_free_host_index); 3188 out_unlock: 3189 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3190 mutex_unlock(&mrioc->init_cmds.mutex); 3191 3192 out: 3193 if (drv_info) 3194 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, 3195 data_dma); 3196 3197 return retval; 3198 } 3199 3200 /** 3201 * mpi3mr_unmask_events - Unmask events in event mask bitmap 3202 * @mrioc: Adapter instance reference 3203 * @event: MPI event ID 3204 * 3205 * Un mask the specific event by resetting the event_mask 3206 * bitmap. 3207 * 3208 * Return: 0 on success, non-zero on failures. 3209 */ 3210 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event) 3211 { 3212 u32 desired_event; 3213 u8 word; 3214 3215 if (event >= 128) 3216 return; 3217 3218 desired_event = (1 << (event % 32)); 3219 word = event / 32; 3220 3221 mrioc->event_masks[word] &= ~desired_event; 3222 } 3223 3224 /** 3225 * mpi3mr_issue_event_notification - Send event notification 3226 * @mrioc: Adapter instance reference 3227 * 3228 * Issue event notification MPI request through admin queue and 3229 * wait for the completion of it or time out. 3230 * 3231 * Return: 0 on success, non-zero on failures. 
3232 */ 3233 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) 3234 { 3235 struct mpi3_event_notification_request evtnotify_req; 3236 int retval = 0; 3237 u8 i; 3238 3239 memset(&evtnotify_req, 0, sizeof(evtnotify_req)); 3240 mutex_lock(&mrioc->init_cmds.mutex); 3241 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3242 retval = -1; 3243 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); 3244 mutex_unlock(&mrioc->init_cmds.mutex); 3245 goto out; 3246 } 3247 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3248 mrioc->init_cmds.is_waiting = 1; 3249 mrioc->init_cmds.callback = NULL; 3250 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3251 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; 3252 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3253 evtnotify_req.event_masks[i] = 3254 cpu_to_le32(mrioc->event_masks[i]); 3255 init_completion(&mrioc->init_cmds.done); 3256 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, 3257 sizeof(evtnotify_req), 1); 3258 if (retval) { 3259 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); 3260 goto out_unlock; 3261 } 3262 wait_for_completion_timeout(&mrioc->init_cmds.done, 3263 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3264 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3265 ioc_err(mrioc, "event notification timed out\n"); 3266 mpi3mr_check_rh_fault_ioc(mrioc, 3267 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); 3268 retval = -1; 3269 goto out_unlock; 3270 } 3271 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3272 != MPI3_IOCSTATUS_SUCCESS) { 3273 ioc_err(mrioc, 3274 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3275 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3276 mrioc->init_cmds.ioc_loginfo); 3277 retval = -1; 3278 goto out_unlock; 3279 } 3280 3281 out_unlock: 3282 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3283 mutex_unlock(&mrioc->init_cmds.mutex); 3284 out: 3285 return retval; 3286 } 3287 3288 /** 3289 * 
mpi3mr_process_event_ack - Process event acknowledgment 3290 * @mrioc: Adapter instance reference 3291 * @event: MPI3 event ID 3292 * @event_ctx: event context 3293 * 3294 * Send event acknowledgment through admin queue and wait for 3295 * it to complete. 3296 * 3297 * Return: 0 on success, non-zero on failures. 3298 */ 3299 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3300 u32 event_ctx) 3301 { 3302 struct mpi3_event_ack_request evtack_req; 3303 int retval = 0; 3304 3305 memset(&evtack_req, 0, sizeof(evtack_req)); 3306 mutex_lock(&mrioc->init_cmds.mutex); 3307 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3308 retval = -1; 3309 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3310 mutex_unlock(&mrioc->init_cmds.mutex); 3311 goto out; 3312 } 3313 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3314 mrioc->init_cmds.is_waiting = 1; 3315 mrioc->init_cmds.callback = NULL; 3316 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3317 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3318 evtack_req.event = event; 3319 evtack_req.event_context = cpu_to_le32(event_ctx); 3320 3321 init_completion(&mrioc->init_cmds.done); 3322 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3323 sizeof(evtack_req), 1); 3324 if (retval) { 3325 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3326 goto out_unlock; 3327 } 3328 wait_for_completion_timeout(&mrioc->init_cmds.done, 3329 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3330 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3331 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3332 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3333 mpi3mr_soft_reset_handler(mrioc, 3334 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 3335 retval = -1; 3336 goto out_unlock; 3337 } 3338 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3339 != MPI3_IOCSTATUS_SUCCESS) { 3340 ioc_err(mrioc, 3341 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3342 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3343 mrioc->init_cmds.ioc_loginfo); 3344 retval = -1; 3345 goto out_unlock; 3346 } 3347 3348 out_unlock: 3349 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3350 mutex_unlock(&mrioc->init_cmds.mutex); 3351 out: 3352 return retval; 3353 } 3354 3355 /** 3356 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3357 * @mrioc: Adapter instance reference 3358 * 3359 * Allocate chain buffers and set a bitmap to indicate free 3360 * chain buffers. Chain buffers are used to pass the SGE 3361 * information along with MPI3 SCSI IO requests for host I/O. 3362 * 3363 * Return: 0 on success, non-zero on failure 3364 */ 3365 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3366 { 3367 int retval = 0; 3368 u32 sz, i; 3369 u16 num_chains; 3370 3371 if (mrioc->chain_sgl_list) 3372 return retval; 3373 3374 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3375 3376 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3377 | SHOST_DIX_TYPE1_PROTECTION 3378 | SHOST_DIX_TYPE2_PROTECTION 3379 | SHOST_DIX_TYPE3_PROTECTION)) 3380 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3381 3382 mrioc->chain_buf_count = num_chains; 3383 sz = sizeof(struct chain_element) * num_chains; 3384 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3385 if (!mrioc->chain_sgl_list) 3386 goto out_failed; 3387 3388 sz = MPI3MR_PAGE_SIZE_4K; 3389 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3390 &mrioc->pdev->dev, sz, 16, 0); 3391 if (!mrioc->chain_buf_pool) { 3392 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3393 goto out_failed; 3394 } 3395 3396 for (i = 0; i < num_chains; i++) { 3397 mrioc->chain_sgl_list[i].addr = 3398 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3399 &mrioc->chain_sgl_list[i].dma_addr); 3400 3401 if (!mrioc->chain_sgl_list[i].addr) 3402 goto out_failed; 3403 } 3404 mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL); 3405 if (!mrioc->chain_bitmap) 3406 goto out_failed; 3407 return retval; 3408 out_failed: 
3409 retval = -1; 3410 return retval; 3411 } 3412 3413 /** 3414 * mpi3mr_port_enable_complete - Mark port enable complete 3415 * @mrioc: Adapter instance reference 3416 * @drv_cmd: Internal command tracker 3417 * 3418 * Call back for asynchronous port enable request sets the 3419 * driver command to indicate port enable request is complete. 3420 * 3421 * Return: Nothing 3422 */ 3423 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 3424 struct mpi3mr_drv_cmd *drv_cmd) 3425 { 3426 drv_cmd->callback = NULL; 3427 mrioc->scan_started = 0; 3428 if (drv_cmd->state & MPI3MR_CMD_RESET) 3429 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3430 else 3431 mrioc->scan_failed = drv_cmd->ioc_status; 3432 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3433 } 3434 3435 /** 3436 * mpi3mr_issue_port_enable - Issue Port Enable 3437 * @mrioc: Adapter instance reference 3438 * @async: Flag to wait for completion or not 3439 * 3440 * Issue Port Enable MPI request through admin queue and if the 3441 * async flag is not set wait for the completion of the port 3442 * enable or time out. 3443 * 3444 * Return: 0 on success, non-zero on failures. 
3445 */ 3446 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) 3447 { 3448 struct mpi3_port_enable_request pe_req; 3449 int retval = 0; 3450 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3451 3452 memset(&pe_req, 0, sizeof(pe_req)); 3453 mutex_lock(&mrioc->init_cmds.mutex); 3454 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3455 retval = -1; 3456 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); 3457 mutex_unlock(&mrioc->init_cmds.mutex); 3458 goto out; 3459 } 3460 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3461 if (async) { 3462 mrioc->init_cmds.is_waiting = 0; 3463 mrioc->init_cmds.callback = mpi3mr_port_enable_complete; 3464 } else { 3465 mrioc->init_cmds.is_waiting = 1; 3466 mrioc->init_cmds.callback = NULL; 3467 init_completion(&mrioc->init_cmds.done); 3468 } 3469 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3470 pe_req.function = MPI3_FUNCTION_PORT_ENABLE; 3471 3472 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); 3473 if (retval) { 3474 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); 3475 goto out_unlock; 3476 } 3477 if (async) { 3478 mutex_unlock(&mrioc->init_cmds.mutex); 3479 goto out; 3480 } 3481 3482 wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); 3483 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3484 ioc_err(mrioc, "port enable timed out\n"); 3485 retval = -1; 3486 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); 3487 goto out_unlock; 3488 } 3489 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); 3490 3491 out_unlock: 3492 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3493 mutex_unlock(&mrioc->init_cmds.mutex); 3494 out: 3495 return retval; 3496 } 3497 3498 /* Protocol type to name mapper structure */ 3499 static const struct { 3500 u8 protocol; 3501 char *name; 3502 } mpi3mr_protocols[] = { 3503 { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, 3504 { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, 3505 { 
MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, 3506 }; 3507 3508 /* Capability to name mapper structure*/ 3509 static const struct { 3510 u32 capability; 3511 char *name; 3512 } mpi3mr_capabilities[] = { 3513 { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, 3514 { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" }, 3515 }; 3516 3517 /** 3518 * mpi3mr_print_ioc_info - Display controller information 3519 * @mrioc: Adapter instance reference 3520 * 3521 * Display controller personalit, capability, supported 3522 * protocols etc. 3523 * 3524 * Return: Nothing 3525 */ 3526 static void 3527 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3528 { 3529 int i = 0, bytes_written = 0; 3530 char personality[16]; 3531 char protocol[50] = {0}; 3532 char capabilities[100] = {0}; 3533 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3534 3535 switch (mrioc->facts.personality) { 3536 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3537 strncpy(personality, "Enhanced HBA", sizeof(personality)); 3538 break; 3539 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3540 strncpy(personality, "RAID", sizeof(personality)); 3541 break; 3542 default: 3543 strncpy(personality, "Unknown", sizeof(personality)); 3544 break; 3545 } 3546 3547 ioc_info(mrioc, "Running in %s Personality", personality); 3548 3549 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3550 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3551 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3552 3553 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3554 if (mrioc->facts.protocol_flags & 3555 mpi3mr_protocols[i].protocol) { 3556 bytes_written += scnprintf(protocol + bytes_written, 3557 sizeof(protocol) - bytes_written, "%s%s", 3558 bytes_written ? 
"," : "", 3559 mpi3mr_protocols[i].name); 3560 } 3561 } 3562 3563 bytes_written = 0; 3564 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3565 if (mrioc->facts.protocol_flags & 3566 mpi3mr_capabilities[i].capability) { 3567 bytes_written += scnprintf(capabilities + bytes_written, 3568 sizeof(capabilities) - bytes_written, "%s%s", 3569 bytes_written ? "," : "", 3570 mpi3mr_capabilities[i].name); 3571 } 3572 } 3573 3574 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3575 protocol, capabilities); 3576 } 3577 3578 /** 3579 * mpi3mr_cleanup_resources - Free PCI resources 3580 * @mrioc: Adapter instance reference 3581 * 3582 * Unmap PCI device memory and disable PCI device. 3583 * 3584 * Return: 0 on success and non-zero on failure. 3585 */ 3586 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 3587 { 3588 struct pci_dev *pdev = mrioc->pdev; 3589 3590 mpi3mr_cleanup_isr(mrioc); 3591 3592 if (mrioc->sysif_regs) { 3593 iounmap((void __iomem *)mrioc->sysif_regs); 3594 mrioc->sysif_regs = NULL; 3595 } 3596 3597 if (pci_is_enabled(pdev)) { 3598 if (mrioc->bars) 3599 pci_release_selected_regions(pdev, mrioc->bars); 3600 pci_disable_device(pdev); 3601 } 3602 } 3603 3604 /** 3605 * mpi3mr_setup_resources - Enable PCI resources 3606 * @mrioc: Adapter instance reference 3607 * 3608 * Enable PCI device memory, MSI-x registers and set DMA mask. 3609 * 3610 * Return: 0 on success and non-zero on failure. 3611 */ 3612 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 3613 { 3614 struct pci_dev *pdev = mrioc->pdev; 3615 u32 memap_sz = 0; 3616 int i, retval = 0, capb = 0; 3617 u16 message_control; 3618 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 3619 ((sizeof(dma_addr_t) > 4) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 3620 3621 if (pci_enable_device_mem(pdev)) { 3622 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 3623 retval = -ENODEV; 3624 goto out_failed; 3625 } 3626 3627 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3628 if (!capb) { 3629 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 3630 retval = -ENODEV; 3631 goto out_failed; 3632 } 3633 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 3634 3635 if (pci_request_selected_regions(pdev, mrioc->bars, 3636 mrioc->driver_name)) { 3637 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 3638 retval = -ENODEV; 3639 goto out_failed; 3640 } 3641 3642 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 3643 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3644 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 3645 memap_sz = pci_resource_len(pdev, i); 3646 mrioc->sysif_regs = 3647 ioremap(mrioc->sysif_regs_phys, memap_sz); 3648 break; 3649 } 3650 } 3651 3652 pci_set_master(pdev); 3653 3654 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 3655 if (retval) { 3656 if (dma_mask != DMA_BIT_MASK(32)) { 3657 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 3658 dma_mask = DMA_BIT_MASK(32); 3659 retval = dma_set_mask_and_coherent(&pdev->dev, 3660 dma_mask); 3661 } 3662 if (retval) { 3663 mrioc->dma_mask = 0; 3664 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 3665 goto out_failed; 3666 } 3667 } 3668 mrioc->dma_mask = dma_mask; 3669 3670 if (!mrioc->sysif_regs) { 3671 ioc_err(mrioc, 3672 "Unable to map adapter memory or resource not found\n"); 3673 retval = -EINVAL; 3674 goto out_failed; 3675 } 3676 3677 pci_read_config_word(pdev, capb + 2, &message_control); 3678 mrioc->msix_count = (message_control & 0x3FF) + 1; 3679 3680 pci_save_state(pdev); 3681 3682 pci_set_drvdata(pdev, mrioc->shost); 3683 3684 mpi3mr_ioc_disable_intr(mrioc); 3685 3686 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 3687 (unsigned long long)mrioc->sysif_regs_phys, 
3688 mrioc->sysif_regs, memap_sz); 3689 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 3690 mrioc->msix_count); 3691 3692 if (!reset_devices && poll_queues > 0) 3693 mrioc->requested_poll_qcount = min_t(int, poll_queues, 3694 mrioc->msix_count - 2); 3695 return retval; 3696 3697 out_failed: 3698 mpi3mr_cleanup_resources(mrioc); 3699 return retval; 3700 } 3701 3702 /** 3703 * mpi3mr_enable_events - Enable required events 3704 * @mrioc: Adapter instance reference 3705 * 3706 * This routine unmasks the events required by the driver by 3707 * sennding appropriate event mask bitmapt through an event 3708 * notification request. 3709 * 3710 * Return: 0 on success and non-zero on failure. 3711 */ 3712 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) 3713 { 3714 int retval = 0; 3715 u32 i; 3716 3717 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3718 mrioc->event_masks[i] = -1; 3719 3720 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); 3721 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); 3722 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); 3723 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); 3724 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED); 3725 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 3726 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); 3727 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 3728 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); 3729 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 3730 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); 3731 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); 3732 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); 3733 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); 3734 3735 retval = mpi3mr_issue_event_notification(mrioc); 3736 if (retval) 3737 ioc_err(mrioc, "failed to issue event notification %d\n", 3738 
retval); 3739 return retval; 3740 } 3741 3742 /** 3743 * mpi3mr_init_ioc - Initialize the controller 3744 * @mrioc: Adapter instance reference 3745 * 3746 * This the controller initialization routine, executed either 3747 * after soft reset or from pci probe callback. 3748 * Setup the required resources, memory map the controller 3749 * registers, create admin and operational reply queue pairs, 3750 * allocate required memory for reply pool, sense buffer pool, 3751 * issue IOC init request to the firmware, unmask the events and 3752 * issue port enable to discover SAS/SATA/NVMe devies and RAID 3753 * volumes. 3754 * 3755 * Return: 0 on success and non-zero on failure. 3756 */ 3757 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 3758 { 3759 int retval = 0; 3760 u8 retry = 0; 3761 struct mpi3_ioc_facts_data facts_data; 3762 u32 sz; 3763 3764 retry_init: 3765 retval = mpi3mr_bring_ioc_ready(mrioc); 3766 if (retval) { 3767 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 3768 retval); 3769 goto out_failed_noretry; 3770 } 3771 3772 retval = mpi3mr_setup_isr(mrioc, 1); 3773 if (retval) { 3774 ioc_err(mrioc, "Failed to setup ISR error %d\n", 3775 retval); 3776 goto out_failed_noretry; 3777 } 3778 3779 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3780 if (retval) { 3781 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 3782 retval); 3783 goto out_failed; 3784 } 3785 3786 mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; 3787 3788 mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group; 3789 atomic_set(&mrioc->pend_large_data_sz, 0); 3790 3791 if (reset_devices) 3792 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, 3793 MPI3MR_HOST_IOS_KDUMP); 3794 3795 if (!(mrioc->facts.ioc_capabilities & 3796 MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) { 3797 mrioc->sas_transport_enabled = 1; 3798 mrioc->scsi_device_channel = 1; 3799 mrioc->shost->max_channel = 1; 3800 mrioc->shost->transportt = mpi3mr_transport_template; 3801 } 3802 
3803 mrioc->reply_sz = mrioc->facts.reply_sz; 3804 3805 retval = mpi3mr_check_reset_dma_mask(mrioc); 3806 if (retval) { 3807 ioc_err(mrioc, "Resetting dma mask failed %d\n", 3808 retval); 3809 goto out_failed_noretry; 3810 } 3811 3812 mpi3mr_print_ioc_info(mrioc); 3813 3814 dprint_init(mrioc, "allocating config page buffers\n"); 3815 mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, 3816 MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL); 3817 if (!mrioc->cfg_page) 3818 goto out_failed_noretry; 3819 3820 mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; 3821 3822 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 3823 if (retval) { 3824 ioc_err(mrioc, 3825 "%s :Failed to allocated reply sense buffers %d\n", 3826 __func__, retval); 3827 goto out_failed_noretry; 3828 } 3829 3830 retval = mpi3mr_alloc_chain_bufs(mrioc); 3831 if (retval) { 3832 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 3833 retval); 3834 goto out_failed_noretry; 3835 } 3836 3837 retval = mpi3mr_issue_iocinit(mrioc); 3838 if (retval) { 3839 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 3840 retval); 3841 goto out_failed; 3842 } 3843 3844 retval = mpi3mr_print_pkg_ver(mrioc); 3845 if (retval) { 3846 ioc_err(mrioc, "failed to get package version\n"); 3847 goto out_failed; 3848 } 3849 3850 retval = mpi3mr_setup_isr(mrioc, 0); 3851 if (retval) { 3852 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 3853 retval); 3854 goto out_failed_noretry; 3855 } 3856 3857 retval = mpi3mr_create_op_queues(mrioc); 3858 if (retval) { 3859 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 3860 retval); 3861 goto out_failed; 3862 } 3863 3864 if (!mrioc->pel_seqnum_virt) { 3865 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n"); 3866 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 3867 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 3868 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 3869 GFP_KERNEL); 3870 if (!mrioc->pel_seqnum_virt) { 3871 retval = -ENOMEM; 
3872 goto out_failed_noretry; 3873 } 3874 } 3875 3876 if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) { 3877 dprint_init(mrioc, "allocating memory for throttle groups\n"); 3878 sz = sizeof(struct mpi3mr_throttle_group_info); 3879 mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL); 3880 if (!mrioc->throttle_groups) 3881 goto out_failed_noretry; 3882 } 3883 3884 retval = mpi3mr_enable_events(mrioc); 3885 if (retval) { 3886 ioc_err(mrioc, "failed to enable events %d\n", 3887 retval); 3888 goto out_failed; 3889 } 3890 3891 ioc_info(mrioc, "controller initialization completed successfully\n"); 3892 return retval; 3893 out_failed: 3894 if (retry < 2) { 3895 retry++; 3896 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", 3897 retry); 3898 mpi3mr_memset_buffers(mrioc); 3899 goto retry_init; 3900 } 3901 out_failed_noretry: 3902 ioc_err(mrioc, "controller initialization failed\n"); 3903 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3904 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3905 mrioc->unrecoverable = 1; 3906 return retval; 3907 } 3908 3909 /** 3910 * mpi3mr_reinit_ioc - Re-Initialize the controller 3911 * @mrioc: Adapter instance reference 3912 * @is_resume: Called from resume or reset path 3913 * 3914 * This the controller re-initialization routine, executed from 3915 * the soft reset handler or resume callback. Creates 3916 * operational reply queue pairs, allocate required memory for 3917 * reply pool, sense buffer pool, issue IOC init request to the 3918 * firmware, unmask the events and issue port enable to discover 3919 * SAS/SATA/NVMe devices and RAID volumes. 3920 * 3921 * Return: 0 on success and non-zero on failure. 
3922 */ 3923 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3924 { 3925 int retval = 0; 3926 u8 retry = 0; 3927 struct mpi3_ioc_facts_data facts_data; 3928 u32 pe_timeout, ioc_status; 3929 3930 retry_init: 3931 pe_timeout = 3932 (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL); 3933 3934 dprint_reset(mrioc, "bringing up the controller to ready state\n"); 3935 retval = mpi3mr_bring_ioc_ready(mrioc); 3936 if (retval) { 3937 ioc_err(mrioc, "failed to bring to ready state\n"); 3938 goto out_failed_noretry; 3939 } 3940 3941 if (is_resume) { 3942 dprint_reset(mrioc, "setting up single ISR\n"); 3943 retval = mpi3mr_setup_isr(mrioc, 1); 3944 if (retval) { 3945 ioc_err(mrioc, "failed to setup ISR\n"); 3946 goto out_failed_noretry; 3947 } 3948 } else 3949 mpi3mr_ioc_enable_intr(mrioc); 3950 3951 dprint_reset(mrioc, "getting ioc_facts\n"); 3952 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3953 if (retval) { 3954 ioc_err(mrioc, "failed to get ioc_facts\n"); 3955 goto out_failed; 3956 } 3957 3958 dprint_reset(mrioc, "validating ioc_facts\n"); 3959 retval = mpi3mr_revalidate_factsdata(mrioc); 3960 if (retval) { 3961 ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); 3962 goto out_failed_noretry; 3963 } 3964 3965 mpi3mr_print_ioc_info(mrioc); 3966 3967 dprint_reset(mrioc, "sending ioc_init\n"); 3968 retval = mpi3mr_issue_iocinit(mrioc); 3969 if (retval) { 3970 ioc_err(mrioc, "failed to send ioc_init\n"); 3971 goto out_failed; 3972 } 3973 3974 dprint_reset(mrioc, "getting package version\n"); 3975 retval = mpi3mr_print_pkg_ver(mrioc); 3976 if (retval) { 3977 ioc_err(mrioc, "failed to get package version\n"); 3978 goto out_failed; 3979 } 3980 3981 if (is_resume) { 3982 dprint_reset(mrioc, "setting up multiple ISR\n"); 3983 retval = mpi3mr_setup_isr(mrioc, 0); 3984 if (retval) { 3985 ioc_err(mrioc, "failed to re-setup ISR\n"); 3986 goto out_failed_noretry; 3987 } 3988 } 3989 3990 dprint_reset(mrioc, "creating operational queue pairs\n"); 3991 
retval = mpi3mr_create_op_queues(mrioc); 3992 if (retval) { 3993 ioc_err(mrioc, "failed to create operational queue pairs\n"); 3994 goto out_failed; 3995 } 3996 3997 if (!mrioc->pel_seqnum_virt) { 3998 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n"); 3999 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 4000 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 4001 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 4002 GFP_KERNEL); 4003 if (!mrioc->pel_seqnum_virt) { 4004 retval = -ENOMEM; 4005 goto out_failed_noretry; 4006 } 4007 } 4008 4009 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 4010 ioc_err(mrioc, 4011 "cannot create minimum number of operational queues expected:%d created:%d\n", 4012 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 4013 goto out_failed_noretry; 4014 } 4015 4016 dprint_reset(mrioc, "enabling events\n"); 4017 retval = mpi3mr_enable_events(mrioc); 4018 if (retval) { 4019 ioc_err(mrioc, "failed to enable events\n"); 4020 goto out_failed; 4021 } 4022 4023 mrioc->device_refresh_on = 1; 4024 mpi3mr_add_event_wait_for_device_refresh(mrioc); 4025 4026 ioc_info(mrioc, "sending port enable\n"); 4027 retval = mpi3mr_issue_port_enable(mrioc, 1); 4028 if (retval) { 4029 ioc_err(mrioc, "failed to issue port enable\n"); 4030 goto out_failed; 4031 } 4032 do { 4033 ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL); 4034 if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED) 4035 break; 4036 if (!pci_device_is_present(mrioc->pdev)) 4037 mrioc->unrecoverable = 1; 4038 if (mrioc->unrecoverable) { 4039 retval = -1; 4040 goto out_failed_noretry; 4041 } 4042 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4043 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4044 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4045 mpi3mr_print_fault_info(mrioc); 4046 mrioc->init_cmds.is_waiting = 0; 4047 mrioc->init_cmds.callback = NULL; 4048 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4049 goto out_failed; 4050 } 4051 } while 
(--pe_timeout); 4052 4053 if (!pe_timeout) { 4054 ioc_err(mrioc, "port enable timed out\n"); 4055 mpi3mr_check_rh_fault_ioc(mrioc, 4056 MPI3MR_RESET_FROM_PE_TIMEOUT); 4057 mrioc->init_cmds.is_waiting = 0; 4058 mrioc->init_cmds.callback = NULL; 4059 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4060 goto out_failed; 4061 } else if (mrioc->scan_failed) { 4062 ioc_err(mrioc, 4063 "port enable failed with status=0x%04x\n", 4064 mrioc->scan_failed); 4065 } else 4066 ioc_info(mrioc, "port enable completed successfully\n"); 4067 4068 ioc_info(mrioc, "controller %s completed successfully\n", 4069 (is_resume)?"resume":"re-initialization"); 4070 return retval; 4071 out_failed: 4072 if (retry < 2) { 4073 retry++; 4074 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", 4075 (is_resume)?"resume":"re-initialization", retry); 4076 mpi3mr_memset_buffers(mrioc); 4077 goto retry_init; 4078 } 4079 out_failed_noretry: 4080 ioc_err(mrioc, "controller %s is failed\n", 4081 (is_resume)?"resume":"re-initialization"); 4082 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 4083 MPI3MR_RESET_FROM_CTLR_CLEANUP); 4084 mrioc->unrecoverable = 1; 4085 return retval; 4086 } 4087 4088 /** 4089 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 4090 * segments 4091 * @mrioc: Adapter instance reference 4092 * @qidx: Operational reply queue index 4093 * 4094 * Return: Nothing. 
4095 */ 4096 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4097 { 4098 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 4099 struct segments *segments; 4100 int i, size; 4101 4102 if (!op_reply_q->q_segments) 4103 return; 4104 4105 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 4106 segments = op_reply_q->q_segments; 4107 for (i = 0; i < op_reply_q->num_segments; i++) 4108 memset(segments[i].segment, 0, size); 4109 } 4110 4111 /** 4112 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 4113 * segments 4114 * @mrioc: Adapter instance reference 4115 * @qidx: Operational request queue index 4116 * 4117 * Return: Nothing. 4118 */ 4119 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4120 { 4121 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 4122 struct segments *segments; 4123 int i, size; 4124 4125 if (!op_req_q->q_segments) 4126 return; 4127 4128 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 4129 segments = op_req_q->q_segments; 4130 for (i = 0; i < op_req_q->num_segments; i++) 4131 memset(segments[i].segment, 0, size); 4132 } 4133 4134 /** 4135 * mpi3mr_memset_buffers - memset memory for a controller 4136 * @mrioc: Adapter instance reference 4137 * 4138 * clear all the memory allocated for a controller, typically 4139 * called post reset to reuse the memory allocated during the 4140 * controller init. 4141 * 4142 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	/* Admin queues are kept allocated across reset; only their contents
	 * are wiped so the firmware sees clean queues after reinit. */
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/* init_cmds.reply is used as the sentinel: if it exists, all of the
	 * other internal command reply buffers and tracking bitmaps are
	 * presumed allocated too and are cleared in one pass. */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		bitmap_clear(mrioc->removepend_bitmap, 0,
		    mrioc->dev_handle_bitmap_bits);
		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
		    MPI3MR_NUM_EVTACKCMD);
	}

	/* Reset per-queue bookkeeping; queue segment memory itself is kept
	 * and only zeroed by the memset helpers. */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	mpi3mr_free_enclosure_list(mrioc);

	/* For each dma_pool: free the outstanding buffer (if any) before
	 * destroying the pool, then NULL the pointers so a second call of
	 * this function is a no-op for that resource. */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Drop stale back-references from interrupt vectors to the reply
	 * queues freed above before the queue info arrays go away. */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	bitmap_free(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	bitmap_free(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	bitmap_free(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	bitmap_free(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	kfree(mrioc->transport_cmds.reply);
	mrioc->transport_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGL buffers come from chain_buf_pool; free every entry
	 * that is still allocated before destroying the pool. */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}

	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;	/* assume failure until shutdown-complete is seen */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown through the IOC config register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Firmware-reported timeout (seconds) overrides the default;
	 * loop polls every 100ms, hence the *10 scaling. */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Re-read for the final state report below */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only a healthy, ready controller with no reset in flight gets the
	 * MUR (falling back to soft reset) followed by shutdown notification. */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver commands with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		/* Wake a synchronous waiter, or invoke the async callback;
		 * a command never has both forms of completion. */
		if (cmdptr->is_waiting) {
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internaldriver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
4487 */ 4488 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) 4489 { 4490 struct mpi3mr_drv_cmd *cmdptr; 4491 u8 i; 4492 4493 cmdptr = &mrioc->init_cmds; 4494 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4495 4496 cmdptr = &mrioc->cfg_cmds; 4497 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4498 4499 cmdptr = &mrioc->bsg_cmds; 4500 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4501 cmdptr = &mrioc->host_tm_cmds; 4502 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4503 4504 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 4505 cmdptr = &mrioc->dev_rmhs_cmds[i]; 4506 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4507 } 4508 4509 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { 4510 cmdptr = &mrioc->evtack_cmds[i]; 4511 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4512 } 4513 4514 cmdptr = &mrioc->pel_cmds; 4515 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4516 4517 cmdptr = &mrioc->pel_abort_cmd; 4518 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4519 4520 cmdptr = &mrioc->transport_cmds; 4521 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4522 } 4523 4524 /** 4525 * mpi3mr_pel_wait_post - Issue PEL Wait 4526 * @mrioc: Adapter instance reference 4527 * @drv_cmd: Internal command tracker 4528 * 4529 * Issue PEL Wait MPI request through admin queue and return. 4530 * 4531 * Return: Nothing. 
 */
static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_wait pel_wait;

	/* A freshly posted wait supersedes any earlier abort request */
	mrioc->pel_abort_requested = false;

	/* memset (not partial init) so padding sent to firmware is zeroed */
	memset(&pel_wait, 0, sizeof(pel_wait));
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;	/* completion arrives via callback */
	drv_cmd->callback = mpi3mr_pel_wait_complete;
	drv_cmd->ioc_status = 0;
	drv_cmd->ioc_loginfo = 0;
	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.action = MPI3_PEL_ACTION_WAIT;
	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
	pel_wait.class = cpu_to_le16(mrioc->pel_class);
	/* Firmware completes this only on a new PEL entry or an abort */
	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);

	/* On post failure, release the tracker and disable PEL entirely */
	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
		dprint_bsg_err(mrioc,
		    "Issuing PELWait: Admin post failed\n");
		drv_cmd->state = MPI3MR_CMD_NOTUSED;
		drv_cmd->callback = NULL;
		drv_cmd->retry_count = 0;
		mrioc->pel_enabled = false;
	}
}

/**
 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL get sequence number MPI request through admin queue
 * and return.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	int retval = 0;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	/* NOTE(review): the tracker armed here is always mrioc->pel_cmds,
	 * independent of the drv_cmd argument, which is only used for
	 * cleanup on post failure — presumably intentional; confirm. */
	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->pel_cmds.is_waiting = 0;	/* completion arrives via callback */
	mrioc->pel_cmds.ioc_status = 0;
	mrioc->pel_cmds.ioc_loginfo = 0;
	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Firmware DMAs the sequence numbers into pel_seqnum_virt */
	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
	    sizeof(pel_getseq_req), 0);
	if (retval) {
		if (drv_cmd) {
			drv_cmd->state = MPI3MR_CMD_NOTUSED;
			drv_cmd->callback = NULL;
			drv_cmd->retry_count = 0;
		}
		/* Posting failed: PEL feature is turned off */
		mrioc->pel_enabled = false;
	}

	return retval;
}

/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PELWait request and
 * firmware completes a PELWait request when it is aborted or a
 * new PEL entry is available. This sends AEN to the application
 * and if the PELwait completion is not due to PELAbort then
 * this will send a request for new PEL Sequence number
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Command was flushed by a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (PEL abort path), not a failure */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			/* Repost reuses this tracker; do not release it here */
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Signal the application (AEN) that a new PEL entry is available */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Command was flushed by a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next wait starts one past the newest entry firmware reported */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is an handler for recovering controller by issuing soft
 * reset are diag fault reset. This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All BSG requests will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitalized, otherwise the controller will be marked as not
 * recoverable
 *
 * In snapdump bit is set, the controller is issued with diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;	/* BSG requests are blocked for the duration */
	mrioc->prev_reset_result = -1;

	/* Mask events before resetting, except for reset reasons where the
	 * controller may already be unresponsive (fault/firmware-requested). */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	/* Snapdump path: diag-fault the firmware first so it can capture a
	 * snap dump, then poll until the diag save completes (or times out)
	 * before the actual soft reset below. */
	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}
	/* Firmware shrinking the throttle group count across reset is not
	 * supported; treat it as a fatal mismatch. */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush everything that was in flight before reinitializing */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
	    mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Give the topology time to settle before declaring success */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Recovery failed: diag-fault the controller and mark it
		 * unrecoverable so no further I/O is attempted. */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}


/**
 * mpi3mr_free_config_dma_memory - free memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: memory descriptor structure
 *
 * Check whether the size of the buffer specified by the memory
 * descriptor is greater than the default page size if so then
 * free the memory pointed by the descriptor.
 *
 * Return: Nothing.
4944 */ 4945 static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc, 4946 struct dma_memory_desc *mem_desc) 4947 { 4948 if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) { 4949 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, 4950 mem_desc->addr, mem_desc->dma_addr); 4951 mem_desc->addr = NULL; 4952 } 4953 } 4954 4955 /** 4956 * mpi3mr_alloc_config_dma_memory - Alloc memory for config page 4957 * @mrioc: Adapter instance reference 4958 * @mem_desc: Memory descriptor to hold dma memory info 4959 * 4960 * This function allocates new dmaable memory or provides the 4961 * default config page dmaable memory based on the memory size 4962 * described by the descriptor. 4963 * 4964 * Return: 0 on success, non-zero on failure. 4965 */ 4966 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, 4967 struct dma_memory_desc *mem_desc) 4968 { 4969 if (mem_desc->size > mrioc->cfg_page_sz) { 4970 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, 4971 mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); 4972 if (!mem_desc->addr) 4973 return -ENOMEM; 4974 } else { 4975 mem_desc->addr = mrioc->cfg_page; 4976 mem_desc->dma_addr = mrioc->cfg_page_dma; 4977 memset(mem_desc->addr, 0, mrioc->cfg_page_sz); 4978 } 4979 return 0; 4980 } 4981 4982 /** 4983 * mpi3mr_post_cfg_req - Issue config requests and wait 4984 * @mrioc: Adapter instance reference 4985 * @cfg_req: Configuration request 4986 * @timeout: Timeout in seconds 4987 * @ioc_status: Pointer to return ioc status 4988 * 4989 * A generic function for posting MPI3 configuration request to 4990 * the firmware. This blocks for the completion of request for 4991 * timeout seconds and if the request times out this function 4992 * faults the controller with proper reason code. 4993 * 4994 * On successful completion of the request this function returns 4995 * appropriate ioc status from the firmware back to the caller. 4996 * 4997 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* cfg_cmds is a single shared tracker, serialized by its mutex */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;	/* synchronous: waits on completion */
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	/* Timeout is detected via the command state, not the wait's return
	 * value; on timeout the controller is faulted for diagnosis. */
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* A non-success ioc_status is reported to the caller via *ioc_status
	 * while this function still returns 0 (request itself completed). */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_process_cfg_req - config page request processor
 *
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @cfg_hdr: Configuration page header
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 * @cfg_buf: Memory pointer to copy config page or header
 * @cfg_buf_sz: Size of the memory to get config page or header
 *
 * This is the handler for config page read, write and config
 * page header read operations.
 *
 * This function expects the cfg_req to be populated with page
 * type, page number and action for the header read, and with
 * page address for all other operations.
 *
 * The cfg_hdr can be passed as null for reading the required
 * header details; for read/write pages the cfg_hdr should point
 * to a valid configuration page header.
 *
 * This allocates dmaable memory based on the size of the config
 * buffer and sets the SGE of the cfg_req.
 *
 * For write actions, the config page data has to be passed in
 * the cfg_buf and the size of the data has to be mentioned in
 * the cfg_buf_sz.
 *
 * For read/header actions, on successful completion of the
 * request with successful ioc_status the data will be copied
 * into the cfg_buf, limited to the minimum of the actual page
 * size and cfg_buf_sz.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/* Header reads have a fixed size; everything else needs cfg_hdr. */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions the page's attribute does not permit. */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in units of 4 bytes (dwords). */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		goto out;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	/*
	 * For writes, stage the caller's data into the DMA buffer.
	 * NOTE(review): the copy length is clamped via min_t(u16, ...), so a
	 * cfg_buf_sz above 64K would be truncated — callers pass small
	 * config-page buffers, but confirm if larger pages are ever added.
	 */
	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* For reads/header reads, copy the returned page back out. */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return retval;
}

/**
 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
 * @mrioc: Adapter instance reference
 * @ioc_status: Pointer to return ioc status
 * @dev_pg0: Pointer to return device page 0
 * @pg_sz: Size of the memory allocated to the page pointer
 * @form: The form to be used for addressing the page
 * @form_spec: Form specific information like device handle
 *
 * This is handler for config page read for a specific device
 * page0. The ioc_status has the controller returned ioc_status.
 * This routine doesn't check ioc_status to decide whether the
 * page read is success or not and it is the callers
 * responsibility.
 *
 * Return: 0 on success, non-zero on failure.
5187 */ 5188 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5189 struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) 5190 { 5191 struct mpi3_config_page_header cfg_hdr; 5192 struct mpi3_config_request cfg_req; 5193 u32 page_address; 5194 5195 memset(dev_pg0, 0, pg_sz); 5196 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5197 memset(&cfg_req, 0, sizeof(cfg_req)); 5198 5199 cfg_req.function = MPI3_FUNCTION_CONFIG; 5200 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5201 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; 5202 cfg_req.page_number = 0; 5203 cfg_req.page_address = 0; 5204 5205 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5206 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5207 ioc_err(mrioc, "device page0 header read failed\n"); 5208 goto out_failed; 5209 } 5210 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5211 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", 5212 *ioc_status); 5213 goto out_failed; 5214 } 5215 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5216 page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | 5217 (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); 5218 cfg_req.page_address = cpu_to_le32(page_address); 5219 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5220 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { 5221 ioc_err(mrioc, "device page0 read failed\n"); 5222 goto out_failed; 5223 } 5224 return 0; 5225 out_failed: 5226 return -1; 5227 } 5228 5229 5230 /** 5231 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 5232 * @mrioc: Adapter instance reference 5233 * @ioc_status: Pointer to return ioc status 5234 * @phy_pg0: Pointer to return SAS Phy page 0 5235 * @pg_sz: Size of the memory allocated to the page pointer 5236 * @form: The form to be used for addressing the page 5237 * @form_spec: Form specific information like phy number 5238 * 5239 * This is handler for config page read for a specific SAS Phy 5240 * page0. 
The ioc_status has the controller returned ioc_status. 5241 * This routine doesn't check ioc_status to decide whether the 5242 * page read is success or not and it is the callers 5243 * responsibility. 5244 * 5245 * Return: 0 on success, non-zero on failure. 5246 */ 5247 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5248 struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, 5249 u32 form_spec) 5250 { 5251 struct mpi3_config_page_header cfg_hdr; 5252 struct mpi3_config_request cfg_req; 5253 u32 page_address; 5254 5255 memset(phy_pg0, 0, pg_sz); 5256 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5257 memset(&cfg_req, 0, sizeof(cfg_req)); 5258 5259 cfg_req.function = MPI3_FUNCTION_CONFIG; 5260 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5261 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5262 cfg_req.page_number = 0; 5263 cfg_req.page_address = 0; 5264 5265 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5266 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5267 ioc_err(mrioc, "sas phy page0 header read failed\n"); 5268 goto out_failed; 5269 } 5270 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5271 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", 5272 *ioc_status); 5273 goto out_failed; 5274 } 5275 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5276 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5277 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5278 cfg_req.page_address = cpu_to_le32(page_address); 5279 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5280 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { 5281 ioc_err(mrioc, "sas phy page0 read failed\n"); 5282 goto out_failed; 5283 } 5284 return 0; 5285 out_failed: 5286 return -1; 5287 } 5288 5289 /** 5290 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 5291 * @mrioc: Adapter instance reference 5292 * @ioc_status: Pointer to return ioc status 5293 * @phy_pg1: Pointer to return SAS Phy page 1 5294 * 
@pg_sz: Size of the memory allocated to the page pointer 5295 * @form: The form to be used for addressing the page 5296 * @form_spec: Form specific information like phy number 5297 * 5298 * This is handler for config page read for a specific SAS Phy 5299 * page1. The ioc_status has the controller returned ioc_status. 5300 * This routine doesn't check ioc_status to decide whether the 5301 * page read is success or not and it is the callers 5302 * responsibility. 5303 * 5304 * Return: 0 on success, non-zero on failure. 5305 */ 5306 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5307 struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, 5308 u32 form_spec) 5309 { 5310 struct mpi3_config_page_header cfg_hdr; 5311 struct mpi3_config_request cfg_req; 5312 u32 page_address; 5313 5314 memset(phy_pg1, 0, pg_sz); 5315 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5316 memset(&cfg_req, 0, sizeof(cfg_req)); 5317 5318 cfg_req.function = MPI3_FUNCTION_CONFIG; 5319 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5320 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5321 cfg_req.page_number = 1; 5322 cfg_req.page_address = 0; 5323 5324 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5325 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5326 ioc_err(mrioc, "sas phy page1 header read failed\n"); 5327 goto out_failed; 5328 } 5329 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5330 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", 5331 *ioc_status); 5332 goto out_failed; 5333 } 5334 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5335 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5336 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5337 cfg_req.page_address = cpu_to_le32(page_address); 5338 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5339 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { 5340 ioc_err(mrioc, "sas phy page1 read failed\n"); 5341 goto out_failed; 5342 } 5343 return 0; 5344 
out_failed: 5345 return -1; 5346 } 5347 5348 5349 /** 5350 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 5351 * @mrioc: Adapter instance reference 5352 * @ioc_status: Pointer to return ioc status 5353 * @exp_pg0: Pointer to return SAS Expander page 0 5354 * @pg_sz: Size of the memory allocated to the page pointer 5355 * @form: The form to be used for addressing the page 5356 * @form_spec: Form specific information like device handle 5357 * 5358 * This is handler for config page read for a specific SAS 5359 * Expander page0. The ioc_status has the controller returned 5360 * ioc_status. This routine doesn't check ioc_status to decide 5361 * whether the page read is success or not and it is the callers 5362 * responsibility. 5363 * 5364 * Return: 0 on success, non-zero on failure. 5365 */ 5366 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5367 struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, 5368 u32 form_spec) 5369 { 5370 struct mpi3_config_page_header cfg_hdr; 5371 struct mpi3_config_request cfg_req; 5372 u32 page_address; 5373 5374 memset(exp_pg0, 0, pg_sz); 5375 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5376 memset(&cfg_req, 0, sizeof(cfg_req)); 5377 5378 cfg_req.function = MPI3_FUNCTION_CONFIG; 5379 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5380 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5381 cfg_req.page_number = 0; 5382 cfg_req.page_address = 0; 5383 5384 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5385 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5386 ioc_err(mrioc, "expander page0 header read failed\n"); 5387 goto out_failed; 5388 } 5389 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5390 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", 5391 *ioc_status); 5392 goto out_failed; 5393 } 5394 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5395 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5396 (form_spec & 
(MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5397 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5398 cfg_req.page_address = cpu_to_le32(page_address); 5399 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5400 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { 5401 ioc_err(mrioc, "expander page0 read failed\n"); 5402 goto out_failed; 5403 } 5404 return 0; 5405 out_failed: 5406 return -1; 5407 } 5408 5409 /** 5410 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 5411 * @mrioc: Adapter instance reference 5412 * @ioc_status: Pointer to return ioc status 5413 * @exp_pg1: Pointer to return SAS Expander page 1 5414 * @pg_sz: Size of the memory allocated to the page pointer 5415 * @form: The form to be used for addressing the page 5416 * @form_spec: Form specific information like phy number 5417 * 5418 * This is handler for config page read for a specific SAS 5419 * Expander page1. The ioc_status has the controller returned 5420 * ioc_status. This routine doesn't check ioc_status to decide 5421 * whether the page read is success or not and it is the callers 5422 * responsibility. 5423 * 5424 * Return: 0 on success, non-zero on failure. 
5425 */ 5426 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5427 struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, 5428 u32 form_spec) 5429 { 5430 struct mpi3_config_page_header cfg_hdr; 5431 struct mpi3_config_request cfg_req; 5432 u32 page_address; 5433 5434 memset(exp_pg1, 0, pg_sz); 5435 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5436 memset(&cfg_req, 0, sizeof(cfg_req)); 5437 5438 cfg_req.function = MPI3_FUNCTION_CONFIG; 5439 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5440 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5441 cfg_req.page_number = 1; 5442 cfg_req.page_address = 0; 5443 5444 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5445 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5446 ioc_err(mrioc, "expander page1 header read failed\n"); 5447 goto out_failed; 5448 } 5449 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5450 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", 5451 *ioc_status); 5452 goto out_failed; 5453 } 5454 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5455 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5456 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5457 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5458 cfg_req.page_address = cpu_to_le32(page_address); 5459 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5460 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { 5461 ioc_err(mrioc, "expander page1 read failed\n"); 5462 goto out_failed; 5463 } 5464 return 0; 5465 out_failed: 5466 return -1; 5467 } 5468 5469 /** 5470 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 5471 * @mrioc: Adapter instance reference 5472 * @ioc_status: Pointer to return ioc status 5473 * @encl_pg0: Pointer to return Enclosure page 0 5474 * @pg_sz: Size of the memory allocated to the page pointer 5475 * @form: The form to be used for addressing the page 5476 * @form_spec: Form specific information like device handle 5477 * 5478 * This 
is handler for config page read for a specific Enclosure 5479 * page0. The ioc_status has the controller returned ioc_status. 5480 * This routine doesn't check ioc_status to decide whether the 5481 * page read is success or not and it is the callers 5482 * responsibility. 5483 * 5484 * Return: 0 on success, non-zero on failure. 5485 */ 5486 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5487 struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, 5488 u32 form_spec) 5489 { 5490 struct mpi3_config_page_header cfg_hdr; 5491 struct mpi3_config_request cfg_req; 5492 u32 page_address; 5493 5494 memset(encl_pg0, 0, pg_sz); 5495 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5496 memset(&cfg_req, 0, sizeof(cfg_req)); 5497 5498 cfg_req.function = MPI3_FUNCTION_CONFIG; 5499 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5500 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; 5501 cfg_req.page_number = 0; 5502 cfg_req.page_address = 0; 5503 5504 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5505 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5506 ioc_err(mrioc, "enclosure page0 header read failed\n"); 5507 goto out_failed; 5508 } 5509 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5510 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", 5511 *ioc_status); 5512 goto out_failed; 5513 } 5514 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5515 page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | 5516 (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); 5517 cfg_req.page_address = cpu_to_le32(page_address); 5518 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5519 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { 5520 ioc_err(mrioc, "enclosure page0 read failed\n"); 5521 goto out_failed; 5522 } 5523 return 0; 5524 out_failed: 5525 return -1; 5526 } 5527 5528 5529 /** 5530 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 5531 * @mrioc: Adapter instance reference 5532 * 
@sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 5533 * @pg_sz: Size of the memory allocated to the page pointer 5534 * 5535 * This is handler for config page read for the SAS IO Unit 5536 * page0. This routine checks ioc_status to decide whether the 5537 * page read is success or not. 5538 * 5539 * Return: 0 on success, non-zero on failure. 5540 */ 5541 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, 5542 struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) 5543 { 5544 struct mpi3_config_page_header cfg_hdr; 5545 struct mpi3_config_request cfg_req; 5546 u16 ioc_status = 0; 5547 5548 memset(sas_io_unit_pg0, 0, pg_sz); 5549 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5550 memset(&cfg_req, 0, sizeof(cfg_req)); 5551 5552 cfg_req.function = MPI3_FUNCTION_CONFIG; 5553 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5554 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5555 cfg_req.page_number = 0; 5556 cfg_req.page_address = 0; 5557 5558 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5559 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5560 ioc_err(mrioc, "sas io unit page0 header read failed\n"); 5561 goto out_failed; 5562 } 5563 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5564 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", 5565 ioc_status); 5566 goto out_failed; 5567 } 5568 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5569 5570 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5571 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { 5572 ioc_err(mrioc, "sas io unit page0 read failed\n"); 5573 goto out_failed; 5574 } 5575 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5576 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", 5577 ioc_status); 5578 goto out_failed; 5579 } 5580 return 0; 5581 out_failed: 5582 return -1; 5583 } 5584 5585 /** 5586 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 5587 * @mrioc: Adapter instance reference 
5588 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 5589 * @pg_sz: Size of the memory allocated to the page pointer 5590 * 5591 * This is handler for config page read for the SAS IO Unit 5592 * page1. This routine checks ioc_status to decide whether the 5593 * page read is success or not. 5594 * 5595 * Return: 0 on success, non-zero on failure. 5596 */ 5597 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5598 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5599 { 5600 struct mpi3_config_page_header cfg_hdr; 5601 struct mpi3_config_request cfg_req; 5602 u16 ioc_status = 0; 5603 5604 memset(sas_io_unit_pg1, 0, pg_sz); 5605 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5606 memset(&cfg_req, 0, sizeof(cfg_req)); 5607 5608 cfg_req.function = MPI3_FUNCTION_CONFIG; 5609 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5610 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5611 cfg_req.page_number = 1; 5612 cfg_req.page_address = 0; 5613 5614 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5615 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5616 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5617 goto out_failed; 5618 } 5619 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5620 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5621 ioc_status); 5622 goto out_failed; 5623 } 5624 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5625 5626 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5627 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5628 ioc_err(mrioc, "sas io unit page1 read failed\n"); 5629 goto out_failed; 5630 } 5631 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5632 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", 5633 ioc_status); 5634 goto out_failed; 5635 } 5636 return 0; 5637 out_failed: 5638 return -1; 5639 } 5640 5641 /** 5642 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 5643 * @mrioc: Adapter instance reference 
5644 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write 5645 * @pg_sz: Size of the memory allocated to the page pointer 5646 * 5647 * This is handler for config page write for the SAS IO Unit 5648 * page1. This routine checks ioc_status to decide whether the 5649 * page read is success or not. This will modify both current 5650 * and persistent page. 5651 * 5652 * Return: 0 on success, non-zero on failure. 5653 */ 5654 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5655 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5656 { 5657 struct mpi3_config_page_header cfg_hdr; 5658 struct mpi3_config_request cfg_req; 5659 u16 ioc_status = 0; 5660 5661 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5662 memset(&cfg_req, 0, sizeof(cfg_req)); 5663 5664 cfg_req.function = MPI3_FUNCTION_CONFIG; 5665 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5666 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5667 cfg_req.page_number = 1; 5668 cfg_req.page_address = 0; 5669 5670 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5671 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5672 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5673 goto out_failed; 5674 } 5675 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5676 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5677 ioc_status); 5678 goto out_failed; 5679 } 5680 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; 5681 5682 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5683 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5684 ioc_err(mrioc, "sas io unit page1 write current failed\n"); 5685 goto out_failed; 5686 } 5687 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5688 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", 5689 ioc_status); 5690 goto out_failed; 5691 } 5692 5693 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; 5694 5695 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 
5696 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5697 ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); 5698 goto out_failed; 5699 } 5700 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5701 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", 5702 ioc_status); 5703 goto out_failed; 5704 } 5705 return 0; 5706 out_failed: 5707 return -1; 5708 } 5709 5710 /** 5711 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 5712 * @mrioc: Adapter instance reference 5713 * @driver_pg1: Pointer to return Driver page 1 5714 * @pg_sz: Size of the memory allocated to the page pointer 5715 * 5716 * This is handler for config page read for the Driver page1. 5717 * This routine checks ioc_status to decide whether the page 5718 * read is success or not. 5719 * 5720 * Return: 0 on success, non-zero on failure. 5721 */ 5722 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, 5723 struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) 5724 { 5725 struct mpi3_config_page_header cfg_hdr; 5726 struct mpi3_config_request cfg_req; 5727 u16 ioc_status = 0; 5728 5729 memset(driver_pg1, 0, pg_sz); 5730 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5731 memset(&cfg_req, 0, sizeof(cfg_req)); 5732 5733 cfg_req.function = MPI3_FUNCTION_CONFIG; 5734 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5735 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; 5736 cfg_req.page_number = 1; 5737 cfg_req.page_address = 0; 5738 5739 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5740 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5741 ioc_err(mrioc, "driver page1 header read failed\n"); 5742 goto out_failed; 5743 } 5744 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5745 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", 5746 ioc_status); 5747 goto out_failed; 5748 } 5749 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5750 5751 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5752 
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { 5753 ioc_err(mrioc, "driver page1 read failed\n"); 5754 goto out_failed; 5755 } 5756 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5757 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", 5758 ioc_status); 5759 goto out_failed; 5760 } 5761 return 0; 5762 out_failed: 5763 return -1; 5764 } 5765