// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

/* Forward declarations for routines defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);

/* Number of io_uring poll-mode queues requested on the module command line. */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");

#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write helper: use the native writeq() when available. */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * 64-bit MMIO write emulated as two 32-bit writes, low dword first.
 * NOTE(review): non-atomic; presumably the controller tolerates the
 * split access for the registers written this way — confirm against
 * the MPI3 system interface spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/*
 * Check whether an operational request queue is full.  The queue is
 * full when the producer index is exactly one slot behind the consumer
 * index, accounting for wrap-around at num_requests.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/* Wait for any in-flight interrupt handlers of this adapter to finish. */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/*
 * Mark interrupts disabled (ISRs check intr_enabled and bail out) and
 * then drain handlers that are already running.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/* Allow the ISRs to process replies again. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/*
 * Tear down interrupt handling: disable/drain interrupts, free every
 * registered IRQ and its per-vector info, then release the PCI vectors.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/*
 * Fill one MPI3 simple SGE at @paddr with the given flags, length and
 * DMA address (fields stored little-endian for the controller).
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/* Build a zero-length end-of-list SGE (address -1) at @paddr. */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/*
 * Translate a reply-buffer DMA address back to its kernel virtual
 * address; returns NULL for a zero address or one outside the pool.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/*
 * Translate a sense-buffer DMA address to its virtual address.
 * NOTE(review): unlike the reply-buffer variant above, no upper-bound
 * check is done here — confirm callers only pass pool addresses.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/*
 * Return a consumed reply frame to the reply free queue and tell the
 * controller by updating the host index register (under the queue lock).
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/*
 * Return a consumed sense buffer to the sense-buffer free queue and
 * publish the new host index to the controller (under sbq_lock).
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/*
 * Log a human-readable line for a firmware event notification.  Events
 * carrying extra data print it inline and return; the rest fall through
 * to a simple description string.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Events with no extra payload: print the description only. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/*
 * Handle an asynchronous event notification reply: record the IOC change
 * count, log the event, then hand it to the OS-level event processing.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/*
 * Map a reply's host tag to the driver command tracker that issued it.
 * Tag MPI3MR_HOSTTAG_INVALID identifies unsolicited replies; event
 * notifications among them are dispatched here as a side effect.
 * Returns NULL when no tracker corresponds to @host_tag.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}

	/* Device-removal and event-ack commands use contiguous tag ranges. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}

static
/*
 * Decode one admin reply descriptor: extract host tag, IOC status and
 * log info according to the descriptor type, resolve any address reply
 * to its virtual frame, then complete the matching driver command
 * (wake a waiter or invoke its callback).  *reply_dma is set to the
 * reply frame's DMA address when one must be reposted by the caller.
 */
void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		/* Only complete commands the driver still considers pending. */
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set for SCSI IO replies; return it to the pool. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/*
 * Drain the admin reply queue: process descriptors until the phase bit
 * no longer matches the expected phase, reposting reply frames as we
 * go, then publish the new consumer index to the controller.
 * Returns the number of descriptors processed (0 if queue was empty).
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch means the controller has posted nothing new. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Wrap the CI and flip the expected phase at queue end. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 *	queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	/* The queue is segmented: locate the segment, then the slot in it. */
	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Single consumer at a time: bail out if already being drained. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request-queue CI reported by the firmware. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_blk_mq_poll - Operational reply queue handler
 * @shost: SCSI Host reference
 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct mpi3mr_ioc *mrioc;

	mrioc = (struct mpi3mr_ioc *)shost->hostdata;

	/* No polling while a reset is running or being prepared. */
	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
		return 0;

	num_entries = mpi3mr_process_op_reply_q(mrioc,
	    &mrioc->op_reply_qinfo[queue_num]);

	return num_entries;
}

/*
 * Primary (hard) interrupt handler: vector 0 also services the admin
 * reply queue; any vector with an attached operational reply queue
 * drains it.  Returns IRQ_HANDLED only if something was processed.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
		    intr_info->op_reply_q);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Mask this vector until the threaded handler re-enables it. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Done polling: hand the queue back to interrupt-driven mode. */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 *
 * Return: 0 on success and non zero on failures.
 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	return retval;
}

/*
 * Validate the requested io_uring poll queue count against the number
 * of MSI-X vectors actually available; disable polled queues when there
 * are not enough vectors left for the admin and default queues.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	/* Reserved for Admin and Default Queue */
	if (max_vectors > 2 &&
	    (mrioc->requested_poll_qcount < max_vectors - 2)) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}

/**
 * mpi3mr_setup_isr - Setup ISR for the controller
 * @mrioc: Adapter instance reference
 * @setup_one: Request one IRQ or more
 *
 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
 *
 * Return: 0 on success and non zero on failures.
 */
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
	unsigned int irq_flags = PCI_IRQ_MSIX;
	int max_vectors, min_vec;
	int retval;
	int i;
	struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };

	if (mrioc->is_intr_info_set)
		return 0;

	mpi3mr_cleanup_isr(mrioc);

	if (setup_one || reset_devices) {
		max_vectors = 1;
		retval = pci_alloc_irq_vectors(mrioc->pdev,
		    1, max_vectors, irq_flags);
		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}
	} else {
		max_vectors =
		    min_t(int, mrioc->cpu_count + 1 +
			mrioc->requested_poll_qcount, mrioc->msix_count);

		mpi3mr_calc_poll_queues(mrioc, max_vectors);

		ioc_info(mrioc,
		    "MSI-X vectors supported: %d, no of cores: %d,",
		    mrioc->msix_count, mrioc->cpu_count);
		ioc_info(mrioc,
		    "MSI-x vectors requested: %d poll_queues %d\n",
		    max_vectors, mrioc->requested_poll_qcount);

		desc.post_vectors = mrioc->requested_poll_qcount;
		min_vec = desc.pre_vectors + desc.post_vectors;
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;

		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
		    min_vec, max_vectors, irq_flags, &desc);

		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}


		/*
		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
		 * between Admin queue and operational queue
		 */
		if (retval == min_vec)
			mrioc->op_reply_q_offset = 0;
		else if (retval != (max_vectors)) {
			ioc_info(mrioc,
			    "allocated vectors (%d) are less than configured (%d)\n",
			    retval, max_vectors);
		}

		max_vectors = retval;
		mrioc->op_reply_q_offset = (max_vectors > 1) ?
1 : 0; 817 818 mpi3mr_calc_poll_queues(mrioc, max_vectors); 819 820 } 821 822 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 823 GFP_KERNEL); 824 if (!mrioc->intr_info) { 825 retval = -ENOMEM; 826 pci_free_irq_vectors(mrioc->pdev); 827 goto out_failed; 828 } 829 for (i = 0; i < max_vectors; i++) { 830 retval = mpi3mr_request_irq(mrioc, i); 831 if (retval) { 832 mrioc->intr_info_count = i; 833 goto out_failed; 834 } 835 } 836 if (reset_devices || !setup_one) 837 mrioc->is_intr_info_set = true; 838 mrioc->intr_info_count = max_vectors; 839 mpi3mr_ioc_enable_intr(mrioc); 840 return 0; 841 842 out_failed: 843 mpi3mr_cleanup_isr(mrioc); 844 845 return retval; 846 } 847 848 static const struct { 849 enum mpi3mr_iocstate value; 850 char *name; 851 } mrioc_states[] = { 852 { MRIOC_STATE_READY, "ready" }, 853 { MRIOC_STATE_FAULT, "fault" }, 854 { MRIOC_STATE_RESET, "reset" }, 855 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 856 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 857 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 858 }; 859 860 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 861 { 862 int i; 863 char *name = NULL; 864 865 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 866 if (mrioc_states[i].value == mrioc_state) { 867 name = mrioc_states[i].name; 868 break; 869 } 870 } 871 return name; 872 } 873 874 /* Reset reason to name mapper structure*/ 875 static const struct { 876 enum mpi3mr_reset_reason value; 877 char *name; 878 } mpi3mr_reset_reason_codes[] = { 879 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 880 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 881 { MPI3MR_RESET_FROM_APP, "application invocation" }, 882 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 883 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 884 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" }, 885 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 886 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in 
controller cleanup" }, 887 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 888 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 889 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 890 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 891 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 892 { 893 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 894 "create request queue timeout" 895 }, 896 { 897 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 898 "create reply queue timeout" 899 }, 900 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 901 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 902 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 903 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 904 { 905 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 906 "component image activation timeout" 907 }, 908 { 909 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 910 "get package version timeout" 911 }, 912 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 913 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 914 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 915 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, 916 }; 917 918 /** 919 * mpi3mr_reset_rc_name - get reset reason code name 920 * @reason_code: reset reason code value 921 * 922 * Map reset reason to an NULL terminated ASCII string 923 * 924 * Return: name corresponding to reset reason value or NULL. 
925 */ 926 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 927 { 928 int i; 929 char *name = NULL; 930 931 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 932 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 933 name = mpi3mr_reset_reason_codes[i].name; 934 break; 935 } 936 } 937 return name; 938 } 939 940 /* Reset type to name mapper structure*/ 941 static const struct { 942 u16 reset_type; 943 char *name; 944 } mpi3mr_reset_types[] = { 945 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 946 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 947 }; 948 949 /** 950 * mpi3mr_reset_type_name - get reset type name 951 * @reset_type: reset type value 952 * 953 * Map reset type to an NULL terminated ASCII string 954 * 955 * Return: name corresponding to reset type value or NULL. 956 */ 957 static const char *mpi3mr_reset_type_name(u16 reset_type) 958 { 959 int i; 960 char *name = NULL; 961 962 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 963 if (mpi3mr_reset_types[i].reset_type == reset_type) { 964 name = mpi3mr_reset_types[i].name; 965 break; 966 } 967 } 968 return name; 969 } 970 971 /** 972 * mpi3mr_print_fault_info - Display fault information 973 * @mrioc: Adapter instance reference 974 * 975 * Display the controller fault information if there is a 976 * controller fault. 977 * 978 * Return: Nothing. 
 */
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, code, code1, code2, code3;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		code = readl(&mrioc->sysif_regs->fault);
		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
		code3 = readl(&mrioc->sysif_regs->fault_info[2]);

		ioc_info(mrioc,
		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
		    code, code1, code2, code3);
	}
}

/**
 * mpi3mr_get_iocstate - Get IOC State
 * @mrioc: Adapter instance reference
 *
 * Return a proper IOC state enum based on the IOC status and
 * IOC configuration and unrcoverable state of the controller.
 *
 * Return: Current IOC state.
 */
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, ioc_config;
	u8 ready, enabled;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (mrioc->unrecoverable)
		return MRIOC_STATE_UNRECOVERABLE;
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
		return MRIOC_STATE_FAULT;

	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);

	if (ready && enabled)
		return MRIOC_STATE_READY;
	if ((!ready) && (!enabled))
		return MRIOC_STATE_RESET;
	if ((!ready) && (enabled))
		return MRIOC_STATE_BECOMING_READY;

	return MRIOC_STATE_RESET_REQUESTED;
}

/**
 * mpi3mr_clear_reset_history - clear reset history
 * @mrioc: Adapter instance reference
 *
 * Write the reset history bit in IOC status to clear the bit,
 * if it is already set.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* The bit is write-1-to-clear; writing the status back clears it. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad, then clear ENABLE_IOC to reset. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll in 100ms steps; timeout constant is in seconds (x10 ticks). */
	timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* MUR succeeded only if not ready, not faulted and IOC disabled. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Return zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	u16 dev_handle_bitmap_sz;
	void *removepend_bitmap;

	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* One bit per device handle, rounded up to whole bytes. */
	dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		dev_handle_bitmap_sz++;
	if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
		removepend_bitmap = krealloc(mrioc->removepend_bitmap,
		    dev_handle_bitmap_sz, GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap sz from: %d to %d\n",
			    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
			return -EPERM;
		}
		/* krealloc does not zero the grown tail; clear it ourselves. */
		memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
		    dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
		    "increased dev_handle_bitmap_sz from %d to %d\n",
		    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
1157 mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz; 1158 } 1159 1160 return 0; 1161 } 1162 1163 /** 1164 * mpi3mr_bring_ioc_ready - Bring controller to ready state 1165 * @mrioc: Adapter instance reference 1166 * 1167 * Set Enable IOC bit in IOC configuration register and wait for 1168 * the controller to become ready. 1169 * 1170 * Return: 0 on success, appropriate error on failure. 1171 */ 1172 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 1173 { 1174 u32 ioc_config, ioc_status, timeout; 1175 int retval = 0; 1176 enum mpi3mr_iocstate ioc_state; 1177 u64 base_info; 1178 1179 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1180 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1181 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 1182 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", 1183 ioc_status, ioc_config, base_info); 1184 1185 /*The timeout value is in 2sec unit, changing it to seconds*/ 1186 mrioc->ready_timeout = 1187 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 1188 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 1189 1190 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); 1191 1192 ioc_state = mpi3mr_get_iocstate(mrioc); 1193 ioc_info(mrioc, "controller is in %s state during detection\n", 1194 mpi3mr_iocstate_name(ioc_state)); 1195 1196 if (ioc_state == MRIOC_STATE_BECOMING_READY || 1197 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 1198 timeout = mrioc->ready_timeout * 10; 1199 do { 1200 msleep(100); 1201 } while (--timeout); 1202 1203 ioc_state = mpi3mr_get_iocstate(mrioc); 1204 ioc_info(mrioc, 1205 "controller is in %s state after waiting to reset\n", 1206 mpi3mr_iocstate_name(ioc_state)); 1207 } 1208 1209 if (ioc_state == MRIOC_STATE_READY) { 1210 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); 1211 retval = mpi3mr_issue_and_process_mur(mrioc, 1212 MPI3MR_RESET_FROM_BRINGUP); 1213 ioc_state = 
mpi3mr_get_iocstate(mrioc); 1214 if (retval) 1215 ioc_err(mrioc, 1216 "message unit reset failed with error %d current state %s\n", 1217 retval, mpi3mr_iocstate_name(ioc_state)); 1218 } 1219 if (ioc_state != MRIOC_STATE_RESET) { 1220 mpi3mr_print_fault_info(mrioc); 1221 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1222 retval = mpi3mr_issue_reset(mrioc, 1223 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1224 MPI3MR_RESET_FROM_BRINGUP); 1225 if (retval) { 1226 ioc_err(mrioc, 1227 "soft reset failed with error %d\n", retval); 1228 goto out_failed; 1229 } 1230 } 1231 ioc_state = mpi3mr_get_iocstate(mrioc); 1232 if (ioc_state != MRIOC_STATE_RESET) { 1233 ioc_err(mrioc, 1234 "cannot bring controller to reset state, current state: %s\n", 1235 mpi3mr_iocstate_name(ioc_state)); 1236 goto out_failed; 1237 } 1238 mpi3mr_clear_reset_history(mrioc); 1239 retval = mpi3mr_setup_admin_qpair(mrioc); 1240 if (retval) { 1241 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1242 retval); 1243 goto out_failed; 1244 } 1245 1246 ioc_info(mrioc, "bringing controller to ready state\n"); 1247 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1248 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1249 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1250 1251 timeout = mrioc->ready_timeout * 10; 1252 do { 1253 ioc_state = mpi3mr_get_iocstate(mrioc); 1254 if (ioc_state == MRIOC_STATE_READY) { 1255 ioc_info(mrioc, 1256 "successfully transitioned to %s state\n", 1257 mpi3mr_iocstate_name(ioc_state)); 1258 return 0; 1259 } 1260 msleep(100); 1261 } while (--timeout); 1262 1263 out_failed: 1264 ioc_state = mpi3mr_get_iocstate(mrioc); 1265 ioc_err(mrioc, 1266 "failed to bring to ready state, current state: %s\n", 1267 mpi3mr_iocstate_name(ioc_state)); 1268 return retval; 1269 } 1270 1271 /** 1272 * mpi3mr_soft_reset_success - Check softreset is success or not 1273 * @ioc_status: IOC status register value 1274 * @ioc_config: IOC config register value 
1275 * 1276 * Check whether the soft reset is successful or not based on 1277 * IOC status and IOC config register values. 1278 * 1279 * Return: True when the soft reset is success, false otherwise. 1280 */ 1281 static inline bool 1282 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1283 { 1284 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1285 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1286 return true; 1287 return false; 1288 } 1289 1290 /** 1291 * mpi3mr_diagfault_success - Check diag fault is success or not 1292 * @mrioc: Adapter reference 1293 * @ioc_status: IOC status register value 1294 * 1295 * Check whether the controller hit diag reset fault code. 1296 * 1297 * Return: True when there is diag fault, false otherwise. 1298 */ 1299 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1300 u32 ioc_status) 1301 { 1302 u32 fault; 1303 1304 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1305 return false; 1306 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1307 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { 1308 mpi3mr_print_fault_info(mrioc); 1309 return true; 1310 } 1311 return false; 1312 } 1313 1314 /** 1315 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1316 * @mrioc: Adapter reference 1317 * 1318 * Set diag save bit in IOC configuration register to enable 1319 * snapdump. 1320 * 1321 * Return: Nothing. 
1322 */ 1323 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1324 { 1325 u32 ioc_config; 1326 1327 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1328 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1329 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1330 } 1331 1332 /** 1333 * mpi3mr_issue_reset - Issue reset to the controller 1334 * @mrioc: Adapter reference 1335 * @reset_type: Reset type 1336 * @reset_reason: Reset reason code 1337 * 1338 * Unlock the host diagnostic registers and write the specific 1339 * reset type to that, wait for reset acknowledgment from the 1340 * controller, if the reset is not successful retry for the 1341 * predefined number of times. 1342 * 1343 * Return: 0 on success, non-zero on failure. 1344 */ 1345 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1346 u32 reset_reason) 1347 { 1348 int retval = -1; 1349 u8 unlock_retry_count = 0; 1350 u32 host_diagnostic, ioc_status, ioc_config; 1351 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 1352 1353 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1354 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1355 return retval; 1356 if (mrioc->unrecoverable) 1357 return retval; 1358 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { 1359 retval = 0; 1360 return retval; 1361 } 1362 1363 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1364 mpi3mr_reset_type_name(reset_type), 1365 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1366 1367 mpi3mr_clear_reset_history(mrioc); 1368 do { 1369 ioc_info(mrioc, 1370 "Write magic sequence to unlock host diag register (retry=%d)\n", 1371 ++unlock_retry_count); 1372 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1373 ioc_err(mrioc, 1374 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", 1375 mpi3mr_reset_type_name(reset_type), 1376 host_diagnostic); 1377 mrioc->unrecoverable = 1; 1378 return retval; 1379 } 1380 1381 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1382 &mrioc->sysif_regs->write_sequence); 1383 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1384 &mrioc->sysif_regs->write_sequence); 1385 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1386 &mrioc->sysif_regs->write_sequence); 1387 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1388 &mrioc->sysif_regs->write_sequence); 1389 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1390 &mrioc->sysif_regs->write_sequence); 1391 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1392 &mrioc->sysif_regs->write_sequence); 1393 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1394 &mrioc->sysif_regs->write_sequence); 1395 usleep_range(1000, 1100); 1396 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1397 ioc_info(mrioc, 1398 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1399 unlock_retry_count, host_diagnostic); 1400 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1401 1402 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1403 writel(host_diagnostic | reset_type, 1404 &mrioc->sysif_regs->host_diagnostic); 1405 switch (reset_type) { 1406 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: 1407 do { 1408 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1409 ioc_config = 1410 readl(&mrioc->sysif_regs->ioc_configuration); 1411 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 1412 && mpi3mr_soft_reset_success(ioc_status, ioc_config) 1413 ) { 1414 mpi3mr_clear_reset_history(mrioc); 1415 retval = 0; 1416 break; 1417 } 1418 msleep(100); 1419 } while (--timeout); 1420 mpi3mr_print_fault_info(mrioc); 1421 break; 1422 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: 1423 do { 1424 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1425 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1426 retval = 0; 1427 break; 1428 } 1429 msleep(100); 1430 } while (--timeout); 1431 break; 1432 default: 1433 break; 1434 } 1435 1436 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1437 &mrioc->sysif_regs->write_sequence); 1438 1439 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1440 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1441 ioc_info(mrioc, 1442 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1443 (!retval)?"successful":"failed", ioc_status, 1444 ioc_config); 1445 if (retval) 1446 mrioc->unrecoverable = 1; 1447 return retval; 1448 } 1449 1450 /** 1451 * mpi3mr_admin_request_post - Post request to admin queue 1452 * @mrioc: Adapter reference 1453 * @admin_req: MPI3 request 1454 * @admin_req_sz: Request size 1455 * @ignore_reset: Ignore reset in process 1456 * 1457 * Post the MPI3 request into admin request queue and 1458 * inform the controller, if the queue is full return 1459 * appropriate error. 1460 * 1461 * Return: 0 on success, non-zero on failure. 1462 */ 1463 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1464 u16 admin_req_sz, u8 ignore_reset) 1465 { 1466 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1467 int retval = 0; 1468 unsigned long flags; 1469 u8 *areq_entry; 1470 1471 if (mrioc->unrecoverable) { 1472 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1473 return -EFAULT; 1474 } 1475 1476 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1477 areq_pi = mrioc->admin_req_pi; 1478 areq_ci = mrioc->admin_req_ci; 1479 max_entries = mrioc->num_admin_req; 1480 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1481 (areq_pi == (max_entries - 1)))) { 1482 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1483 retval = -EAGAIN; 1484 goto out; 1485 } 1486 if (!ignore_reset && mrioc->reset_in_progress) { 1487 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1488 retval = -EAGAIN; 1489 goto out; 1490 } 1491 areq_entry = (u8 *)mrioc->admin_req_base + 1492 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1493 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1494 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1495 
1496 if (++areq_pi == max_entries) 1497 areq_pi = 0; 1498 mrioc->admin_req_pi = areq_pi; 1499 1500 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1501 1502 out: 1503 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1504 1505 return retval; 1506 } 1507 1508 /** 1509 * mpi3mr_free_op_req_q_segments - free request memory segments 1510 * @mrioc: Adapter instance reference 1511 * @q_idx: operational request queue index 1512 * 1513 * Free memory segments allocated for operational request queue 1514 * 1515 * Return: Nothing. 1516 */ 1517 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1518 { 1519 u16 j; 1520 int size; 1521 struct segments *segments; 1522 1523 segments = mrioc->req_qinfo[q_idx].q_segments; 1524 if (!segments) 1525 return; 1526 1527 if (mrioc->enable_segqueue) { 1528 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1529 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1530 dma_free_coherent(&mrioc->pdev->dev, 1531 MPI3MR_MAX_SEG_LIST_SIZE, 1532 mrioc->req_qinfo[q_idx].q_segment_list, 1533 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1534 mrioc->req_qinfo[q_idx].q_segment_list = NULL; 1535 } 1536 } else 1537 size = mrioc->req_qinfo[q_idx].segment_qd * 1538 mrioc->facts.op_req_sz; 1539 1540 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1541 if (!segments[j].segment) 1542 continue; 1543 dma_free_coherent(&mrioc->pdev->dev, 1544 size, segments[j].segment, segments[j].segment_dma); 1545 segments[j].segment = NULL; 1546 } 1547 kfree(mrioc->req_qinfo[q_idx].q_segments); 1548 mrioc->req_qinfo[q_idx].q_segments = NULL; 1549 mrioc->req_qinfo[q_idx].qid = 0; 1550 } 1551 1552 /** 1553 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1554 * @mrioc: Adapter instance reference 1555 * @q_idx: operational reply queue index 1556 * 1557 * Free memory segments allocated for operational reply queue 1558 * 1559 * Return: Nothing. 
 */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	u16 j;
	int size;
	struct segments *segments;

	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed segment size plus a segment list */
		size = MPI3MR_OP_REP_Q_SEG_SIZE;
		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
		}
	} else
		/* Contiguous mode: one segment holding the whole queue */
		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
		    mrioc->op_reply_desc_sz;

	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
		if (!segments[j].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev,
		    size, segments[j].segment, segments[j].segment_dma);
		segments[j].segment = NULL;
	}

	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
	mrioc->op_reply_qinfo[q_idx].qid = 0;
}

/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created (or already deleted) */
	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/*
	 * Adjust the per-type active queue counter before the delete is
	 * actually issued. NOTE(review): the counter is not restored if
	 * the admin command below fails — confirm callers treat failure
	 * as fatal.
	 */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset-handler fault check */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the deleted queue from its interrupt vector */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Entries per segment = segment bytes / descriptor size */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Non-segmented: a single segment holds the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/*
	 * Partial allocations are left in place on -ENOMEM; the caller
	 * frees them via mpi3mr_free_op_reply_q_segments().
	 */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Entries per segment = segment bytes / request frame size */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		/* Non-segmented: a single segment holds the whole queue */
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/*
	 * Partial allocations are left in place on -ENOMEM; the caller
	 * frees them via mpi3mr_free_op_req_q_segments().
	 */
	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* Non-zero qid means this index was already created */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based; qid 0 is reserved (means "not created") */
	reply_qid = qidx + 1;
	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
	/* Revision-0 (A0) parts get the reduced 4K queue depth */
	if (!mrioc->pdev->revision)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;

	/* Segments survive across resets; allocate only if absent */
	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/*
	 * MSI-X vectors below (count - requested_poll_qcount) are
	 * interrupt-driven default queues; the rest are poll queues.
	 */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
		    cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues all point at the last MSI-X vector */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
		    reply_qid, midx);
		/*
		 * Disable that vector's IRQ when the first poll queue is
		 * created; polled completions do not need it.
		 */
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): REQUEST-named SEGMENTED flag used for a
		 * reply queue — presumably the bit position is shared
		 * between the two request formats; confirm against the
		 * MPI3 headers.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset-handler fault check */
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the qid and attach queue to its vector */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* Non-zero qid means this index was already created */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based; qid 0 is reserved (means "not created") */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	/* Segments survive across resets; allocate only if absent */
	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: escalate to reset-handler fault check */
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_queues - create operational queue pairs
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for operational queue meta data and call
 * create request and reply
queue functions. 2033 * 2034 * Return: 0 on success, non-zero on failures. 2035 */ 2036 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 2037 { 2038 int retval = 0; 2039 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 2040 2041 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 2042 mrioc->facts.max_op_req_q); 2043 2044 msix_count_op_q = 2045 mrioc->intr_info_count - mrioc->op_reply_q_offset; 2046 if (!mrioc->num_queues) 2047 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 2048 /* 2049 * During reset set the num_queues to the number of queues 2050 * that was set before the reset. 2051 */ 2052 num_queues = mrioc->num_op_reply_q ? 2053 mrioc->num_op_reply_q : mrioc->num_queues; 2054 ioc_info(mrioc, "trying to create %d operational queue pairs\n", 2055 num_queues); 2056 2057 if (!mrioc->req_qinfo) { 2058 mrioc->req_qinfo = kcalloc(num_queues, 2059 sizeof(struct op_req_qinfo), GFP_KERNEL); 2060 if (!mrioc->req_qinfo) { 2061 retval = -1; 2062 goto out_failed; 2063 } 2064 2065 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 2066 num_queues, GFP_KERNEL); 2067 if (!mrioc->op_reply_qinfo) { 2068 retval = -1; 2069 goto out_failed; 2070 } 2071 } 2072 2073 if (mrioc->enable_segqueue) 2074 ioc_info(mrioc, 2075 "allocating operational queues through segmented queues\n"); 2076 2077 for (i = 0; i < num_queues; i++) { 2078 if (mpi3mr_create_op_reply_q(mrioc, i)) { 2079 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 2080 break; 2081 } 2082 if (mpi3mr_create_op_req_q(mrioc, i, 2083 mrioc->op_reply_qinfo[i].qid)) { 2084 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 2085 mpi3mr_delete_op_reply_q(mrioc, i); 2086 break; 2087 } 2088 } 2089 2090 if (i == 0) { 2091 /* Not even one queue is created successfully*/ 2092 retval = -1; 2093 goto out_failed; 2094 } 2095 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 2096 ioc_info(mrioc, 2097 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", 2098 
mrioc->num_op_reply_q, mrioc->default_qcount, 2099 mrioc->active_poll_qcount); 2100 2101 return retval; 2102 out_failed: 2103 kfree(mrioc->req_qinfo); 2104 mrioc->req_qinfo = NULL; 2105 2106 kfree(mrioc->op_reply_qinfo); 2107 mrioc->op_reply_qinfo = NULL; 2108 2109 return retval; 2110 } 2111 2112 /** 2113 * mpi3mr_op_request_post - Post request to operational queue 2114 * @mrioc: Adapter reference 2115 * @op_req_q: Operational request queue info 2116 * @req: MPI3 request 2117 * 2118 * Post the MPI3 request into operational request queue and 2119 * inform the controller, if the queue is full return 2120 * appropriate error. 2121 * 2122 * Return: 0 on success, non-zero on failure. 2123 */ 2124 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 2125 struct op_req_qinfo *op_req_q, u8 *req) 2126 { 2127 u16 pi = 0, max_entries, reply_qidx = 0, midx; 2128 int retval = 0; 2129 unsigned long flags; 2130 u8 *req_entry; 2131 void *segment_base_addr; 2132 u16 req_sz = mrioc->facts.op_req_sz; 2133 struct segments *segments = op_req_q->q_segments; 2134 2135 reply_qidx = op_req_q->reply_qid - 1; 2136 2137 if (mrioc->unrecoverable) 2138 return -EFAULT; 2139 2140 spin_lock_irqsave(&op_req_q->q_lock, flags); 2141 pi = op_req_q->pi; 2142 max_entries = op_req_q->num_requests; 2143 2144 if (mpi3mr_check_req_qfull(op_req_q)) { 2145 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 2146 reply_qidx, mrioc->op_reply_q_offset); 2147 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); 2148 2149 if (mpi3mr_check_req_qfull(op_req_q)) { 2150 retval = -EAGAIN; 2151 goto out; 2152 } 2153 } 2154 2155 if (mrioc->reset_in_progress) { 2156 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 2157 retval = -EAGAIN; 2158 goto out; 2159 } 2160 2161 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 2162 req_entry = (u8 *)segment_base_addr + 2163 ((pi % op_req_q->segment_qd) * req_sz); 2164 2165 memset(req_entry, 0, req_sz); 2166 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 2167 
2168 if (++pi == max_entries) 2169 pi = 0; 2170 op_req_q->pi = pi; 2171 2172 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 2173 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 2174 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 2175 2176 writel(op_req_q->pi, 2177 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 2178 2179 out: 2180 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 2181 return retval; 2182 } 2183 2184 /** 2185 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2186 * controller 2187 * @mrioc: Adapter instance reference 2188 * @reason_code: reason code for the fault. 2189 * 2190 * This routine will save snapdump and fault the controller with 2191 * the given reason code if it is not already in the fault or 2192 * not asynchronosuly reset. This will be used to handle 2193 * initilaization time faults/resets/timeout as in those cases 2194 * immediate soft reset invocation is not required. 2195 * 2196 * Return: None. 2197 */ 2198 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2199 { 2200 u32 ioc_status, host_diagnostic, timeout; 2201 2202 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2203 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2204 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2205 mpi3mr_print_fault_info(mrioc); 2206 return; 2207 } 2208 mpi3mr_set_diagsave(mrioc); 2209 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2210 reason_code); 2211 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2212 do { 2213 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2214 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 2215 break; 2216 msleep(100); 2217 } while (--timeout); 2218 } 2219 2220 /** 2221 * mpi3mr_sync_timestamp - Issue time stamp sync request 2222 * @mrioc: Adapter reference 2223 * 2224 * Issue IO unit control MPI request to synchornize firmware 2225 * timestamp with host time. 
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	current_time = ktime_get_real();
	/* Host time is passed to firmware in milliseconds */
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Only escalate to soft reset if a reset did not already
		 * abort this command asynchronously.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;	/* stays -1 on any early exit */
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Upload only the manifest section, located right after the
	 * component image header in flash.
	 */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Version display is best-effort: a non-success ioc_status is
	 * not treated as a failure of this routine.
	 */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 fault, host_diagnostic, ioc_status;
	u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* Never requeue: a reset path or an unrecoverable controller
	 * takes over; this invocation simply ends the watchdog chain.
	 */
	if (mrioc->reset_in_progress || mrioc->unrecoverable)
		return;

	/* Periodic firmware timestamp sync (every TSUPDATE_INTERVAL ticks) */
	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		/* Print the fault only on the first tick of the diag save */
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		/* Give the firmware up to DIAG_SAVE_TIMEOUT ticks to finish */
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	switch (fault) {
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_info(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		return;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A soft reset is already underway; do not stack another */
		return;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Requeue ourselves for the next 1-second tick; the lock guards
	 * against mpi3mr_stop_watchdog() clearing watchdog_work_q.
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}

/**
 * mpi3mr_start_watchdog - Start watchdog
 * @mrioc: Adapter instance reference
 *
 * Create and start the watchdog thread to monitor controller
 * faults.
 *
 * Return: Nothing.
 */
void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
{
	/* Already running */
	if (mrioc->watchdog_work_q)
		return;

	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
	snprintf(mrioc->watchdog_work_q_name,
	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
	    mrioc->id);
	mrioc->watchdog_work_q =
	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
	if (!mrioc->watchdog_work_q) {
		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}

	/* NOTE(review): this re-check is redundant — the NULL case
	 * already returned above.
	 */
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
}

/**
 * mpi3mr_stop_watchdog - Stop watchdog
 * @mrioc: Adapter instance reference
 *
 * Stop the watchdog thread created to monitor controller
 * faults.
 *
 * Return: Nothing.
2500 */ 2501 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) 2502 { 2503 unsigned long flags; 2504 struct workqueue_struct *wq; 2505 2506 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2507 wq = mrioc->watchdog_work_q; 2508 mrioc->watchdog_work_q = NULL; 2509 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2510 if (wq) { 2511 if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) 2512 flush_workqueue(wq); 2513 destroy_workqueue(wq); 2514 } 2515 } 2516 2517 /** 2518 * mpi3mr_setup_admin_qpair - Setup admin queue pair 2519 * @mrioc: Adapter instance reference 2520 * 2521 * Allocate memory for admin queue pair if required and register 2522 * the admin queue with the controller. 2523 * 2524 * Return: 0 on success, non-zero on failures. 2525 */ 2526 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc) 2527 { 2528 int retval = 0; 2529 u32 num_admin_entries = 0; 2530 2531 mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE; 2532 mrioc->num_admin_req = mrioc->admin_req_q_sz / 2533 MPI3MR_ADMIN_REQ_FRAME_SZ; 2534 mrioc->admin_req_ci = mrioc->admin_req_pi = 0; 2535 mrioc->admin_req_base = NULL; 2536 2537 mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE; 2538 mrioc->num_admin_replies = mrioc->admin_reply_q_sz / 2539 MPI3MR_ADMIN_REPLY_FRAME_SZ; 2540 mrioc->admin_reply_ci = 0; 2541 mrioc->admin_reply_ephase = 1; 2542 mrioc->admin_reply_base = NULL; 2543 2544 if (!mrioc->admin_req_base) { 2545 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev, 2546 mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL); 2547 2548 if (!mrioc->admin_req_base) { 2549 retval = -1; 2550 goto out_failed; 2551 } 2552 2553 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev, 2554 mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma, 2555 GFP_KERNEL); 2556 2557 if (!mrioc->admin_reply_base) { 2558 retval = -1; 2559 goto out_failed; 2560 } 2561 } 2562 2563 num_admin_entries = (mrioc->num_admin_replies << 16) | 2564 (mrioc->num_admin_req); 2565 
writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries); 2566 mpi3mr_writeq(mrioc->admin_req_dma, 2567 &mrioc->sysif_regs->admin_request_queue_address); 2568 mpi3mr_writeq(mrioc->admin_reply_dma, 2569 &mrioc->sysif_regs->admin_reply_queue_address); 2570 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 2571 writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci); 2572 return retval; 2573 2574 out_failed: 2575 2576 if (mrioc->admin_reply_base) { 2577 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 2578 mrioc->admin_reply_base, mrioc->admin_reply_dma); 2579 mrioc->admin_reply_base = NULL; 2580 } 2581 if (mrioc->admin_req_base) { 2582 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 2583 mrioc->admin_req_base, mrioc->admin_req_dma); 2584 mrioc->admin_req_base = NULL; 2585 } 2586 return retval; 2587 } 2588 2589 /** 2590 * mpi3mr_issue_iocfacts - Send IOC Facts 2591 * @mrioc: Adapter instance reference 2592 * @facts_data: Cached IOC facts data 2593 * 2594 * Issue IOC Facts MPI request through admin queue and wait for 2595 * the completion of it or time out. 2596 * 2597 * Return: 0 on success, non-zero on failures. 
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer the controller writes the facts into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Init-time timeout: diag-fault the IOC instead of an
		 * immediate soft reset.
		 */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer, then decode into mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Process IOC facts data
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask when firmware asks for a smaller one */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity check: firmware reports its facts length in dwords */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Request-frame size is encoded as a power-of-two exponent in the
	 * IOC configuration register.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Copy each field with the appropriate le->cpu conversion */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never use more MSI-X vectors than the controller supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	mrioc->facts.max_dev_per_tg =
	    facts_data->max_devices_per_throttle_group;
	mrioc->facts.io_throttle_data_length =
	    le16_to_cpu(facts_data->io_throttle_data_length);
	mrioc->facts.max_io_throttle_group =
	    le16_to_cpu(facts_data->max_io_throttle_group);
	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
	mrioc->facts.io_throttle_high =
	    le16_to_cpu(facts_data->io_throttle_high);

	/* Store in 512b block count */
	if (mrioc->facts.io_throttle_data_length)
		mrioc->io_throttle_data_length =
		    (mrioc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle */
		mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
	ioc_info(mrioc,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
	ioc_info(mrioc,
	    "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    mrioc->facts.io_throttle_data_length * 4,
	    mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-init after reset): nothing to do */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Bitmap sizes: one bit per handle/command, rounded up to bytes */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
	if (MPI3MR_NUM_EVTACKCMD % 8)
		mrioc->evtack_cmds_bitmap_sz++;
	mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/* NOTE(review): partial allocations are not freed here; presumably
	 * the caller's teardown path releases them — confirm against the
	 * driver's free-mem routine.
	 */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue: one 64-bit DMA address per
	 * reply buffer, followed by a zero entry (queues are depth+1).
	 */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue the same way */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion
of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-coherent buffer holding driver identification for the IOC */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* (re)populate reply free queue and sense buffer queue entries */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* keep a host-side copy of what is being reported to the firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single-user command tracker, guarded by its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* wall-clock timestamp in milliseconds, as the IOC expects */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* IOC init succeeded: publish the initial host indices for both
	 * free queues to the system interface registers.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Un mask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* the bitmap holds 128 event bits; ignore out-of-range IDs */
	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));
	word = event / 32;

	/* clearing the bit unmasks (enables) the event */
	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-user command tracker, guarded by its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* copy the driver's event mask bitmap; a set bit keeps that event
	 * masked (see mpi3mr_unmask_events, which clears bits to enable)
	 */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/**
 *
mpi3mr_process_event_ack - Process event acknowledgment 3232 * @mrioc: Adapter instance reference 3233 * @event: MPI3 event ID 3234 * @event_ctx: event context 3235 * 3236 * Send event acknowledgment through admin queue and wait for 3237 * it to complete. 3238 * 3239 * Return: 0 on success, non-zero on failures. 3240 */ 3241 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3242 u32 event_ctx) 3243 { 3244 struct mpi3_event_ack_request evtack_req; 3245 int retval = 0; 3246 3247 memset(&evtack_req, 0, sizeof(evtack_req)); 3248 mutex_lock(&mrioc->init_cmds.mutex); 3249 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3250 retval = -1; 3251 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3252 mutex_unlock(&mrioc->init_cmds.mutex); 3253 goto out; 3254 } 3255 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3256 mrioc->init_cmds.is_waiting = 1; 3257 mrioc->init_cmds.callback = NULL; 3258 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3259 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3260 evtack_req.event = event; 3261 evtack_req.event_context = cpu_to_le32(event_ctx); 3262 3263 init_completion(&mrioc->init_cmds.done); 3264 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3265 sizeof(evtack_req), 1); 3266 if (retval) { 3267 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3268 goto out_unlock; 3269 } 3270 wait_for_completion_timeout(&mrioc->init_cmds.done, 3271 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3272 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3273 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3274 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3275 mpi3mr_soft_reset_handler(mrioc, 3276 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 3277 retval = -1; 3278 goto out_unlock; 3279 } 3280 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3281 != MPI3_IOCSTATUS_SUCCESS) { 3282 ioc_err(mrioc, 3283 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3284 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3285 mrioc->init_cmds.ioc_loginfo); 3286 retval = -1; 3287 goto out_unlock; 3288 } 3289 3290 out_unlock: 3291 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3292 mutex_unlock(&mrioc->init_cmds.mutex); 3293 out: 3294 return retval; 3295 } 3296 3297 /** 3298 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3299 * @mrioc: Adapter instance reference 3300 * 3301 * Allocate chain buffers and set a bitmap to indicate free 3302 * chain buffers. Chain buffers are used to pass the SGE 3303 * information along with MPI3 SCSI IO requests for host I/O. 3304 * 3305 * Return: 0 on success, non-zero on failure 3306 */ 3307 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3308 { 3309 int retval = 0; 3310 u32 sz, i; 3311 u16 num_chains; 3312 3313 if (mrioc->chain_sgl_list) 3314 return retval; 3315 3316 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3317 3318 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3319 | SHOST_DIX_TYPE1_PROTECTION 3320 | SHOST_DIX_TYPE2_PROTECTION 3321 | SHOST_DIX_TYPE3_PROTECTION)) 3322 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3323 3324 mrioc->chain_buf_count = num_chains; 3325 sz = sizeof(struct chain_element) * num_chains; 3326 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3327 if (!mrioc->chain_sgl_list) 3328 goto out_failed; 3329 3330 sz = MPI3MR_PAGE_SIZE_4K; 3331 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3332 &mrioc->pdev->dev, sz, 16, 0); 3333 if (!mrioc->chain_buf_pool) { 3334 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3335 goto out_failed; 3336 } 3337 3338 for (i = 0; i < num_chains; i++) { 3339 mrioc->chain_sgl_list[i].addr = 3340 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3341 &mrioc->chain_sgl_list[i].dma_addr); 3342 3343 if (!mrioc->chain_sgl_list[i].addr) 3344 goto out_failed; 3345 } 3346 mrioc->chain_bitmap_sz = num_chains / 8; 3347 if (num_chains % 8) 3348 mrioc->chain_bitmap_sz++; 3349 mrioc->chain_bitmap = 
kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/* partially-allocated resources are left for the caller's cleanup
	 * path; only the error code is reported here.
	 */
	retval = -1;
	return retval;
}

/**
 * mpi3mr_port_enable_complete - Mark port enable complete
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Call back for asynchronous port enable request sets the
 * driver command to indicate port enable request is complete.
 *
 * Return: Nothing
 */
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	/* release the command tracker for reuse */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	/* a non-zero ioc_status marks the device scan as failed */
	mrioc->scan_failed = drv_cmd->ioc_status;
	mrioc->scan_started = 0;
}

/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is a single-user command tracker, guarded by its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* async: completion is delivered through the callback */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* sync: block on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* sync path: run the completion handler inline */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}

/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{
MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, 3448 }; 3449 3450 /* Capability to name mapper structure*/ 3451 static const struct { 3452 u32 capability; 3453 char *name; 3454 } mpi3mr_capabilities[] = { 3455 { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, 3456 }; 3457 3458 /** 3459 * mpi3mr_print_ioc_info - Display controller information 3460 * @mrioc: Adapter instance reference 3461 * 3462 * Display controller personalit, capability, supported 3463 * protocols etc. 3464 * 3465 * Return: Nothing 3466 */ 3467 static void 3468 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3469 { 3470 int i = 0, bytes_written = 0; 3471 char personality[16]; 3472 char protocol[50] = {0}; 3473 char capabilities[100] = {0}; 3474 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3475 3476 switch (mrioc->facts.personality) { 3477 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3478 strncpy(personality, "Enhanced HBA", sizeof(personality)); 3479 break; 3480 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3481 strncpy(personality, "RAID", sizeof(personality)); 3482 break; 3483 default: 3484 strncpy(personality, "Unknown", sizeof(personality)); 3485 break; 3486 } 3487 3488 ioc_info(mrioc, "Running in %s Personality", personality); 3489 3490 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3491 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3492 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3493 3494 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3495 if (mrioc->facts.protocol_flags & 3496 mpi3mr_protocols[i].protocol) { 3497 bytes_written += scnprintf(protocol + bytes_written, 3498 sizeof(protocol) - bytes_written, "%s%s", 3499 bytes_written ? 
"," : "", 3500 mpi3mr_protocols[i].name); 3501 } 3502 } 3503 3504 bytes_written = 0; 3505 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3506 if (mrioc->facts.protocol_flags & 3507 mpi3mr_capabilities[i].capability) { 3508 bytes_written += scnprintf(capabilities + bytes_written, 3509 sizeof(capabilities) - bytes_written, "%s%s", 3510 bytes_written ? "," : "", 3511 mpi3mr_capabilities[i].name); 3512 } 3513 } 3514 3515 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3516 protocol, capabilities); 3517 } 3518 3519 /** 3520 * mpi3mr_cleanup_resources - Free PCI resources 3521 * @mrioc: Adapter instance reference 3522 * 3523 * Unmap PCI device memory and disable PCI device. 3524 * 3525 * Return: 0 on success and non-zero on failure. 3526 */ 3527 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 3528 { 3529 struct pci_dev *pdev = mrioc->pdev; 3530 3531 mpi3mr_cleanup_isr(mrioc); 3532 3533 if (mrioc->sysif_regs) { 3534 iounmap((void __iomem *)mrioc->sysif_regs); 3535 mrioc->sysif_regs = NULL; 3536 } 3537 3538 if (pci_is_enabled(pdev)) { 3539 if (mrioc->bars) 3540 pci_release_selected_regions(pdev, mrioc->bars); 3541 pci_disable_device(pdev); 3542 } 3543 } 3544 3545 /** 3546 * mpi3mr_setup_resources - Enable PCI resources 3547 * @mrioc: Adapter instance reference 3548 * 3549 * Enable PCI device memory, MSI-x registers and set DMA mask. 3550 * 3551 * Return: 0 on success and non-zero on failure. 3552 */ 3553 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 3554 { 3555 struct pci_dev *pdev = mrioc->pdev; 3556 u32 memap_sz = 0; 3557 int i, retval = 0, capb = 0; 3558 u16 message_control; 3559 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 3560 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) && 3561 (sizeof(dma_addr_t) > 4)) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* map the first memory BAR: it holds the system interface registers */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			/* fall back from 64-bit to 32-bit DMA addressing */
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* message control word lives at offset 2 of the MSI-X capability;
	 * low 10 bits encode (table size - 1).
	 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* keep interrupts off until the ISR setup completes */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* reserve poll queues out of the MSI-X budget; two vectors are
	 * kept back (admin + at least one default queue).
	 */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
		    mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}

/**
 * mpi3mr_enable_events - Enable required events
 * @mrioc: Adapter instance reference
 *
 * This routine unmasks the events required by the driver by
 * sending appropriate event mask bitmap through an event
 * notification request.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 i;

	/* start fully masked (all bits set), then unmask only the events
	 * this driver handles.
	 */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mrioc->event_masks[i] = -1;

	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);

	retval = mpi3mr_issue_event_notification(mrioc);
	if (retval)
		ioc_err(mrioc, "failed to issue event notification %d\n",
retval); 3681 return retval; 3682 } 3683 3684 /** 3685 * mpi3mr_init_ioc - Initialize the controller 3686 * @mrioc: Adapter instance reference 3687 * 3688 * This the controller initialization routine, executed either 3689 * after soft reset or from pci probe callback. 3690 * Setup the required resources, memory map the controller 3691 * registers, create admin and operational reply queue pairs, 3692 * allocate required memory for reply pool, sense buffer pool, 3693 * issue IOC init request to the firmware, unmask the events and 3694 * issue port enable to discover SAS/SATA/NVMe devies and RAID 3695 * volumes. 3696 * 3697 * Return: 0 on success and non-zero on failure. 3698 */ 3699 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 3700 { 3701 int retval = 0; 3702 u8 retry = 0; 3703 struct mpi3_ioc_facts_data facts_data; 3704 u32 sz; 3705 3706 retry_init: 3707 retval = mpi3mr_bring_ioc_ready(mrioc); 3708 if (retval) { 3709 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 3710 retval); 3711 goto out_failed_noretry; 3712 } 3713 3714 retval = mpi3mr_setup_isr(mrioc, 1); 3715 if (retval) { 3716 ioc_err(mrioc, "Failed to setup ISR error %d\n", 3717 retval); 3718 goto out_failed_noretry; 3719 } 3720 3721 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3722 if (retval) { 3723 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 3724 retval); 3725 goto out_failed; 3726 } 3727 3728 mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; 3729 3730 mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group; 3731 atomic_set(&mrioc->pend_large_data_sz, 0); 3732 3733 if (reset_devices) 3734 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, 3735 MPI3MR_HOST_IOS_KDUMP); 3736 3737 mrioc->reply_sz = mrioc->facts.reply_sz; 3738 3739 retval = mpi3mr_check_reset_dma_mask(mrioc); 3740 if (retval) { 3741 ioc_err(mrioc, "Resetting dma mask failed %d\n", 3742 retval); 3743 goto out_failed_noretry; 3744 } 3745 3746 mpi3mr_print_ioc_info(mrioc); 3747 3748 
dprint_init(mrioc, "allocating config page buffers\n"); 3749 mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, 3750 MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL); 3751 if (!mrioc->cfg_page) 3752 goto out_failed_noretry; 3753 3754 mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; 3755 3756 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 3757 if (retval) { 3758 ioc_err(mrioc, 3759 "%s :Failed to allocated reply sense buffers %d\n", 3760 __func__, retval); 3761 goto out_failed_noretry; 3762 } 3763 3764 retval = mpi3mr_alloc_chain_bufs(mrioc); 3765 if (retval) { 3766 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 3767 retval); 3768 goto out_failed_noretry; 3769 } 3770 3771 retval = mpi3mr_issue_iocinit(mrioc); 3772 if (retval) { 3773 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 3774 retval); 3775 goto out_failed; 3776 } 3777 3778 retval = mpi3mr_print_pkg_ver(mrioc); 3779 if (retval) { 3780 ioc_err(mrioc, "failed to get package version\n"); 3781 goto out_failed; 3782 } 3783 3784 retval = mpi3mr_setup_isr(mrioc, 0); 3785 if (retval) { 3786 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 3787 retval); 3788 goto out_failed_noretry; 3789 } 3790 3791 retval = mpi3mr_create_op_queues(mrioc); 3792 if (retval) { 3793 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 3794 retval); 3795 goto out_failed; 3796 } 3797 3798 if (!mrioc->pel_seqnum_virt) { 3799 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n"); 3800 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 3801 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 3802 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 3803 GFP_KERNEL); 3804 if (!mrioc->pel_seqnum_virt) { 3805 retval = -ENOMEM; 3806 goto out_failed_noretry; 3807 } 3808 } 3809 3810 if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) { 3811 dprint_init(mrioc, "allocating memory for throttle groups\n"); 3812 sz = sizeof(struct mpi3mr_throttle_group_info); 3813 mrioc->throttle_groups = (struct 
mpi3mr_throttle_group_info *) 3814 kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL); 3815 if (!mrioc->throttle_groups) 3816 goto out_failed_noretry; 3817 } 3818 3819 retval = mpi3mr_enable_events(mrioc); 3820 if (retval) { 3821 ioc_err(mrioc, "failed to enable events %d\n", 3822 retval); 3823 goto out_failed; 3824 } 3825 3826 ioc_info(mrioc, "controller initialization completed successfully\n"); 3827 return retval; 3828 out_failed: 3829 if (retry < 2) { 3830 retry++; 3831 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", 3832 retry); 3833 mpi3mr_memset_buffers(mrioc); 3834 goto retry_init; 3835 } 3836 out_failed_noretry: 3837 ioc_err(mrioc, "controller initialization failed\n"); 3838 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3839 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3840 mrioc->unrecoverable = 1; 3841 return retval; 3842 } 3843 3844 /** 3845 * mpi3mr_reinit_ioc - Re-Initialize the controller 3846 * @mrioc: Adapter instance reference 3847 * @is_resume: Called from resume or reset path 3848 * 3849 * This the controller re-initialization routine, executed from 3850 * the soft reset handler or resume callback. Creates 3851 * operational reply queue pairs, allocate required memory for 3852 * reply pool, sense buffer pool, issue IOC init request to the 3853 * firmware, unmask the events and issue port enable to discover 3854 * SAS/SATA/NVMe devices and RAID volumes. 3855 * 3856 * Return: 0 on success and non-zero on failure. 
3857 */ 3858 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3859 { 3860 int retval = 0; 3861 u8 retry = 0; 3862 struct mpi3_ioc_facts_data facts_data; 3863 3864 retry_init: 3865 dprint_reset(mrioc, "bringing up the controller to ready state\n"); 3866 retval = mpi3mr_bring_ioc_ready(mrioc); 3867 if (retval) { 3868 ioc_err(mrioc, "failed to bring to ready state\n"); 3869 goto out_failed_noretry; 3870 } 3871 3872 if (is_resume) { 3873 dprint_reset(mrioc, "setting up single ISR\n"); 3874 retval = mpi3mr_setup_isr(mrioc, 1); 3875 if (retval) { 3876 ioc_err(mrioc, "failed to setup ISR\n"); 3877 goto out_failed_noretry; 3878 } 3879 } else 3880 mpi3mr_ioc_enable_intr(mrioc); 3881 3882 dprint_reset(mrioc, "getting ioc_facts\n"); 3883 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3884 if (retval) { 3885 ioc_err(mrioc, "failed to get ioc_facts\n"); 3886 goto out_failed; 3887 } 3888 3889 dprint_reset(mrioc, "validating ioc_facts\n"); 3890 retval = mpi3mr_revalidate_factsdata(mrioc); 3891 if (retval) { 3892 ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); 3893 goto out_failed_noretry; 3894 } 3895 3896 mpi3mr_print_ioc_info(mrioc); 3897 3898 dprint_reset(mrioc, "sending ioc_init\n"); 3899 retval = mpi3mr_issue_iocinit(mrioc); 3900 if (retval) { 3901 ioc_err(mrioc, "failed to send ioc_init\n"); 3902 goto out_failed; 3903 } 3904 3905 dprint_reset(mrioc, "getting package version\n"); 3906 retval = mpi3mr_print_pkg_ver(mrioc); 3907 if (retval) { 3908 ioc_err(mrioc, "failed to get package version\n"); 3909 goto out_failed; 3910 } 3911 3912 if (is_resume) { 3913 dprint_reset(mrioc, "setting up multiple ISR\n"); 3914 retval = mpi3mr_setup_isr(mrioc, 0); 3915 if (retval) { 3916 ioc_err(mrioc, "failed to re-setup ISR\n"); 3917 goto out_failed_noretry; 3918 } 3919 } 3920 3921 dprint_reset(mrioc, "creating operational queue pairs\n"); 3922 retval = mpi3mr_create_op_queues(mrioc); 3923 if (retval) { 3924 ioc_err(mrioc, "failed to create operational queue 
pairs\n"); 3925 goto out_failed; 3926 } 3927 3928 if (!mrioc->pel_seqnum_virt) { 3929 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n"); 3930 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 3931 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 3932 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 3933 GFP_KERNEL); 3934 if (!mrioc->pel_seqnum_virt) { 3935 retval = -ENOMEM; 3936 goto out_failed_noretry; 3937 } 3938 } 3939 3940 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3941 ioc_err(mrioc, 3942 "cannot create minimum number of operational queues expected:%d created:%d\n", 3943 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3944 goto out_failed_noretry; 3945 } 3946 3947 dprint_reset(mrioc, "enabling events\n"); 3948 retval = mpi3mr_enable_events(mrioc); 3949 if (retval) { 3950 ioc_err(mrioc, "failed to enable events\n"); 3951 goto out_failed; 3952 } 3953 3954 ioc_info(mrioc, "sending port enable\n"); 3955 retval = mpi3mr_issue_port_enable(mrioc, 0); 3956 if (retval) { 3957 ioc_err(mrioc, "failed to issue port enable\n"); 3958 goto out_failed; 3959 } 3960 3961 ioc_info(mrioc, "controller %s completed successfully\n", 3962 (is_resume)?"resume":"re-initialization"); 3963 return retval; 3964 out_failed: 3965 if (retry < 2) { 3966 retry++; 3967 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", 3968 (is_resume)?"resume":"re-initialization", retry); 3969 mpi3mr_memset_buffers(mrioc); 3970 goto retry_init; 3971 } 3972 out_failed_noretry: 3973 ioc_err(mrioc, "controller %s is failed\n", 3974 (is_resume)?"resume":"re-initialization"); 3975 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3976 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3977 mrioc->unrecoverable = 1; 3978 return retval; 3979 } 3980 3981 /** 3982 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 3983 * segments 3984 * @mrioc: Adapter instance reference 3985 * @qidx: Operational reply queue index 3986 * 3987 * Return: 
Nothing. 3988 */ 3989 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 3990 { 3991 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 3992 struct segments *segments; 3993 int i, size; 3994 3995 if (!op_reply_q->q_segments) 3996 return; 3997 3998 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 3999 segments = op_reply_q->q_segments; 4000 for (i = 0; i < op_reply_q->num_segments; i++) 4001 memset(segments[i].segment, 0, size); 4002 } 4003 4004 /** 4005 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 4006 * segments 4007 * @mrioc: Adapter instance reference 4008 * @qidx: Operational request queue index 4009 * 4010 * Return: Nothing. 4011 */ 4012 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4013 { 4014 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 4015 struct segments *segments; 4016 int i, size; 4017 4018 if (!op_req_q->q_segments) 4019 return; 4020 4021 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 4022 segments = op_req_q->q_segments; 4023 for (i = 0; i < op_req_q->num_segments; i++) 4024 memset(segments[i].segment, 0, size); 4025 } 4026 4027 /** 4028 * mpi3mr_memset_buffers - memset memory for a controller 4029 * @mrioc: Adapter instance reference 4030 * 4031 * clear all the memory allocated for a controller, typically 4032 * called post reset to reuse the memory allocated during the 4033 * controller init. 4034 * 4035 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	/* Admin queue memory is kept across resets; just zero it. */
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/*
	 * NOTE(review): init_cmds.reply gates zeroing of all the other
	 * internal command reply buffers and bitmaps below - presumably
	 * they are allocated together during init; confirm against the
	 * allocation path.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
		memset(mrioc->evtack_cmds_bitmap, 0,
		    mrioc->evtack_cmds_bitmap_sz);
	}

	/* Reset per-queue bookkeeping and zero the queue segments. */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	atomic_set(&mrioc->pend_large_data_sz, 0);
	/* Clear IO throttle group state so firmware values are re-learned. */
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/* Sense buffer pool and the sense buffer queue pool. */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	/* Reply buffer pool and the reply free queue pool. */
	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Detach operational reply queues from the interrupt vectors. */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	/* Internal driver command reply buffers. */
	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	kfree(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	kfree(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	kfree(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGL buffers all come from a single DMA pool. */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}

	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	/* timeout is counted in 100ms polls (see msleep(100) below) */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown through the IOC config register. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-reported shutdown timeout when available. */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status &
		    MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only a healthy, idle controller gets MUR + shutdown notification. */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		if (cmdptr->is_waiting) {
			/* Wake the sleeping issuer of the command. */
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
 */
static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_drv_cmd *cmdptr;
	u8 i;

	cmdptr = &mrioc->init_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->cfg_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->bsg_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	cmdptr = &mrioc->host_tm_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		cmdptr = &mrioc->dev_rmhs_cmds[i];
		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		cmdptr = &mrioc->evtack_cmds[i];
		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	}

	cmdptr = &mrioc->pel_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->pel_abort_cmd;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

}

/**
 * mpi3mr_pel_wait_post - Issue PEL Wait
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL Wait MPI request through admin queue and return.
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_wait pel_wait;

	mrioc->pel_abort_requested = false;

	memset(&pel_wait, 0, sizeof(pel_wait));
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	/* Completion is processed asynchronously via the callback. */
	drv_cmd->callback = mpi3mr_pel_wait_complete;
	drv_cmd->ioc_status = 0;
	drv_cmd->ioc_loginfo = 0;
	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.action = MPI3_PEL_ACTION_WAIT;
	/* Wait for entries newer than the last sequence number seen. */
	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
	pel_wait.class = cpu_to_le16(mrioc->pel_class);
	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);

	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
		dprint_bsg_err(mrioc,
		    "Issuing PELWait: Admin post failed\n");
		drv_cmd->state = MPI3MR_CMD_NOTUSED;
		drv_cmd->callback = NULL;
		drv_cmd->retry_count = 0;
		/* PEL cannot make progress without the wait request. */
		mrioc->pel_enabled = false;
	}
}

/**
 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL get sequence number MPI request through admin queue
 * and return.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	int retval = 0;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	/* The request is always tracked through mrioc->pel_cmds. */
	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->pel_cmds.is_waiting = 0;
	mrioc->pel_cmds.ioc_status = 0;
	mrioc->pel_cmds.ioc_loginfo = 0;
	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Firmware DMAs the sequence numbers into pel_seqnum_virt. */
	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
	    sizeof(pel_getseq_req), 0);
	if (retval) {
		if (drv_cmd) {
			drv_cmd->state = MPI3MR_CMD_NOTUSED;
			drv_cmd->callback = NULL;
			drv_cmd->retry_count = 0;
		}
		mrioc->pel_enabled = false;
	}

	return retval;
}

/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PELWait request and
 * firmware completes a PELWait request when it is aborted or a
 * new PEL entry is available. This sends AEN to the application
 * and if the PELwait completion is not due to PELAbort then
 * this will send a request for new PEL Sequence number
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* A reset already flushed this command; just release the tracker. */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (PEL abort path), not an error. */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Signal applications that new PEL data is available. */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* A reset already flushed this command; just release the tracker. */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* The next PEL wait starts just after the newest logged entry. */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function;
 * while one reset is executing, any other requested resets are
 * blocked. All BSG requests will be blocked during the reset. If
 * the controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable.
 *
 * If the snapdump bit is set, the controller is issued a diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	/* timeout is counted in 100ms polls (see msleep(100) below) */
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;

	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		/* Mask all events so firmware stops posting them mid-reset. */
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			/* Poll until the firmware diag save finishes. */
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush all pending driver and host state before reinitializing. */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	ssleep(10);

out:
	if (!retval) {
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		/* Re-arm the PEL wait if PEL logging was active. */
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mpi3mr_rfresh_tgtdevs(mrioc);
		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Reset failed: fault the firmware and mark unrecoverable. */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}


/**
 * mpi3mr_free_config_dma_memory - free memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: memory descriptor structure
 *
 * Check whether the size of the buffer specified by the memory
 * descriptor is greater than the default page size if so then
 * free the memory pointed by the descriptor.
 *
 * Return: Nothing.
 */
static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
	struct dma_memory_desc *mem_desc)
{
	/* Buffers at or below cfg_page_sz alias the default config page. */
	if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
		    mem_desc->addr, mem_desc->dma_addr);
		mem_desc->addr = NULL;
	}
}

/**
 * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: Memory descriptor to hold dma memory info
 *
 * This function allocates new dmaable memory or provides the
 * default config page dmaable memory based on the memory size
 * described by the descriptor.
 *
 * Return: 0 on success, non-zero on failure.
4841 */ 4842 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, 4843 struct dma_memory_desc *mem_desc) 4844 { 4845 if (mem_desc->size > mrioc->cfg_page_sz) { 4846 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, 4847 mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); 4848 if (!mem_desc->addr) 4849 return -ENOMEM; 4850 } else { 4851 mem_desc->addr = mrioc->cfg_page; 4852 mem_desc->dma_addr = mrioc->cfg_page_dma; 4853 memset(mem_desc->addr, 0, mrioc->cfg_page_sz); 4854 } 4855 return 0; 4856 } 4857 4858 /** 4859 * mpi3mr_post_cfg_req - Issue config requests and wait 4860 * @mrioc: Adapter instance reference 4861 * @cfg_req: Configuration request 4862 * @timeout: Timeout in seconds 4863 * @ioc_status: Pointer to return ioc status 4864 * 4865 * A generic function for posting MPI3 configuration request to 4866 * the firmware. This blocks for the completion of request for 4867 * timeout seconds and if the request times out this function 4868 * faults the controller with proper reason code. 4869 * 4870 * On successful completion of the request this function returns 4871 * appropriate ioc status from the firmware back to the caller. 4872 * 4873 * Return: 0 on success, non-zero on failure. 
4874 */ 4875 static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc, 4876 struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status) 4877 { 4878 int retval = 0; 4879 4880 mutex_lock(&mrioc->cfg_cmds.mutex); 4881 if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) { 4882 retval = -1; 4883 ioc_err(mrioc, "sending config request failed due to command in use\n"); 4884 mutex_unlock(&mrioc->cfg_cmds.mutex); 4885 goto out; 4886 } 4887 mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING; 4888 mrioc->cfg_cmds.is_waiting = 1; 4889 mrioc->cfg_cmds.callback = NULL; 4890 mrioc->cfg_cmds.ioc_status = 0; 4891 mrioc->cfg_cmds.ioc_loginfo = 0; 4892 4893 cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS); 4894 cfg_req->function = MPI3_FUNCTION_CONFIG; 4895 4896 init_completion(&mrioc->cfg_cmds.done); 4897 dprint_cfg_info(mrioc, "posting config request\n"); 4898 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO) 4899 dprint_dump(cfg_req, sizeof(struct mpi3_config_request), 4900 "mpi3_cfg_req"); 4901 retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1); 4902 if (retval) { 4903 ioc_err(mrioc, "posting config request failed\n"); 4904 goto out_unlock; 4905 } 4906 wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ)); 4907 if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) { 4908 mpi3mr_check_rh_fault_ioc(mrioc, 4909 MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT); 4910 ioc_err(mrioc, "config request timed out\n"); 4911 retval = -1; 4912 goto out_unlock; 4913 } 4914 *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK; 4915 if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS) 4916 dprint_cfg_err(mrioc, 4917 "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n", 4918 *ioc_status, mrioc->cfg_cmds.ioc_loginfo); 4919 4920 out_unlock: 4921 mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED; 4922 mutex_unlock(&mrioc->cfg_cmds.mutex); 4923 4924 out: 4925 return retval; 4926 } 4927 4928 /** 4929 * mpi3mr_process_cfg_req - config page request processor 4930 * 
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @cfg_hdr: Configuration page header
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 * @cfg_buf: Memory pointer to copy config page or header
 * @cfg_buf_sz: Size of the memory to get config page or header
 *
 * This is handler for config page read, write and config page
 * header read operations.
 *
 * This function expects the cfg_req to be populated with page
 * type, page number, action for the header read and with page
 * address for all other operations.
 *
 * The cfg_hdr can be passed as null for reading the required
 * header details; for read/write pages the cfg_hdr should point
 * to a valid configuration page header.
 *
 * This allocates dmaable memory based on the size of the config
 * buffer and sets the SGE of the cfg_req.
 *
 * For write actions, the config page data has to be passed in
 * the cfg_buf and size of the data has to be mentioned in the
 * cfg_buf_sz.
 *
 * For read/header actions, on successful completion of the
 * request with successful ioc_status the data will be copied
 * into the cfg_buf limited to a minimum of actual page size and
 * cfg_buf_sz.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions the page's attribute does not permit */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			/* Changeable pages have no persistent copy */
			if ((cfg_req->action ==
			    MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			    MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in units of dwords (4 bytes) */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		goto out;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/*
		 * NOTE(review): min_t(u16, ...) truncates both operands to
		 * 16 bits, but mem_desc.size (page_length * 4) can reach
		 * 256 KiB - 4. Real config pages are far smaller, but
		 * confirm whether this should be min_t(u32, ...).
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* Copy out the page data for header/read actions only */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return retval;
}

/**
 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
 * @mrioc: Adapter instance reference
 * @ioc_status: Pointer to return ioc status
 * @dev_pg0: Pointer to return device page 0
 * @pg_sz: Size of the memory allocated to the page pointer
 * @form: The form to be used for addressing the page
 * @form_spec: Form specific information like device handle
 *
 * This is handler for config page read for a specific device
 * page0. The ioc_status has the controller returned ioc_status.
 * This routine doesn't check ioc_status to decide whether the
 * page read is success or not and it is the callers
 * responsibility.
 *
 * Return: 0 on success, non-zero on failure.
5063 */ 5064 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5065 struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) 5066 { 5067 struct mpi3_config_page_header cfg_hdr; 5068 struct mpi3_config_request cfg_req; 5069 u32 page_address; 5070 5071 memset(dev_pg0, 0, pg_sz); 5072 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5073 memset(&cfg_req, 0, sizeof(cfg_req)); 5074 5075 cfg_req.function = MPI3_FUNCTION_CONFIG; 5076 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5077 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; 5078 cfg_req.page_number = 0; 5079 cfg_req.page_address = 0; 5080 5081 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5082 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5083 ioc_err(mrioc, "device page0 header read failed\n"); 5084 goto out_failed; 5085 } 5086 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5087 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", 5088 *ioc_status); 5089 goto out_failed; 5090 } 5091 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5092 page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | 5093 (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); 5094 cfg_req.page_address = cpu_to_le32(page_address); 5095 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5096 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { 5097 ioc_err(mrioc, "device page0 read failed\n"); 5098 goto out_failed; 5099 } 5100 return 0; 5101 out_failed: 5102 return -1; 5103 } 5104 5105 5106 /** 5107 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 5108 * @mrioc: Adapter instance reference 5109 * @ioc_status: Pointer to return ioc status 5110 * @phy_pg0: Pointer to return SAS Phy page 0 5111 * @pg_sz: Size of the memory allocated to the page pointer 5112 * @form: The form to be used for addressing the page 5113 * @form_spec: Form specific information like phy number 5114 * 5115 * This is handler for config page read for a specific SAS Phy 5116 * page0. 
The ioc_status has the controller returned ioc_status. 5117 * This routine doesn't check ioc_status to decide whether the 5118 * page read is success or not and it is the callers 5119 * responsibility. 5120 * 5121 * Return: 0 on success, non-zero on failure. 5122 */ 5123 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5124 struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, 5125 u32 form_spec) 5126 { 5127 struct mpi3_config_page_header cfg_hdr; 5128 struct mpi3_config_request cfg_req; 5129 u32 page_address; 5130 5131 memset(phy_pg0, 0, pg_sz); 5132 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5133 memset(&cfg_req, 0, sizeof(cfg_req)); 5134 5135 cfg_req.function = MPI3_FUNCTION_CONFIG; 5136 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5137 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5138 cfg_req.page_number = 0; 5139 cfg_req.page_address = 0; 5140 5141 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5142 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5143 ioc_err(mrioc, "sas phy page0 header read failed\n"); 5144 goto out_failed; 5145 } 5146 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5147 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", 5148 *ioc_status); 5149 goto out_failed; 5150 } 5151 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5152 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5153 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5154 cfg_req.page_address = cpu_to_le32(page_address); 5155 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5156 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { 5157 ioc_err(mrioc, "sas phy page0 read failed\n"); 5158 goto out_failed; 5159 } 5160 return 0; 5161 out_failed: 5162 return -1; 5163 } 5164 5165 /** 5166 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 5167 * @mrioc: Adapter instance reference 5168 * @ioc_status: Pointer to return ioc status 5169 * @phy_pg1: Pointer to return SAS Phy page 1 5170 * 
@pg_sz: Size of the memory allocated to the page pointer 5171 * @form: The form to be used for addressing the page 5172 * @form_spec: Form specific information like phy number 5173 * 5174 * This is handler for config page read for a specific SAS Phy 5175 * page1. The ioc_status has the controller returned ioc_status. 5176 * This routine doesn't check ioc_status to decide whether the 5177 * page read is success or not and it is the callers 5178 * responsibility. 5179 * 5180 * Return: 0 on success, non-zero on failure. 5181 */ 5182 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5183 struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, 5184 u32 form_spec) 5185 { 5186 struct mpi3_config_page_header cfg_hdr; 5187 struct mpi3_config_request cfg_req; 5188 u32 page_address; 5189 5190 memset(phy_pg1, 0, pg_sz); 5191 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5192 memset(&cfg_req, 0, sizeof(cfg_req)); 5193 5194 cfg_req.function = MPI3_FUNCTION_CONFIG; 5195 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5196 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5197 cfg_req.page_number = 1; 5198 cfg_req.page_address = 0; 5199 5200 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5201 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5202 ioc_err(mrioc, "sas phy page1 header read failed\n"); 5203 goto out_failed; 5204 } 5205 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5206 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", 5207 *ioc_status); 5208 goto out_failed; 5209 } 5210 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5211 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5212 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5213 cfg_req.page_address = cpu_to_le32(page_address); 5214 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5215 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { 5216 ioc_err(mrioc, "sas phy page1 read failed\n"); 5217 goto out_failed; 5218 } 5219 return 0; 5220 
out_failed: 5221 return -1; 5222 } 5223 5224 5225 /** 5226 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 5227 * @mrioc: Adapter instance reference 5228 * @ioc_status: Pointer to return ioc status 5229 * @exp_pg0: Pointer to return SAS Expander page 0 5230 * @pg_sz: Size of the memory allocated to the page pointer 5231 * @form: The form to be used for addressing the page 5232 * @form_spec: Form specific information like device handle 5233 * 5234 * This is handler for config page read for a specific SAS 5235 * Expander page0. The ioc_status has the controller returned 5236 * ioc_status. This routine doesn't check ioc_status to decide 5237 * whether the page read is success or not and it is the callers 5238 * responsibility. 5239 * 5240 * Return: 0 on success, non-zero on failure. 5241 */ 5242 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5243 struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, 5244 u32 form_spec) 5245 { 5246 struct mpi3_config_page_header cfg_hdr; 5247 struct mpi3_config_request cfg_req; 5248 u32 page_address; 5249 5250 memset(exp_pg0, 0, pg_sz); 5251 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5252 memset(&cfg_req, 0, sizeof(cfg_req)); 5253 5254 cfg_req.function = MPI3_FUNCTION_CONFIG; 5255 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5256 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5257 cfg_req.page_number = 0; 5258 cfg_req.page_address = 0; 5259 5260 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5261 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5262 ioc_err(mrioc, "expander page0 header read failed\n"); 5263 goto out_failed; 5264 } 5265 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5266 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", 5267 *ioc_status); 5268 goto out_failed; 5269 } 5270 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5271 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5272 (form_spec & 
(MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5273 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5274 cfg_req.page_address = cpu_to_le32(page_address); 5275 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5276 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { 5277 ioc_err(mrioc, "expander page0 read failed\n"); 5278 goto out_failed; 5279 } 5280 return 0; 5281 out_failed: 5282 return -1; 5283 } 5284 5285 /** 5286 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 5287 * @mrioc: Adapter instance reference 5288 * @ioc_status: Pointer to return ioc status 5289 * @exp_pg1: Pointer to return SAS Expander page 1 5290 * @pg_sz: Size of the memory allocated to the page pointer 5291 * @form: The form to be used for addressing the page 5292 * @form_spec: Form specific information like phy number 5293 * 5294 * This is handler for config page read for a specific SAS 5295 * Expander page1. The ioc_status has the controller returned 5296 * ioc_status. This routine doesn't check ioc_status to decide 5297 * whether the page read is success or not and it is the callers 5298 * responsibility. 5299 * 5300 * Return: 0 on success, non-zero on failure. 
5301 */ 5302 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5303 struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, 5304 u32 form_spec) 5305 { 5306 struct mpi3_config_page_header cfg_hdr; 5307 struct mpi3_config_request cfg_req; 5308 u32 page_address; 5309 5310 memset(exp_pg1, 0, pg_sz); 5311 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5312 memset(&cfg_req, 0, sizeof(cfg_req)); 5313 5314 cfg_req.function = MPI3_FUNCTION_CONFIG; 5315 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5316 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5317 cfg_req.page_number = 1; 5318 cfg_req.page_address = 0; 5319 5320 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5321 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5322 ioc_err(mrioc, "expander page1 header read failed\n"); 5323 goto out_failed; 5324 } 5325 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5326 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", 5327 *ioc_status); 5328 goto out_failed; 5329 } 5330 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5331 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5332 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5333 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5334 cfg_req.page_address = cpu_to_le32(page_address); 5335 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5336 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { 5337 ioc_err(mrioc, "expander page1 read failed\n"); 5338 goto out_failed; 5339 } 5340 return 0; 5341 out_failed: 5342 return -1; 5343 } 5344 5345 /** 5346 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 5347 * @mrioc: Adapter instance reference 5348 * @ioc_status: Pointer to return ioc status 5349 * @encl_pg0: Pointer to return Enclosure page 0 5350 * @pg_sz: Size of the memory allocated to the page pointer 5351 * @form: The form to be used for addressing the page 5352 * @form_spec: Form specific information like device handle 5353 * 5354 * This 
is handler for config page read for a specific Enclosure 5355 * page0. The ioc_status has the controller returned ioc_status. 5356 * This routine doesn't check ioc_status to decide whether the 5357 * page read is success or not and it is the callers 5358 * responsibility. 5359 * 5360 * Return: 0 on success, non-zero on failure. 5361 */ 5362 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5363 struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, 5364 u32 form_spec) 5365 { 5366 struct mpi3_config_page_header cfg_hdr; 5367 struct mpi3_config_request cfg_req; 5368 u32 page_address; 5369 5370 memset(encl_pg0, 0, pg_sz); 5371 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5372 memset(&cfg_req, 0, sizeof(cfg_req)); 5373 5374 cfg_req.function = MPI3_FUNCTION_CONFIG; 5375 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5376 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; 5377 cfg_req.page_number = 0; 5378 cfg_req.page_address = 0; 5379 5380 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5381 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5382 ioc_err(mrioc, "enclosure page0 header read failed\n"); 5383 goto out_failed; 5384 } 5385 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5386 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", 5387 *ioc_status); 5388 goto out_failed; 5389 } 5390 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5391 page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | 5392 (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); 5393 cfg_req.page_address = cpu_to_le32(page_address); 5394 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5395 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { 5396 ioc_err(mrioc, "enclosure page0 read failed\n"); 5397 goto out_failed; 5398 } 5399 return 0; 5400 out_failed: 5401 return -1; 5402 } 5403 5404 5405 /** 5406 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 5407 * @mrioc: Adapter instance reference 5408 * 
@sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 5409 * @pg_sz: Size of the memory allocated to the page pointer 5410 * 5411 * This is handler for config page read for the SAS IO Unit 5412 * page0. This routine checks ioc_status to decide whether the 5413 * page read is success or not. 5414 * 5415 * Return: 0 on success, non-zero on failure. 5416 */ 5417 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, 5418 struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) 5419 { 5420 struct mpi3_config_page_header cfg_hdr; 5421 struct mpi3_config_request cfg_req; 5422 u16 ioc_status = 0; 5423 5424 memset(sas_io_unit_pg0, 0, pg_sz); 5425 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5426 memset(&cfg_req, 0, sizeof(cfg_req)); 5427 5428 cfg_req.function = MPI3_FUNCTION_CONFIG; 5429 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5430 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5431 cfg_req.page_number = 0; 5432 cfg_req.page_address = 0; 5433 5434 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5435 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5436 ioc_err(mrioc, "sas io unit page0 header read failed\n"); 5437 goto out_failed; 5438 } 5439 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5440 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", 5441 ioc_status); 5442 goto out_failed; 5443 } 5444 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5445 5446 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5447 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { 5448 ioc_err(mrioc, "sas io unit page0 read failed\n"); 5449 goto out_failed; 5450 } 5451 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5452 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", 5453 ioc_status); 5454 goto out_failed; 5455 } 5456 return 0; 5457 out_failed: 5458 return -1; 5459 } 5460 5461 /** 5462 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 5463 * @mrioc: Adapter instance reference 
5464 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 5465 * @pg_sz: Size of the memory allocated to the page pointer 5466 * 5467 * This is handler for config page read for the SAS IO Unit 5468 * page1. This routine checks ioc_status to decide whether the 5469 * page read is success or not. 5470 * 5471 * Return: 0 on success, non-zero on failure. 5472 */ 5473 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5474 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5475 { 5476 struct mpi3_config_page_header cfg_hdr; 5477 struct mpi3_config_request cfg_req; 5478 u16 ioc_status = 0; 5479 5480 memset(sas_io_unit_pg1, 0, pg_sz); 5481 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5482 memset(&cfg_req, 0, sizeof(cfg_req)); 5483 5484 cfg_req.function = MPI3_FUNCTION_CONFIG; 5485 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5486 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5487 cfg_req.page_number = 1; 5488 cfg_req.page_address = 0; 5489 5490 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5491 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5492 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5493 goto out_failed; 5494 } 5495 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5496 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5497 ioc_status); 5498 goto out_failed; 5499 } 5500 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5501 5502 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5503 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5504 ioc_err(mrioc, "sas io unit page1 read failed\n"); 5505 goto out_failed; 5506 } 5507 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5508 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", 5509 ioc_status); 5510 goto out_failed; 5511 } 5512 return 0; 5513 out_failed: 5514 return -1; 5515 } 5516 5517 /** 5518 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 5519 * @mrioc: Adapter instance reference 
5520 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write 5521 * @pg_sz: Size of the memory allocated to the page pointer 5522 * 5523 * This is handler for config page write for the SAS IO Unit 5524 * page1. This routine checks ioc_status to decide whether the 5525 * page read is success or not. This will modify both current 5526 * and persistent page. 5527 * 5528 * Return: 0 on success, non-zero on failure. 5529 */ 5530 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5531 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5532 { 5533 struct mpi3_config_page_header cfg_hdr; 5534 struct mpi3_config_request cfg_req; 5535 u16 ioc_status = 0; 5536 5537 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5538 memset(&cfg_req, 0, sizeof(cfg_req)); 5539 5540 cfg_req.function = MPI3_FUNCTION_CONFIG; 5541 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5542 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5543 cfg_req.page_number = 1; 5544 cfg_req.page_address = 0; 5545 5546 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5547 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5548 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5549 goto out_failed; 5550 } 5551 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5552 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5553 ioc_status); 5554 goto out_failed; 5555 } 5556 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; 5557 5558 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5559 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5560 ioc_err(mrioc, "sas io unit page1 write current failed\n"); 5561 goto out_failed; 5562 } 5563 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5564 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", 5565 ioc_status); 5566 goto out_failed; 5567 } 5568 5569 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; 5570 5571 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 
5572 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5573 ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); 5574 goto out_failed; 5575 } 5576 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5577 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", 5578 ioc_status); 5579 goto out_failed; 5580 } 5581 return 0; 5582 out_failed: 5583 return -1; 5584 } 5585 5586 /** 5587 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 5588 * @mrioc: Adapter instance reference 5589 * @driver_pg1: Pointer to return Driver page 1 5590 * @pg_sz: Size of the memory allocated to the page pointer 5591 * 5592 * This is handler for config page read for the Driver page1. 5593 * This routine checks ioc_status to decide whether the page 5594 * read is success or not. 5595 * 5596 * Return: 0 on success, non-zero on failure. 5597 */ 5598 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, 5599 struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) 5600 { 5601 struct mpi3_config_page_header cfg_hdr; 5602 struct mpi3_config_request cfg_req; 5603 u16 ioc_status = 0; 5604 5605 memset(driver_pg1, 0, pg_sz); 5606 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5607 memset(&cfg_req, 0, sizeof(cfg_req)); 5608 5609 cfg_req.function = MPI3_FUNCTION_CONFIG; 5610 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5611 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; 5612 cfg_req.page_number = 1; 5613 cfg_req.page_address = 0; 5614 5615 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5616 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5617 ioc_err(mrioc, "driver page1 header read failed\n"); 5618 goto out_failed; 5619 } 5620 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5621 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", 5622 ioc_status); 5623 goto out_failed; 5624 } 5625 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5626 5627 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5628 
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { 5629 ioc_err(mrioc, "driver page1 read failed\n"); 5630 goto out_failed; 5631 } 5632 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5633 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", 5634 ioc_status); 5635 goto out_failed; 5636 } 5637 return 0; 5638 out_failed: 5639 return -1; 5640 } 5641