// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.c
 *
 * Authors: Broadcom Inc.
 *          Sumant Patro
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dbg.h>
#include <linux/dmi.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"


extern void megasas_free_cmds(struct megasas_instance *instance);
extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
					   *instance);
extern void
megasas_complete_cmd(struct megasas_instance *instance,
		     struct megasas_cmd *cmd, u8 alt_status);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);

void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
int megasas_alloc_cmds(struct megasas_instance *instance);
int
megasas_clear_intr_fusion(struct megasas_instance *instance);
int
megasas_issue_polled(struct megasas_instance *instance,
		     struct megasas_cmd *cmd);
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);

extern u32 megasas_dbg_lvl;
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
				  int initial);
void megasas_start_timer(struct megasas_instance *instance);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
static void megasas_fusion_crash_dump(struct megasas_instance *instance);
extern u32 megasas_readl(struct megasas_instance *instance,
			 const volatile void __iomem *addr);
/**
 * megasas_adp_reset_wait_for_ready -	initiate chip reset and wait for
 *					controller to come to ready state
 * @instance:				adapter's soft state
 * @do_adp_reset:			If true, do a chip reset
 * @ocr_context:			If called from OCR context this will
 *					be set to 1, else 0
 *
 * This function initiates a chip reset followed by a wait for the controller
 * to transition to ready state.
 * During this, the driver blocks all access to the PCI config space from
 * userspace.
 */
int
megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
				 bool do_adp_reset,
				 int ocr_context)
{
	int ret = FAILED;

	/*
	 * Block access to PCI config space from userspace
	 * when diag reset is initiated from driver
	 */
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Block access to PCI config space %s %d\n",
			 __func__, __LINE__);

	pci_cfg_access_lock(instance->pdev);

	if (do_adp_reset) {
		if (instance->instancet->adp_reset
			(instance, instance->reg_set))
			goto out;
	}

	/* Wait for FW to become ready */
	if (megasas_transition_to_ready(instance, ocr_context)) {
		dev_warn(&instance->pdev->dev,
			 "Failed to transition controller to ready for scsi%d.\n",
			 instance->host->host_no);
		goto out;
	}

	ret = SUCCESS;
out:
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Unlock access to PCI config space %s %d\n",
			 __func__, __LINE__);

	pci_cfg_access_unlock(instance->pdev);

	return ret;
}

/**
 * megasas_check_same_4gb_region -	check whether an allocation
 *					crosses a 4GB boundary or not
 * @instance:				adapter's soft instance
 * @start_addr:				start address of DMA allocation
 * @size:				size of allocation in bytes
 *
 * Return: true if the allocation does not cross a 4GB boundary,
 * false if it does.
 */
static inline bool megasas_check_same_4gb_region
	(struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
{
	dma_addr_t end_addr;

	end_addr = start_addr + size;

	if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
		dev_err(&instance->pdev->dev,
			"Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
			(unsigned long long)start_addr,
			(unsigned long long)end_addr);
		return false;
	}

	return true;
}
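/*
 * Worked example for the check above (illustrative addresses): a 1 MB
 * allocation starting at DMA address 0x1fff80000 ends at 0x200080000;
 * upper_32_bits() differs (0x1 vs 0x2), so the buffer straddles a 4GB
 * boundary and megasas_check_same_4gb_region() returns false. The same
 * allocation starting at 0x100000000 ends at 0x100100000 and passes.
 */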
/**
 * megasas_enable_intr_fusion -	Enables interrupts
 * @instance:			adapter's soft state
 */
void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	instance->mask_interrupts = 0;
	/* For Thunderbolt/Invader also clear intr on enable */
	writel(~0, &regs->outbound_intr_status);
	readl(&regs->outbound_intr_status);

	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
		 __func__, readl(&regs->outbound_intr_mask));
}

/**
 * megasas_disable_intr_fusion -	Disables interrupt
 * @instance:				adapter's soft state
 */
void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
	u32 mask = 0xFFFFFFFF;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	instance->mask_interrupts = 1;

	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
		 __func__, readl(&regs->outbound_intr_mask));
}

int
megasas_clear_intr_fusion(struct megasas_instance *instance)
{
	u32 status;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	/*
	 * Check if it is our interrupt
	 */
	status = megasas_readl(instance,
			       &regs->outbound_intr_status);

	if (status & 1) {
		writel(status, &regs->outbound_intr_status);
		readl(&regs->outbound_intr_status);
		return 1;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	return 1;
}

/**
 * megasas_get_cmd_fusion -	Get a command from the free pool
 * @instance:			Adapter soft state
 * @blk_tag:			block layer tag
 *
 * Returns a blk_tag indexed mpt frame
 */
inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
							 *instance, u32 blk_tag)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	return fusion->cmd_list[blk_tag];
}

/**
 * megasas_return_cmd_fusion -	Return a cmd to free command pool
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be returned to free command pool
 */
inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
				      struct megasas_cmd_fusion *cmd)
{
	cmd->scmd = NULL;
	memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->cmd_completed = false;
}

/**
 * megasas_write_64bit_req_desc -	PCI writes 64bit request descriptor
 * @instance:				Adapter soft state
 * @req_desc:				64bit Request descriptor
 */
static void
megasas_write_64bit_req_desc(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
		le32_to_cpu(req_desc->u.low));
	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
	unsigned long flags;
	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(le32_to_cpu(req_desc->u.low),
	       &instance->reg_set->inbound_low_queue_port);
	writel(le32_to_cpu(req_desc->u.high),
	       &instance->reg_set->inbound_high_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}

/**
 * megasas_fire_cmd_fusion -	Sends command to the FW
 * @instance:			Adapter soft state
 * @req_desc:			32bit or 64bit Request descriptor
 *
 * Perform a PCI write. The AERO series supports 32-bit (atomic) descriptors;
 * controllers prior to the AERO series require 64-bit descriptors.
 */
static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	if (instance->atomic_desc_support)
		writel(le32_to_cpu(req_desc->u.low),
		       &instance->reg_set->inbound_single_queue_port);
	else
		megasas_write_64bit_req_desc(instance, req_desc);
}
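/*
 * For example, a descriptor whose low dword carries the SMID and request
 * flags is posted either as a single atomic 32-bit write of req_desc->u.low
 * to inbound_single_queue_port (when the firmware advertises atomic
 * descriptor support), or as the full 64-bit value
 * ((u64)u.high << 32 | u.low) via megasas_write_64bit_req_desc(), which
 * falls back to two 32-bit writes under hba_lock when writeq() is not
 * available.
 */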
/**
 * megasas_fusion_update_can_queue -	Do all Adapter Queue depth related calculations here
 * @instance:				Adapter soft state
 * @fw_boot_context:			Whether this function is called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * It updates the host can_queue if the firmware has downgraded the maximum
 * number of supported commands. The firmware upgrade case is skipped because
 * the underlying firmware has more resources than are exposed to the OS.
 */
static void
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
	u16 cur_max_fw_cmds = 0;
	u16 ldio_threshold = 0;

	/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
	if (instance->adapter_type < VENTURA_SERIES)
		cur_max_fw_cmds =
		megasas_readl(instance,
			      &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;

	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
	else
		ldio_threshold =
			(instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;

	dev_info(&instance->pdev->dev,
		 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
		 cur_max_fw_cmds, ldio_threshold);

	if (fw_boot_context == OCR_CONTEXT) {
		cur_max_fw_cmds = cur_max_fw_cmds - 1;
		if (cur_max_fw_cmds < instance->max_fw_cmds) {
			instance->cur_can_queue =
				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
						   MEGASAS_FUSION_IOCTL_CMDS);
			instance->host->can_queue = instance->cur_can_queue;
			instance->ldio_threshold = ldio_threshold;
		}
	} else {
		instance->max_fw_cmds = cur_max_fw_cmds;
		instance->ldio_threshold = ldio_threshold;

		if (reset_devices)
			instance->max_fw_cmds = min(instance->max_fw_cmds,
						    (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
		/*
		 * Reduce the max supported cmds by 1. This is to ensure that the
		 * reply_q_sz (1 more than the max cmd that driver may send)
		 * does not exceed max cmds that the FW can support
		 */
		instance->max_fw_cmds = instance->max_fw_cmds-1;
	}
}
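/*
 * Worked example (illustrative counts): if the firmware reports 1008
 * supported commands at probe time, max_fw_cmds becomes 1008 - 1 = 1007.
 * If, after an OCR, a downgraded firmware reports only 928 commands,
 * can_queue is clamped to 927 - (MEGASAS_FUSION_INTERNAL_CMDS +
 * MEGASAS_FUSION_IOCTL_CMDS), while an upgraded firmware leaves the
 * previously negotiated values untouched.
 */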
static inline void
megasas_get_msix_index(struct megasas_instance *instance,
		       struct scsi_cmnd *scmd,
		       struct megasas_cmd_fusion *cmd,
		       u8 data_arms)
{
	int sdev_busy;

	/* nr_hw_queue = 1 for MegaRAID */
	struct blk_mq_hw_ctx *hctx =
		scmd->device->request_queue->queue_hw_ctx[0];

	sdev_busy = atomic_read(&hctx->nr_active);

	if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
	    sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
		cmd->request_desc->SCSIIO.MSIxIndex =
			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
					MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
	else if (instance->msix_load_balance)
		cmd->request_desc->SCSIIO.MSIxIndex =
			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
				instance->msix_vectors));
	else
		cmd->request_desc->SCSIIO.MSIxIndex =
			instance->reply_map[raw_smp_processor_id()];
}

/**
 * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
 * @instance:			Adapter soft state
 */
void
megasas_free_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *cmd;

	if (fusion->sense)
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);

	/* SG */
	if (fusion->cmd_list) {
		for (i = 0; i < instance->max_mpt_cmds; i++) {
			cmd = fusion->cmd_list[i];
			if (cmd) {
				if (cmd->sg_frame)
					dma_pool_free(fusion->sg_dma_pool,
						      cmd->sg_frame,
						      cmd->sg_frame_phys_addr);
			}
			kfree(cmd);
		}
		kfree(fusion->cmd_list);
	}

	if (fusion->sg_dma_pool) {
		dma_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	}
	if (fusion->sense_dma_pool) {
		dma_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;
	}


	/* Reply Frame, Desc*/
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	else
		megasas_free_reply_fusion(instance);

	/* Request Frame, Desc*/
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
				  fusion->request_alloc_sz, fusion->req_frames_desc,
				  fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		dma_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;
	}
}
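/*
 * Note that megasas_free_cmds_fusion() also serves as the error-unwind
 * path for a partially completed megasas_alloc_cmds_fusion(), which is
 * why every pointer is checked before being freed and each dma pool is
 * destroyed only after all of its outstanding allocations are returned.
 */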
/**
 * megasas_create_sg_sense_fusion -	Creates DMA pool for cmd frames
 * @instance:				Adapter soft state
 *
 */
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
	int i;
	u16 max_cmd;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	int sense_sz;
	u32 offset;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;
	sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;

	fusion->sg_dma_pool =
			dma_pool_create("mr_sg", &instance->pdev->dev,
				instance->max_chain_frame_sz,
				MR_DEFAULT_NVME_PAGE_SIZE, 0);
	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
	fusion->sense_dma_pool =
			dma_pool_create("mr_sense", &instance->pdev->dev,
				sense_sz, 64, 0);

	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
				       GFP_KERNEL, &fusion->sense_phys_addr);
	if (!fusion->sense) {
		dev_err(&instance->pdev->dev,
			"failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/* The sense buffer, request frame and reply desc pool must each lie
	 * within a single 4GB region. The function below checks this.
	 * In case of failure, a new pci pool is created with an updated
	 * alignment; the older allocation and pool are destroyed.
	 * The alignment is chosen such that the next allocation, if it
	 * succeeds, always meets the same-4GB-region requirement.
	 * The actual requirement is not alignment as such; rather, the start
	 * and end of the DMA address range must share the same upper 32 bits.
	 */

	if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
					   sense_sz)) {
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);
		fusion->sense = NULL;
		dma_pool_destroy(fusion->sense_dma_pool);

		fusion->sense_dma_pool =
			dma_pool_create("mr_sense_align", &instance->pdev->dev,
					sense_sz, roundup_pow_of_two(sense_sz),
					0);
		if (!fusion->sense_dma_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
					       GFP_KERNEL,
					       &fusion->sense_phys_addr);
		if (!fusion->sense) {
			dev_err(&instance->pdev->dev,
				"failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = fusion->cmd_list[i];
		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
					GFP_KERNEL, &cmd->sg_frame_phys_addr);

		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;

		if (!cmd->sg_frame) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	/* create sense buffer for the raid 1/10 fp */
	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;

	}

	return 0;
}
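/*
 * For example, with SCSI_SENSE_BUFFERSIZE == 96, command i is handed the
 * 96-byte slice at fusion->sense + 96 * i and the matching DMA address
 * fusion->sense_phys_addr + 96 * i, so a single pool allocation of
 * max_mpt_cmds * 96 bytes backs the sense buffer of every command,
 * including the extra RAID 1/10 fast path commands.
 */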
static int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
	u32 max_mpt_cmd, i, j;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	max_mpt_cmd = instance->max_mpt_cmds;

	/*
	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	fusion->cmd_list =
		kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
			GFP_KERNEL);
	if (!fusion->cmd_list) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	for (i = 0; i < max_mpt_cmd; i++) {
		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
					      GFP_KERNEL);
		if (!fusion->cmd_list[i]) {
			for (j = 0; j < i; j++)
				kfree(fusion->cmd_list[j]);
			kfree(fusion->cmd_list);
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	return 0;
}

static int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

retry_alloc:
	fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq", &instance->pdev->dev,
				fusion->io_frames_alloc_sz, 16, 0);

	if (!fusion->io_request_frames_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				GFP_KERNEL | __GFP_NOWARN,
				&fusion->io_request_frames_phys);
	if (!fusion->io_request_frames) {
		if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
			instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
			dma_pool_destroy(fusion->io_request_frames_pool);
			megasas_configure_queue_sizes(instance);
			goto retry_alloc;
		} else {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->io_request_frames_phys,
					   fusion->io_frames_alloc_sz)) {
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
		fusion->io_request_frames = NULL;
		dma_pool_destroy(fusion->io_request_frames_pool);

		fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq_align",
					&instance->pdev->dev,
					fusion->io_frames_alloc_sz,
					roundup_pow_of_two(fusion->io_frames_alloc_sz),
					0);

		if (!fusion->io_request_frames_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				       GFP_KERNEL | __GFP_NOWARN,
				       &fusion->io_request_frames_phys);

		if (!fusion->io_request_frames) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	fusion->req_frames_desc =
		dma_alloc_coherent(&instance->pdev->dev,
				   fusion->request_alloc_sz,
				   &fusion->req_frames_desc_phys, GFP_KERNEL);
	if (!fusion->req_frames_desc) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	return 0;
}
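/*
 * For example, if the first io_request_frames allocation fails with
 * max_fw_cmds == 1024, the retry loop above reduces max_fw_cmds by
 * MEGASAS_REDUCE_QD_COUNT, recomputes the queue sizes and tries again,
 * repeating while at least two reduction steps worth of commands remain
 * before giving up with -ENOMEM.
 */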
static int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	fusion = instance->ctrl_context;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply", &instance->pdev->dev,
				fusion->reply_alloc_sz * count, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->reply_frames_desc[0] =
		dma_pool_alloc(fusion->reply_frames_desc_pool,
			       GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
	if (!fusion->reply_frames_desc[0]) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->reply_frames_desc_phys[0],
					   (fusion->reply_alloc_sz * count))) {
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);
		fusion->reply_frames_desc[0] = NULL;
		dma_pool_destroy(fusion->reply_frames_desc_pool);

		fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply_align",
					&instance->pdev->dev,
					fusion->reply_alloc_sz * count,
					roundup_pow_of_two(fusion->reply_alloc_sz * count),
					0);

		if (!fusion->reply_frames_desc_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->reply_frames_desc[0] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL,
				       &fusion->reply_frames_desc_phys[0]);

		if (!fusion->reply_frames_desc[0]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	reply_desc = fusion->reply_frames_desc[0];
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = cpu_to_le64(ULLONG_MAX);

	/* This is not RDPQ mode, but the driver still populates the
	 * reply_frames_desc array to use the same msix index in the ISR path.
	 */
	for (i = 0; i < (count - 1); i++)
		fusion->reply_frames_desc[i + 1] =
			fusion->reply_frames_desc[i] +
			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);

	return 0;
}
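/*
 * For example, with 8 MSI-x vectors a single contiguous buffer of
 * 8 * reply_alloc_sz bytes is allocated, and reply_frames_desc[1..7] are
 * derived by stepping reply_alloc_sz bytes (i.e. reply_alloc_sz /
 * sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) descriptors) past the
 * previous queue's base address.
 */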
static int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
	int i, j, k, msix_count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
	dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
	u8 dma_alloc_count, abs_index;
	u32 chunk_size, array_size, offset;

	fusion = instance->ctrl_context;
	chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
		     MAX_MSIX_QUEUES_FUSION;

	fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
					       array_size, &fusion->rdpq_phys,
					       GFP_KERNEL);
	if (!fusion->rdpq_virt) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
							 &instance->pdev->dev,
							 chunk_size, 16, 0);
	fusion->reply_frames_desc_pool_align =
				dma_pool_create("mr_rdpq_align",
						&instance->pdev->dev,
						chunk_size,
						roundup_pow_of_two(chunk_size),
						0);

	if (!fusion->reply_frames_desc_pool ||
	    !fusion->reply_frames_desc_pool_align) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/*
	 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
	 * for VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..)
	 * should be within a 4GB boundary, and the reply queues in a set must
	 * also have the same upper 32 bits in their memory address. So here
	 * the driver allocates the DMA'able memory for the reply queues
	 * accordingly. The driver applies the VENTURA_SERIES limitation to
	 * INVADER_SERIES as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);

	for (i = 0; i < dma_alloc_count; i++) {
		rdpq_chunk_virt[i] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL, &rdpq_chunk_phys[i]);
		if (!rdpq_chunk_virt[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		/* The reply desc pool must lie within a single 4GB region.
		 * The function below checks this.
		 * In case of failure, a new pci pool is created with an
		 * updated alignment.
		 * For RDPQ buffers, the driver always allocates two separate
		 * pci pools.
		 * The alignment is chosen such that the next allocation, if it
		 * succeeds, always meets the same-4GB-region requirement.
		 * rdpq_tracker keeps track of each buffer's physical and
		 * virtual address and its pci pool descriptor, which helps
		 * the driver while freeing the resources.
		 *
		 */
		if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
						   chunk_size)) {
			dma_pool_free(fusion->reply_frames_desc_pool,
				      rdpq_chunk_virt[i],
				      rdpq_chunk_phys[i]);

			rdpq_chunk_virt[i] =
				dma_pool_alloc(fusion->reply_frames_desc_pool_align,
					       GFP_KERNEL, &rdpq_chunk_phys[i]);
			if (!rdpq_chunk_virt[i]) {
				dev_err(&instance->pdev->dev,
					"Failed from %s %d\n",
					__func__, __LINE__);
				return -ENOMEM;
			}
			fusion->rdpq_tracker[i].dma_pool_ptr =
				fusion->reply_frames_desc_pool_align;
		} else {
			fusion->rdpq_tracker[i].dma_pool_ptr =
				fusion->reply_frames_desc_pool;
		}

		fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
		fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
	}

	for (k = 0; k < dma_alloc_count; k++) {
		for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
			abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;

			if (abs_index == msix_count)
				break;
			offset = fusion->reply_alloc_sz * i;
			fusion->rdpq_virt[abs_index].RDPQBaseAddress =
				cpu_to_le64(rdpq_chunk_phys[k] + offset);
			fusion->reply_frames_desc_phys[abs_index] =
				rdpq_chunk_phys[k] + offset;
			fusion->reply_frames_desc[abs_index] =
				(union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);

			reply_desc = fusion->reply_frames_desc[abs_index];
			for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
				reply_desc->Words = ULLONG_MAX;
		}
	}

	return 0;
}
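/*
 * Worked example for the RDPQ layout above: with
 * RDPQ_MAX_INDEX_IN_ONE_CHUNK == 16 and 24 MSI-x vectors,
 * dma_alloc_count == 2, so reply queues 0-15 live in chunk 0 and queues
 * 16-23 in chunk 1, each queue at offset reply_alloc_sz * i within its
 * chunk; every 16-queue set therefore stays inside one 4GB region.
 */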
static void
megasas_free_rdpq_fusion(struct megasas_instance *instance) {

	int i;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
		if (fusion->rdpq_tracker[i].pool_entry_virt)
			dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
				      fusion->rdpq_tracker[i].pool_entry_virt,
				      fusion->rdpq_tracker[i].pool_entry_phys);

	}

	dma_pool_destroy(fusion->reply_frames_desc_pool);
	dma_pool_destroy(fusion->reply_frames_desc_pool_align);

	if (fusion->rdpq_virt)
		dma_free_coherent(&instance->pdev->dev,
				  sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
				  fusion->rdpq_virt, fusion->rdpq_phys);
}

static void
megasas_free_reply_fusion(struct megasas_instance *instance) {

	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->reply_frames_desc[0])
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);

	dma_pool_destroy(fusion->reply_frames_desc_pool);

}


/**
 * megasas_alloc_cmds_fusion -	Allocates the command packets
 * @instance:			Adapter soft state
 *
 *
 * Each frame has a 32-bit field called context. This context is used to get
 * back the megasas_cmd_fusion from the frame when a frame gets completed.
 * In this driver, the 32-bit values are the indices into the array cmd_list.
 * This array is used only to look up the megasas_cmd_fusion given the context.
 * The free commands themselves are maintained in a linked list called cmd_pool.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request descriptor
 * and is used as the SMID of the cmd.
 * SMID value range is from 1 to max_fw_cmds.
 */
static int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	u32 offset;
	dma_addr_t io_req_base_phys;
	u8 *io_req_base;


	fusion = instance->ctrl_context;

	if (megasas_alloc_request_fusion(instance))
		goto fail_exit;

	if (instance->is_rdpq) {
		if (megasas_alloc_rdpq_fusion(instance))
			goto fail_exit;
	} else
		if (megasas_alloc_reply_fusion(instance))
			goto fail_exit;

	if (megasas_alloc_cmdlist_fusion(instance))
		goto fail_exit;

	dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
		 instance->max_fw_cmds);

	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

	/*
	 * Add all the commands to command pool (fusion->cmd_pool)
	 */

	/* SMID 0 is reserved. Set SMID/index from 1 */
	for (i = 0; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
		cmd->index = i + 1;
		cmd->scmd = NULL;
		cmd->sync_cmd_idx =
			(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
				(i - instance->max_scsi_cmds) :
				(u32)ULONG_MAX; /* Set to Invalid */
		cmd->instance = instance;
		cmd->io_request =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
		  (io_req_base + offset);
		memset(cmd->io_request, 0,
		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	}

	if (megasas_create_sg_sense_fusion(instance))
		goto fail_exit;

	return 0;

fail_exit:
	megasas_free_cmds_fusion(instance);
	return -ENOMEM;
}
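/*
 * For example, cmd_list[0] gets index (SMID) 1 and its io_request points
 * at io_request_frames + 256 bytes: the 256-byte frame at offset 0
 * belongs to the reserved SMID 0 and is never handed out.
 */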
/**
 * wait_and_poll -	Issues a polling command
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be issued
 * @seconds:		Maximum poll time in seconds
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	int seconds)
{
	int i;
	struct megasas_header *frame_hdr = &cmd->frame->hdr;
	u32 status_reg;

	u32 msecs = seconds * 1000;

	/*
	 * Wait for cmd_status to change
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
		rmb();
		msleep(20);
		if (!(i % 5000)) {
			status_reg = instance->instancet->read_fw_status_reg(instance)
					& MFI_STATE_MASK;
			if (status_reg == MFI_STATE_FAULT)
				break;
		}
	}

	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
		return DCMD_TIMEOUT;
	else if (frame_hdr->cmd_status == MFI_STAT_OK)
		return DCMD_SUCCESS;
	else
		return DCMD_FAILED;
}
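/*
 * For example, a 180 second timeout polls cmd_status every 20 ms for up
 * to 180000 ms, and every 5000 ms additionally samples the firmware
 * state register so that a FAULT state can cut the wait short; a status
 * still at MFI_STAT_INVALID_STATUS afterwards is reported as
 * DCMD_TIMEOUT.
 */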
"Yes" : "No"); 1125 1126 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); 1127 1128 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 1129 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 1130 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); 1131 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 1132 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 1133 1134 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); 1135 IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ? 1136 cpu_to_le64(fusion->rdpq_phys) : 1137 cpu_to_le64(fusion->reply_frames_desc_phys[0]); 1138 IOCInitMessage->MsgFlags = instance->is_rdpq ? 1139 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; 1140 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 1141 IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr)); 1142 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 1143 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 1144 1145 time = ktime_get_real(); 1146 /* Convert to milliseconds as per FW requirement */ 1147 IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time)); 1148 1149 init_frame = (struct megasas_init_frame *)cmd->frame; 1150 memset(init_frame, 0, IOC_INIT_FRAME_SIZE); 1151 1152 frame_hdr = &cmd->frame->hdr; 1153 frame_hdr->cmd_status = 0xFF; 1154 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1155 1156 init_frame->cmd = MFI_CMD_INIT; 1157 init_frame->cmd_status = 0xFF; 1158 1159 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); 1160 1161 /* driver support Extended MSIX */ 1162 if (instance->adapter_type >= INVADER_SERIES) 1163 drv_ops->mfi_capabilities.support_additional_msix = 1; 1164 /* driver supports HA / Remote LUN over Fast Path interface */ 1165 drv_ops->mfi_capabilities.support_fp_remote_lun = 1; 1166 1167 drv_ops->mfi_capabilities.support_max_255lds = 1; 1168 drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1; 1169 drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1; 1170 1171 if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 1172 drv_ops->mfi_capabilities.support_ext_io_size = 1; 1173 1174 drv_ops->mfi_capabilities.support_fp_rlbypass = 1; 1175 if (!dual_qdepth_disable) 1176 drv_ops->mfi_capabilities.support_ext_queue_depth = 1; 1177 1178 drv_ops->mfi_capabilities.support_qd_throttling = 1; 1179 drv_ops->mfi_capabilities.support_pd_map_target_id = 1; 1180 drv_ops->mfi_capabilities.support_nvme_passthru = 1; 1181 drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1; 1182 1183 if (instance->consistent_mask_64bit) 1184 drv_ops->mfi_capabilities.support_64bit_mode = 1; 1185 1186 /* Convert capability to LE32 */ 1187 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 1188 1189 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); 1190 if (instance->system_info_buf && sys_info) { 1191 memcpy(instance->system_info_buf->systemId, sys_info, 1192 strlen(sys_info) > 64 ? 64 : strlen(sys_info)); 1193 instance->system_info_buf->systemIdLength = 1194 strlen(sys_info) > 64 ? 
			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
		init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
		init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
	}

	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(ioc_init_handle));
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(ioc_init_handle));
	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));

	/*
	 * Each bit in replyqueue_mask represents one group of MSI-x vectors
	 * (each group has 8 vectors)
	 */
	switch (instance->perf_mode) {
	case MR_BALANCED_PERF_MODE:
		init_frame->replyqueue_mask =
		       cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
		break;
	case MR_IOPS_PERF_MODE:
		init_frame->replyqueue_mask =
		       cpu_to_le16(~(~0 << instance->msix_vectors/8));
		break;
	}


	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
	req_desc.MFAIo.RequestFlags =
		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/*
	 * disable the intr before firing the init frame
	 */
	instance->instancet->disable_intr(instance);

	for (i = 0; i < (10 * 1000); i += 20) {
		if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
			msleep(20);
		else
			break;
	}

	/* For AERO also, IOC_INIT requires 64 bit descriptor write */
	megasas_write_64bit_req_desc(instance, &req_desc);

	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);

	frame_hdr = &cmd->frame->hdr;
	if (frame_hdr->cmd_status != 0) {
		ret = 1;
		goto fail_fw_init;
	}

	if (instance->adapter_type >= AERO_SERIES) {
		scratch_pad_1 = megasas_readl
			(instance, &instance->reg_set->outbound_scratch_pad_1);

		instance->atomic_desc_support =
			(scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;

		dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
			 instance->atomic_desc_support ? "Yes" : "No");
	}

	return 0;

fail_fw_init:
	dev_err(&instance->pdev->dev,
		"Init cmd return status FAILED for SCSI host %d\n",
		instance->host->host_no);

	return ret;
}
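/*
 * Worked example for the replyqueue_mask setting above: in balanced
 * performance mode with low_latency_index_start == 8, the mask is
 * ~(~0 << 8/8) == 0x1, i.e. only the first group of 8 MSI-x vectors
 * (the high-IOPS queues) is marked for firmware interrupt coalescing.
 */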
/**
 * megasas_sync_pd_seq_num -	JBOD SEQ MAP
 * @instance:			Adapter soft state
 * @pend:			set to 1, if it is pended jbod map.
 *
 * Issue the JBOD map to the firmware. If it is a pended command,
 * issue the command and return. If it is the first instance of the JBOD
 * map, issue the command and wait for its completion.
 */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
	int ret = 0;
	size_t pd_seq_map_sz;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	dma_addr_t pd_seq_h;

	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
	pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		dev_err(&instance->pdev->dev,
			"Could not get mfi cmd. Fail from %s %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (pend) {
		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = MFI_FRAME_DIR_WRITE;
		instance->jbod_seq_cmd = cmd;
	} else {
		dcmd->flags = MFI_FRAME_DIR_READ;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);

	if (pend) {
		instance->instancet->issue_dcmd(instance, cmd);
		return 0;
	}

	/* Below code is only for non pended DCMD */
	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		dev_warn(&instance->pdev->dev,
			 "driver supports max %d JBOD, but FW reports %d\n",
			 MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
		ret = -EINVAL;
	}

	if (ret == DCMD_TIMEOUT)
		dev_warn(&instance->pdev->dev,
			 "%s DCMD timed out, continue without JBOD sequence map\n",
			 __func__);

	if (ret == DCMD_SUCCESS)
		instance->pd_seq_map_id++;

	megasas_return_cmd(instance, cmd);
	return ret;
}
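/*
 * For example, the JBOD map ping-pongs between the two preallocated
 * buffers: with pd_seq_map_id == 4, pd_seq_sync[0] is used, and after a
 * successful non-pended DCMD bumps pd_seq_map_id to 5, the next sync
 * (including the pended one left with the firmware) uses pd_seq_sync[1].
 */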
/*
 * megasas_get_ld_map_info -	Returns FW's ld_map structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD map
 * structure. This information is mainly used to find out the configuration
 * supported by the FW.
 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
 * dcmd.mbox.b[0]	- number of LDs being sync'd
 * dcmd.mbox.b[1]	- 0 - complete command immediately.
 *			- 1 - pend till config change
 * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
 *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
 *			      uses extended struct MR_FW_RAID_MAP_EXT
 */
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	void *ci;
	dma_addr_t ci_h = 0;
	u32 size_map_info;
	struct fusion_context *fusion;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return -ENXIO;
	}

	dcmd = &cmd->frame->dcmd;

	size_map_info = fusion->current_map_sz;

	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, fusion->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT)
		dev_warn(&instance->pdev->dev,
			 "%s DCMD timed out, RAID map is disabled\n",
			 __func__);

	megasas_return_cmd(instance, cmd);

	return ret;
}

u8
megasas_get_map_info(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;

	fusion->fast_path_io = 0;
	if (!megasas_get_ld_map_info(instance)) {
		if (MR_ValidateMapInfo(instance, instance->map_id)) {
			fusion->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}
/*
 * megasas_sync_map_info -	Syncs LD target info with the FW
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to sync the LD target IDs and
 * sequence numbers with the FW. The command is left pended in the
 * firmware until a config change completes it.
 */
int
megasas_sync_map_info(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u16 num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_DRV_RAID_MAP_ALL *map;
	struct MR_LD_RAID *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;
	u32 size_map_info;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return 1;
	}

	map = fusion->ld_drv_map[instance->map_id & 1];

	num_lds = le16_to_cpu(map->raidMap.ldCount);

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	ci = (struct MR_LD_TARGET_SYNC *)
		fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, fusion->max_map_sz);

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = fusion->current_map_sz;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);

	return 0;
}
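/*
 * For example, with map_id == 3 the current RAID map was fetched into
 * ld_map[3 & 1] == ld_map[1], so megasas_sync_map_info() stages the
 * target sync data in the other buffer, ld_map[(3 - 1) & 1] == ld_map[0],
 * and leaves the DCMD pended in the firmware until the next config
 * change completes it.
 */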
/*
 * megasas_display_intel_branding -	Display branding string
 * @instance:				per adapter object
 *
 * Return nothing.
 */
static void
megasas_display_intel_branding(struct megasas_instance *instance)
{
	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_INVADER:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3DC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3DC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3DC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3DC040_BRANDING);
			break;
		case MEGARAID_INTEL_RS3SC008_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3SC008_BRANDING);
			break;
		case MEGARAID_INTEL_RS3MC044_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3MC044_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_FURY:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3WC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3WC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3WC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3WC040_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RMS3BC160_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RMS3BC160_BRANDING);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * megasas_allocate_raid_maps -	Allocate memory for RAID maps
 * @instance:			Adapter soft state
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	int i = 0;

	fusion = instance->ctrl_context;

	fusion->drv_map_pages = get_order(fusion->drv_map_sz);

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = NULL;

		fusion->ld_drv_map[i] = (void *)
			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
					 fusion->drv_map_pages);

		if (!fusion->ld_drv_map[i]) {
			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);

			if (!fusion->ld_drv_map[i]) {
				dev_err(&instance->pdev->dev,
					"Could not allocate memory for local map"
					" size requested: %d\n",
					fusion->drv_map_sz);
				goto ld_drv_map_alloc_fail;
			}
		}
	}

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
						       fusion->max_map_sz,
						       &fusion->ld_map_phys[i],
						       GFP_KERNEL);
		if (!fusion->ld_map[i]) {
			dev_err(&instance->pdev->dev,
				"Could not allocate memory for map info %s:%d\n",
				__func__, __LINE__);
			goto ld_map_alloc_fail;
		}
	}

	return 0;

ld_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_map[i])
			dma_free_coherent(&instance->pdev->dev,
					  fusion->max_map_sz,
					  fusion->ld_map[i],
					  fusion->ld_map_phys[i]);
	}

ld_drv_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_drv_map[i]) {
			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
				vfree(fusion->ld_drv_map[i]);
			else
				free_pages((ulong)fusion->ld_drv_map[i],
					   fusion->drv_map_pages);
		}
	}

	return -ENOMEM;
}

/**
 * megasas_configure_queue_sizes -	Calculate size of request desc queue,
 *					reply desc queue,
 *					IO request frame queue, set can_queue.
 * @instance:				Adapter soft state
 * @return:				void
 */
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u16 max_cmd;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;

	if (instance->adapter_type >= VENTURA_SERIES)
		instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
	else
		instance->max_mpt_cmds = instance->max_fw_cmds;

	instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
	instance->cur_can_queue = instance->max_scsi_cmds;
	instance->host->can_queue = instance->cur_can_queue;

	fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;

	fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
					  instance->max_mpt_cmds;
	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
					(fusion->reply_q_depth);
	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
		 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
}
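/*
 * Worked example for the sizing above: with max_fw_cmds == 1007,
 * reply_q_depth = 2 * ((1007 + 1 + 15) / 16) * 16 = 2016, i.e. twice the
 * command count (plus one) rounded up to a multiple of 16, while
 * io_frames_alloc_sz additionally reserves frame space beyond
 * max_mpt_cmds for the unused SMID 0 frame.
 */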
static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	struct megasas_cmd *cmd;

	fusion = instance->ctrl_context;

	cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
					IOC_INIT_FRAME_SIZE,
					&cmd->frame_phys_addr, GFP_KERNEL);

	if (!cmd->frame) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		kfree(cmd);
		return -ENOMEM;
	}

	fusion->ioc_init_cmd = cmd;
	return 0;
}

/**
 * megasas_free_ioc_init_cmd -	Free IOC INIT command frame
 * @instance:			Adapter soft state
 */
static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
		dma_free_coherent(&instance->pdev->dev,
				  IOC_INIT_FRAME_SIZE,
				  fusion->ioc_init_cmd->frame,
				  fusion->ioc_init_cmd->frame_phys_addr);

	kfree(fusion->ioc_init_cmd);
}

/**
 * megasas_init_adapter_fusion -	Initializes the FW
 * @instance:				Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u32 scratch_pad_1;
	int i = 0, count;
	u32 status_reg;

	fusion = instance->ctrl_context;

	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);

	/*
	 * Only the driver's internal DCMDs and IOCTL DCMDs need MFI frames
	 */
	instance->max_mfi_cmds =
		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;

	megasas_configure_queue_sizes(instance);

	scratch_pad_1 = megasas_readl(instance,
				      &instance->reg_set->outbound_scratch_pad_1);
	/* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * the firmware supports an extended IO chain frame that is 4 times
	 * larger than the legacy firmware's:
	 * Legacy Firmware - Frame size is (8 * 128) = 1K
	 * 1M IO Firmware  - Frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
	else
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;

	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
			 instance->max_chain_frame_sz,
			 MEGASAS_CHAIN_FRAME_SZ_MIN);
		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
	}

	fusion->max_sge_in_main_msg =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
		 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;

	fusion->max_sge_in_chain =
		instance->max_chain_frame_sz
			/ sizeof(union MPI2_SGE_IO_UNION);

	instance->max_num_sge =
		rounddown_pow_of_two(fusion->max_sge_in_main_msg
				     + fusion->max_sge_in_chain - 2);

	/* Used for pass thru MFI frame (DCMD) */
	fusion->chain_offset_mfi_pthru =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;

	fusion->chain_offset_io_request =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		 sizeof(union MPI2_SGE_IO_UNION))/16;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0 ; i < count; i++)
		fusion->last_reply_idx[i] = 0;

	/*
	 * For fusion adapters, 3 commands for IOCTL and 8 commands
	 * for driver's internal DCMDs.
1842 */ 1843 instance->max_scsi_cmds = instance->max_fw_cmds - 1844 (MEGASAS_FUSION_INTERNAL_CMDS + 1845 MEGASAS_FUSION_IOCTL_CMDS); 1846 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); 1847 1848 if (megasas_alloc_ioc_init_frame(instance)) 1849 return 1; 1850 1851 /* 1852 * Allocate memory for descriptors 1853 * Create a pool of commands 1854 */ 1855 if (megasas_alloc_cmds(instance)) 1856 goto fail_alloc_mfi_cmds; 1857 if (megasas_alloc_cmds_fusion(instance)) 1858 goto fail_alloc_cmds; 1859 1860 if (megasas_ioc_init_fusion(instance)) { 1861 status_reg = instance->instancet->read_fw_status_reg(instance); 1862 if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) && 1863 (status_reg & MFI_RESET_ADAPTER)) { 1864 /* Do a chip reset and then retry IOC INIT once */ 1865 if (megasas_adp_reset_wait_for_ready 1866 (instance, true, 0) == FAILED) 1867 goto fail_ioc_init; 1868 1869 if (megasas_ioc_init_fusion(instance)) 1870 goto fail_ioc_init; 1871 } else { 1872 goto fail_ioc_init; 1873 } 1874 } 1875 1876 megasas_display_intel_branding(instance); 1877 if (megasas_get_ctrl_info(instance)) { 1878 dev_err(&instance->pdev->dev, 1879 "Could not get controller info. Fail from %s %d\n", 1880 __func__, __LINE__); 1881 goto fail_ioc_init; 1882 } 1883 1884 instance->flag_ieee = 1; 1885 instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; 1886 instance->threshold_reply_count = instance->max_fw_cmds / 4; 1887 fusion->fast_path_io = 0; 1888 1889 if (megasas_allocate_raid_maps(instance)) 1890 goto fail_ioc_init; 1891 1892 if (!megasas_get_map_info(instance)) 1893 megasas_sync_map_info(instance); 1894 1895 return 0; 1896 1897 fail_ioc_init: 1898 megasas_free_cmds_fusion(instance); 1899 fail_alloc_cmds: 1900 megasas_free_cmds(instance); 1901 fail_alloc_mfi_cmds: 1902 megasas_free_ioc_init_cmd(instance); 1903 return 1; 1904 } 1905 1906 /** 1907 * megasas_fault_detect_work - Worker function of 1908 * FW fault handling workqueue. 
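 * @work: work_struct embedded in the adapter's fw_fault_work delayed work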
 */
static void
megasas_fault_detect_work(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance,
			     fw_fault_work.work);
	u32 fw_state, dma_state, status;

	/* Check the fw state */
	fw_state = instance->instancet->read_fw_status_reg(instance) &
			MFI_STATE_MASK;

	if (fw_state == MFI_STATE_FAULT) {
		dma_state = instance->instancet->read_fw_status_reg(instance) &
				MFI_STATE_DMADONE;
		/* Start collecting the crash dump, if the DMA-done bit is set */
		if (instance->crash_dump_drv_support &&
		    instance->crash_dump_app_support && dma_state) {
			megasas_fusion_crash_dump(instance);
		} else {
			if (instance->unload == 0) {
				status = megasas_reset_fusion(instance->host, 0);
				if (status != SUCCESS) {
					dev_err(&instance->pdev->dev,
						"Failed from %s %d, do not re-arm timer\n",
						__func__, __LINE__);
					return;
				}
			}
		}
	}

	if (instance->fw_fault_work_q)
		queue_delayed_work(instance->fw_fault_work_q,
			&instance->fw_fault_work,
			msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
}

int
megasas_fusion_start_watchdog(struct megasas_instance *instance)
{
	/* Check if the fault watchdog workqueue is already started */
	if (instance->fw_fault_work_q)
		return SUCCESS;

	INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);

	snprintf(instance->fault_handler_work_q_name,
		 sizeof(instance->fault_handler_work_q_name),
		 "poll_megasas%d_status", instance->host->host_no);

	instance->fw_fault_work_q =
		create_singlethread_workqueue(instance->fault_handler_work_q_name);
	if (!instance->fw_fault_work_q) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return FAILED;
	}

	queue_delayed_work(instance->fw_fault_work_q,
			   &instance->fw_fault_work,
			   msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));

	return SUCCESS;
}

void
megasas_fusion_stop_watchdog(struct megasas_instance *instance)
{
	struct workqueue_struct *wq;

	if (instance->fw_fault_work_q) {
		wq = instance->fw_fault_work_q;
		instance->fw_fault_work_q = NULL;
		if (!cancel_delayed_work_sync(&instance->fw_fault_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * map_cmd_status - Maps FW cmd status to OS cmd status
 * @fusion:      fusion context
 * @scmd:        SCSI command from the mid-layer
 * @status:      status of cmd returned by FW
 * @ext_status:  ext status of cmd returned by FW
 * @data_length: number of bytes actually transferred
 * @sense:       sense data returned by FW
 */
static void
map_cmd_status(struct fusion_context *fusion,
	       struct scsi_cmnd *scmd, u8 status, u8 ext_status,
	       u32 data_length, u8 *sense)
{
	u8 cmd_type;
	int resid;

	cmd_type = megasas_cmd_type(scmd);
	switch (status) {
	case MFI_STAT_OK:
		scmd->result = DID_OK << 16;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		scmd->result = (DID_ERROR << 16) | ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:

		scmd->result = (DID_OK << 16) | ext_status;
		if (ext_status == SAM_STAT_CHECK_CONDITION) {
			memset(scmd->sense_buffer, 0,
			       SCSI_SENSE_BUFFERSIZE);
			memcpy(scmd->sense_buffer, sense,
			       SCSI_SENSE_BUFFERSIZE);
			scmd->result |= DRIVER_SENSE << 24;
		}

		/*
		 * If the IO request is partially completed, then the MR FW
		 * updates "io_request->DataLength" with the actual number of
		 * bytes transferred. The driver then sets the residual byte
		 * count in the SCSI command structure; for example, a 64 KiB
		 * read that completes only 60 KiB yields a residual of
		 * 4 KiB (4096 bytes).
		 */
		resid = (scsi_bufflen(scmd) - data_length);
		scsi_set_resid(scmd, resid);

		if (resid &&
		    ((cmd_type == READ_WRITE_LDIO) ||
		     (cmd_type == READ_WRITE_SYSPDIO)))
			scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
				    " requested/completed 0x%x/0x%x\n",
				    status, scsi_bufflen(scmd), data_length);
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		scmd->result = DID_BAD_TARGET << 16;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		scmd->result = DID_IMM_RETRY << 16;
		break;
	default:
		scmd->result = DID_ERROR << 16;
		break;
	}
}

/**
 * megasas_is_prp_possible -
 * Checks if native NVMe PRPs can be built for the IO
 *
 * @instance: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sge_count: scatter gather element count.
 *
 * Returns:	true: PRPs can be built
 *		false: IEEE SGLs need to be built
 */
static bool
megasas_is_prp_possible(struct megasas_instance *instance,
			struct scsi_cmnd *scmd, int sge_count)
{
	int i;
	u32 data_length = 0;
	struct scatterlist *sg_scmd;
	bool build_prp = false;
	u32 mr_nvme_pg_size;

	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
				MR_DEFAULT_NVME_PAGE_SIZE);
	data_length = scsi_bufflen(scmd);
	sg_scmd = scsi_sglist(scmd);

	/*
	 * NVMe uses one PRP for each page (or part of a page).
	 * Look at the data length:
	 * - 4 pages or less: IEEE SGLs are fine;
	 * - more than 5 pages: a native (PRP) SGL must be built;
	 * - more than 4 and at most 5 pages: check the first SG entry;
	 *   if its size is >= the residual beyond 4 pages, use IEEE SGLs,
	 *   otherwise build a native SGL.
	 */
	if (data_length > (mr_nvme_pg_size * 5)) {
		build_prp = true;
	} else if ((data_length > (mr_nvme_pg_size * 4)) &&
		   (data_length <= (mr_nvme_pg_size * 5))) {
		/* check if 1st SG entry size is < residual beyond 4 pages */
		if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
			build_prp = true;
	}

	/*
	 * The code below detects gaps/holes in IO data buffers.
	 * What do holes/gaps mean?
	 * Any SGE except the first one in an SGL starts at an address that
	 * is not NVMe page size aligned, OR any SGE except the last one in
	 * an SGL ends at a non NVMe page size boundary.
	 * For example, assuming a 4 KiB NVMe page size, a two-element SGL
	 * whose first SGE is {addr 0x10000, len 0x800} ends at 0x10800,
	 * which is not page aligned, so the buffer has a hole and PRPs
	 * cannot be used.
	 *
	 * The driver has already informed the block layer of the NVMe page
	 * size boundary rule for bio merging by calling the kernel API
	 * blk_queue_virt_boundary() inside slave_config. IOs with holes can
	 * still reach the driver because of merging done by the IO
	 * scheduler.
	 *
	 * With SCSI BLK MQ enabled, there will be no IOs with holes, as
	 * there is no IO scheduling and therefore no IO merging.
	 *
	 * With SCSI BLK MQ disabled, the IO scheduler may merge IOs and
	 * then send IOs with holes.
	 *
	 * Though the driver can ask the block layer to disable IO merging
	 * by calling blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
	 * sdev->request_queue), the user may tune the sysfs parameter
	 * nomerges back to 0 or 1.
	 *
	 * If in the future IO scheduling is enabled with SCSI BLK MQ, this
	 * hole-detection algorithm will be required for that case as well.
	 */
	scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
		if ((i != 0) && (i != (sge_count - 1))) {
			if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
			    mega_mod64(sg_dma_address(sg_scmd),
				       mr_nvme_pg_size)) {
				build_prp = false;
				break;
			}
		}

		if ((sge_count > 1) && (i == 0)) {
			if ((mega_mod64((sg_dma_address(sg_scmd) +
					sg_dma_len(sg_scmd)),
					mr_nvme_pg_size))) {
				build_prp = false;
				break;
			}
		}

		if ((sge_count > 1) && (i == (sge_count - 1))) {
			if (mega_mod64(sg_dma_address(sg_scmd),
				       mr_nvme_pg_size)) {
				build_prp = false;
				break;
			}
		}
	}

	return build_prp;
}

/**
 * megasas_make_prp_nvme -
 * Prepare PRPs (Physical Region Page) - SGLs specific to NVMe drives only
 *
 * @instance: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_ptr: SGL to be filled in
 * @cmd: Fusion command frame
 * @sge_count: scatter gather element count.
 *
 * Returns:	true: PRPs are built
 *		false: IEEE SGLs need to be built
 */
static bool
megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
		      struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
		      struct megasas_cmd_fusion *cmd, int sge_count)
{
	int sge_len, offset, num_prp_in_chain = 0;
	struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
	u64 *ptr_sgl;
	dma_addr_t ptr_sgl_phys;
	u64 sge_addr;
	u32 page_mask, page_mask_result;
	struct scatterlist *sg_scmd;
	u32 first_prp_len;
	bool build_prp = false;
	int data_len = scsi_bufflen(scmd);
	u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
			MR_DEFAULT_NVME_PAGE_SIZE);

	build_prp = megasas_is_prp_possible(instance, scmd, sge_count);

	if (!build_prp)
		return false;

	/*
	 * NVMe has a very convoluted PRP format. One PRP is required for
	 * each page or partial page. The driver needs to split up OS
	 * sg_list entries if they are longer than one page or cross a page
	 * boundary. The driver also has to insert a PRP list pointer entry
	 * as the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message in IEEE 64 format. The 2nd entry
	 * in the main message is the chain element, and the rest of the
	 * PRP entries are built in the contiguous PCIe buffer.
2209 */ 2210 page_mask = mr_nvme_pg_size - 1; 2211 ptr_sgl = (u64 *)cmd->sg_frame; 2212 ptr_sgl_phys = cmd->sg_frame_phys_addr; 2213 memset(ptr_sgl, 0, instance->max_chain_frame_sz); 2214 2215 /* Build chain frame element which holds all prps except first*/ 2216 main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) 2217 ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); 2218 2219 main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); 2220 main_chain_element->NextChainOffset = 0; 2221 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2222 IEEE_SGE_FLAGS_SYSTEM_ADDR | 2223 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 2224 2225 /* Build first prp, sge need not to be page aligned*/ 2226 ptr_first_sgl = sgl_ptr; 2227 sg_scmd = scsi_sglist(scmd); 2228 sge_addr = sg_dma_address(sg_scmd); 2229 sge_len = sg_dma_len(sg_scmd); 2230 2231 offset = (u32)(sge_addr & page_mask); 2232 first_prp_len = mr_nvme_pg_size - offset; 2233 2234 ptr_first_sgl->Address = cpu_to_le64(sge_addr); 2235 ptr_first_sgl->Length = cpu_to_le32(first_prp_len); 2236 2237 data_len -= first_prp_len; 2238 2239 if (sge_len > first_prp_len) { 2240 sge_addr += first_prp_len; 2241 sge_len -= first_prp_len; 2242 } else if (sge_len == first_prp_len) { 2243 sg_scmd = sg_next(sg_scmd); 2244 sge_addr = sg_dma_address(sg_scmd); 2245 sge_len = sg_dma_len(sg_scmd); 2246 } 2247 2248 for (;;) { 2249 offset = (u32)(sge_addr & page_mask); 2250 2251 /* Put PRP pointer due to page boundary*/ 2252 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; 2253 if (unlikely(!page_mask_result)) { 2254 scmd_printk(KERN_NOTICE, 2255 scmd, "page boundary ptr_sgl: 0x%p\n", 2256 ptr_sgl); 2257 ptr_sgl_phys += 8; 2258 *ptr_sgl = cpu_to_le64(ptr_sgl_phys); 2259 ptr_sgl++; 2260 num_prp_in_chain++; 2261 } 2262 2263 *ptr_sgl = cpu_to_le64(sge_addr); 2264 ptr_sgl++; 2265 ptr_sgl_phys += 8; 2266 num_prp_in_chain++; 2267 2268 sge_addr += mr_nvme_pg_size; 2269 sge_len -= mr_nvme_pg_size; 2270 data_len -= mr_nvme_pg_size; 2271 2272 if (data_len <= 0) 2273 break; 2274 2275 if (sge_len > 0) 2276 continue; 2277 2278 sg_scmd = sg_next(sg_scmd); 2279 sge_addr = sg_dma_address(sg_scmd); 2280 sge_len = sg_dma_len(sg_scmd); 2281 } 2282 2283 main_chain_element->Length = 2284 cpu_to_le32(num_prp_in_chain * sizeof(u64)); 2285 2286 return build_prp; 2287 } 2288 2289 /** 2290 * megasas_make_sgl_fusion - Prepares 32-bit SGL 2291 * @instance: Adapter soft state 2292 * @scp: SCSI command from the mid-layer 2293 * @sgl_ptr: SGL to be filled in 2294 * @cmd: cmd we are working on 2295 * @sge_count sge count 2296 * 2297 */ 2298 static void 2299 megasas_make_sgl_fusion(struct megasas_instance *instance, 2300 struct scsi_cmnd *scp, 2301 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 2302 struct megasas_cmd_fusion *cmd, int sge_count) 2303 { 2304 int i, sg_processed; 2305 struct scatterlist *os_sgl; 2306 struct fusion_context *fusion; 2307 2308 fusion = instance->ctrl_context; 2309 2310 if (instance->adapter_type >= INVADER_SERIES) { 2311 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 2312 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 2313 sgl_ptr_end->Flags = 0; 2314 } 2315 2316 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 2317 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 2318 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 2319 sgl_ptr->Flags = 0; 2320 if (instance->adapter_type >= INVADER_SERIES) 2321 if (i == sge_count - 1) 2322 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 2323 sgl_ptr++; 2324 sg_processed = i + 1; 2325 2326 if ((sg_processed == 
		    (fusion->max_sge_in_main_msg - 1)) &&
		    (sge_count > fusion->max_sge_in_main_msg)) {

			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
			if (instance->adapter_type >= INVADER_SERIES) {
				if ((le16_to_cpu(cmd->io_request->IoFlags) &
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					cmd->io_request->ChainOffset =
						fusion->
						chain_offset_io_request;
				else
					cmd->io_request->ChainOffset = 0;
			} else
				cmd->io_request->ChainOffset =
					fusion->chain_offset_io_request;

			sg_chain = sgl_ptr;
			/* Prepare chain element */
			sg_chain->NextChainOffset = 0;
			if (instance->adapter_type >= INVADER_SERIES)
				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
			else
				sg_chain->Flags =
					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
			sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);

			sgl_ptr =
				(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
		}
	}
}

/**
 * megasas_make_sgl - Build Scatter Gather Lists (SGLs)
 * @instance: Soft instance of controller
 * @scp: SCSI command pointer
 * @cmd: Fusion command pointer
 *
 * This function builds SGLs based on the device type. For NVMe drives,
 * the SGL can instead be built in the NVMe-native format - PRPs
 * (Physical Region Pages).
 *
 * Returns the number of SG elements actually used, zero if the sg list
 * is NULL, or -ENOMEM if the mapping failed.
 */
static
int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
		     struct megasas_cmd_fusion *cmd)
{
	int sge_count;
	bool build_prp = false;
	struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;

	sge_count = scsi_dma_map(scp);

	if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
		return sge_count;

	sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
	if ((le16_to_cpu(cmd->io_request->IoFlags) &
	     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
	    (cmd->pd_interface == NVME_PD))
		build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
						  cmd, sge_count);

	if (!build_prp)
		megasas_make_sgl_fusion(instance, scp, sgl_chain64,
					cmd, sge_count);

	return sge_count;
}

/**
 * megasas_set_pd_lba - Sets the PD LBA in the CDB for fast path IOs
 * @io_request: IO request frame whose CDB is filled in
 * @cdb_len: CDB length
 * @io_info: IO request info carrying the start block and block count
 * @scp: SCSI command from the mid-layer
 * @local_map_ptr: driver's local copy of the RAID map
 * @ref_tag: logical block reference tag for T10 PI (DIF)
 */
static void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
	struct MR_LD_RAID *raid;
	u16 ld;
	u64 start_blk = io_info->pdBlock;
	u8 *cdb = io_request->CDB.CDB32;
	u32 num_blocks = io_info->numBlocks;
	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;

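		/*
		 * The rest of the 32-byte variable-length CDB is built below:
		 * byte 9 carries the READ32/WRITE32 service action, byte 10
		 * the protection check field, bytes 12-19 the 64-bit LBA and
		 * bytes 28-31 the transfer length.
		 */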
2430 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2431 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 2432 else 2433 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 2434 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; 2435 2436 /* LBA */ 2437 cdb[12] = (u8)((start_blk >> 56) & 0xff); 2438 cdb[13] = (u8)((start_blk >> 48) & 0xff); 2439 cdb[14] = (u8)((start_blk >> 40) & 0xff); 2440 cdb[15] = (u8)((start_blk >> 32) & 0xff); 2441 cdb[16] = (u8)((start_blk >> 24) & 0xff); 2442 cdb[17] = (u8)((start_blk >> 16) & 0xff); 2443 cdb[18] = (u8)((start_blk >> 8) & 0xff); 2444 cdb[19] = (u8)(start_blk & 0xff); 2445 2446 /* Logical block reference tag */ 2447 io_request->CDB.EEDP32.PrimaryReferenceTag = 2448 cpu_to_be32(ref_tag); 2449 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); 2450 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 2451 2452 /* Transfer length */ 2453 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 2454 cdb[29] = (u8)((num_blocks >> 16) & 0xff); 2455 cdb[30] = (u8)((num_blocks >> 8) & 0xff); 2456 cdb[31] = (u8)(num_blocks & 0xff); 2457 2458 /* set SCSI IO EEDPFlags */ 2459 if (scp->sc_data_direction == DMA_FROM_DEVICE) { 2460 io_request->EEDPFlags = cpu_to_le16( 2461 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2462 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2463 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 2464 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 2465 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE | 2466 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 2467 } else { 2468 io_request->EEDPFlags = cpu_to_le16( 2469 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2470 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); 2471 } 2472 io_request->Control |= cpu_to_le32((0x4 << 26)); 2473 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); 2474 } else { 2475 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 2476 if (((cdb_len == 12) || (cdb_len == 16)) && 2477 (start_blk <= 0xffffffff)) { 2478 if (cdb_len == 16) { 2479 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 2480 flagvals = cdb[1]; 2481 groupnum = cdb[14]; 2482 control = cdb[15]; 2483 } else { 2484 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; 2485 flagvals = cdb[1]; 2486 groupnum = cdb[10]; 2487 control = cdb[11]; 2488 } 2489 2490 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 2491 2492 cdb[0] = opcode; 2493 cdb[1] = flagvals; 2494 cdb[6] = groupnum; 2495 cdb[9] = control; 2496 2497 /* Transfer length */ 2498 cdb[8] = (u8)(num_blocks & 0xff); 2499 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 2500 2501 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ 2502 cdb_len = 10; 2503 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 2504 /* Convert to 16 byte CDB for large LBA's */ 2505 switch (cdb_len) { 2506 case 6: 2507 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; 2508 control = cdb[5]; 2509 break; 2510 case 10: 2511 opcode = 2512 cdb[0] == READ_10 ? READ_16 : WRITE_16; 2513 flagvals = cdb[1]; 2514 groupnum = cdb[6]; 2515 control = cdb[9]; 2516 break; 2517 case 12: 2518 opcode = 2519 cdb[0] == READ_12 ? 
					READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u8)(num_blocks & 0xff);
			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
			cdb[10] = (u8)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
			cdb_len = 16;
		}

		/* Normal case, just load LBA here */
		switch (cdb_len) {
		case 6:
		{
			u8 val = cdb[1] & 0xE0;
			cdb[3] = (u8)(start_blk & 0xff);
			cdb[2] = (u8)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
			break;
		}
		case 10:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 12:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u8)(start_blk & 0xff);
			cdb[8] = (u8)((start_blk >> 8) & 0xff);
			cdb[7] = (u8)((start_blk >> 16) & 0xff);
			cdb[6] = (u8)((start_blk >> 24) & 0xff);
			cdb[5] = (u8)((start_blk >> 32) & 0xff);
			cdb[4] = (u8)((start_blk >> 40) & 0xff);
			cdb[3] = (u8)((start_blk >> 48) & 0xff);
			cdb[2] = (u8)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/**
 * megasas_stream_detect - stream detection on read and write IOs
 * @instance:	Adapter soft state
 * @cmd:	Command to be prepared
 * @io_info:	IO Request info
 */
static void megasas_stream_detect(struct megasas_instance *instance,
				  struct megasas_cmd_fusion *cmd,
				  struct IO_REQUEST_INFO *io_info)
{
	struct fusion_context *fusion = instance->ctrl_context;
	u32 device_id = io_info->ldTgtId;
	struct LD_STREAM_DETECT *current_ld_sd
		= fusion->stream_detect_by_ld[device_id];
	u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
	u32 shifted_values, unshifted_values;
	u32 index_value_mask, shifted_values_mask;
	int i;
	bool is_read_ahead = false;
	struct STREAM_DETECT *current_sd;

	/* find possible stream */
	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
		stream_num = (*track_stream >>
			(i * BITS_PER_INDEX_STREAM)) &
			STREAM_MASK;
		current_sd = &current_ld_sd->stream_track[stream_num];
		/*
		 * if we found a stream, update the raid context and also
		 * update the mruBitMap
		 */
		/* boundary condition */
		if ((current_sd->next_seq_lba) &&
		    (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
		    (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
		    (current_sd->is_read == io_info->isRead)) {

			if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
			    ((!io_info->isRead) || (!is_read_ahead)))
				/*
				 * Once the API is available we need to
				 * change this. At this point we are not
				 * allowing any gap.
				 */
				continue;

			SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
			current_sd->next_seq_lba =
				io_info->ldStartBlock + io_info->numBlocks;
			/*
			 * update the mruBitMap LRU
			 */
			shifted_values_mask =
				(1 << i * BITS_PER_INDEX_STREAM) - 1;
			shifted_values = ((*track_stream & shifted_values_mask)
						<< BITS_PER_INDEX_STREAM);
			index_value_mask =
				STREAM_MASK << i * BITS_PER_INDEX_STREAM;
			unshifted_values =
				*track_stream & ~(shifted_values_mask |
				index_value_mask);
			*track_stream =
				unshifted_values | shifted_values | stream_num;
			return;
		}
	}
	/*
	 * if we did not find any stream, create a new one from the least
	 * recently used
	 */
	stream_num = (*track_stream >>
		((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
		STREAM_MASK;
	current_sd = &current_ld_sd->stream_track[stream_num];
	current_sd->is_read = io_info->isRead;
	current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
	return;
}

/**
 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
 * affinity (cpu of the controller) and raid_flags in the raid context
 * based on the IO type.
 *
 * @fusion:		Fusion context
 * @praid_context:	IO RAID context
 * @raid:		LD raid map
 * @fp_possible:	Is fast path possible?
 * @is_read:		Is read IO?
 * @scsi_buff_len:	SCSI command buffer length
 */
static void
megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
				  union RAID_CONTEXT_UNION *praid_context,
				  struct MR_LD_RAID *raid, bool fp_possible,
				  u8 is_read, u32 scsi_buff_len)
{
	u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
	struct RAID_CONTEXT_G35 *rctx_g35;

	rctx_g35 = &praid_context->raid_context_g35;
	if (fp_possible) {
		if (is_read) {
			if ((raid->cpuAffinity.pdRead.cpu0) &&
			    (raid->cpuAffinity.pdRead.cpu1))
				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
			else if (raid->cpuAffinity.pdRead.cpu1)
				cpu_sel = MR_RAID_CTX_CPUSEL_1;
		} else {
			if ((raid->cpuAffinity.pdWrite.cpu0) &&
			    (raid->cpuAffinity.pdWrite.cpu1))
				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
			else if (raid->cpuAffinity.pdWrite.cpu1)
				cpu_sel = MR_RAID_CTX_CPUSEL_1;
			/* Fast path cache-bypass capable R0/R1 VD */
			if ((raid->level <= 1) &&
			    (raid->capability.fp_cache_bypass_capable)) {
				rctx_g35->routing_flags |=
					(1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
				rctx_g35->raid_flags =
					(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
					 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
			}
		}
	} else {
		if (is_read) {
			if ((raid->cpuAffinity.ldRead.cpu0) &&
			    (raid->cpuAffinity.ldRead.cpu1))
				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
			else if (raid->cpuAffinity.ldRead.cpu1)
				cpu_sel = MR_RAID_CTX_CPUSEL_1;
		} else {
			if ((raid->cpuAffinity.ldWrite.cpu0) &&
			    (raid->cpuAffinity.ldWrite.cpu1))
				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
			else if (raid->cpuAffinity.ldWrite.cpu1)
				cpu_sel = MR_RAID_CTX_CPUSEL_1;

			if (is_stream_detected(rctx_g35) &&
			    ((raid->level == 5) || (raid->level == 6)) &&
			    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
			    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
				cpu_sel = MR_RAID_CTX_CPUSEL_0;
		}
	}

	rctx_g35->routing_flags |=
		(cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);

	/* Always give
priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2729 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. 2730 * IO Subtype is not bitmap. 2731 */ 2732 if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) && 2733 (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) { 2734 praid_context->raid_context_g35.raid_flags = 2735 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2736 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 2737 } 2738 } 2739 2740 /** 2741 * megasas_build_ldio_fusion - Prepares IOs to devices 2742 * @instance: Adapter soft state 2743 * @scp: SCSI command 2744 * @cmd: Command to be prepared 2745 * 2746 * Prepares the io_request and chain elements (sg_frame) for IO 2747 * The IO can be for PD (Fast Path) or LD 2748 */ 2749 static void 2750 megasas_build_ldio_fusion(struct megasas_instance *instance, 2751 struct scsi_cmnd *scp, 2752 struct megasas_cmd_fusion *cmd) 2753 { 2754 bool fp_possible; 2755 u16 ld; 2756 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 2757 u32 scsi_buff_len; 2758 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2759 struct IO_REQUEST_INFO io_info; 2760 struct fusion_context *fusion; 2761 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2762 u8 *raidLUN; 2763 unsigned long spinlock_flags; 2764 struct MR_LD_RAID *raid = NULL; 2765 struct MR_PRIV_DEVICE *mrdev_priv; 2766 struct RAID_CONTEXT *rctx; 2767 struct RAID_CONTEXT_G35 *rctx_g35; 2768 2769 device_id = MEGASAS_DEV_INDEX(scp); 2770 2771 fusion = instance->ctrl_context; 2772 2773 io_request = cmd->io_request; 2774 rctx = &io_request->RaidContext.raid_context; 2775 rctx_g35 = &io_request->RaidContext.raid_context_g35; 2776 2777 rctx->virtual_disk_tgt_id = cpu_to_le16(device_id); 2778 rctx->status = 0; 2779 rctx->ex_status = 0; 2780 2781 start_lba_lo = 0; 2782 start_lba_hi = 0; 2783 fp_possible = false; 2784 2785 /* 2786 * 6-byte READ(0x08) or WRITE(0x0A) cdb 2787 */ 2788 if (scp->cmd_len == 6) { 2789 datalength = (u32) scp->cmnd[4]; 2790 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 2791 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 2792 2793 start_lba_lo &= 0x1FFFFF; 2794 } 2795 2796 /* 2797 * 10-byte READ(0x28) or WRITE(0x2A) cdb 2798 */ 2799 else if (scp->cmd_len == 10) { 2800 datalength = (u32) scp->cmnd[8] | 2801 ((u32) scp->cmnd[7] << 8); 2802 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2803 ((u32) scp->cmnd[3] << 16) | 2804 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2805 } 2806 2807 /* 2808 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 2809 */ 2810 else if (scp->cmd_len == 12) { 2811 datalength = ((u32) scp->cmnd[6] << 24) | 2812 ((u32) scp->cmnd[7] << 16) | 2813 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2814 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2815 ((u32) scp->cmnd[3] << 16) | 2816 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2817 } 2818 2819 /* 2820 * 16-byte READ(0x88) or WRITE(0x8A) cdb 2821 */ 2822 else if (scp->cmd_len == 16) { 2823 datalength = ((u32) scp->cmnd[10] << 24) | 2824 ((u32) scp->cmnd[11] << 16) | 2825 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 2826 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 2827 ((u32) scp->cmnd[7] << 16) | 2828 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2829 2830 start_lba_hi = ((u32) scp->cmnd[2] << 24) | 2831 ((u32) scp->cmnd[3] << 16) | 2832 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2833 } 2834 2835 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 2836 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 2837 io_info.numBlocks = datalength; 2838 io_info.ldTgtId = device_id; 2839 io_info.r1_alt_dev_handle = 
MR_DEVHANDLE_INVALID; 2840 scsi_buff_len = scsi_bufflen(scp); 2841 io_request->DataLength = cpu_to_le32(scsi_buff_len); 2842 io_info.data_arms = 1; 2843 2844 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2845 io_info.isRead = 1; 2846 2847 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2848 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2849 2850 if (ld < instance->fw_supported_vd_count) 2851 raid = MR_LdRaidGet(ld, local_map_ptr); 2852 2853 if (!raid || (!fusion->fast_path_io)) { 2854 rctx->reg_lock_flags = 0; 2855 fp_possible = false; 2856 } else { 2857 if (MR_BuildRaidContext(instance, &io_info, rctx, 2858 local_map_ptr, &raidLUN)) 2859 fp_possible = (io_info.fpOkForIo > 0) ? true : false; 2860 } 2861 2862 megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); 2863 2864 if (instance->adapter_type >= VENTURA_SERIES) { 2865 /* FP for Optimal raid level 1. 2866 * All large RAID-1 writes (> 32 KiB, both WT and WB modes) 2867 * are built by the driver as LD I/Os. 2868 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os 2869 * (there is never a reason to process these as buffered writes) 2870 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os 2871 * with the SLD bit asserted. 2872 */ 2873 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 2874 mrdev_priv = scp->device->hostdata; 2875 2876 if (atomic_inc_return(&instance->fw_outstanding) > 2877 (instance->host->can_queue)) { 2878 fp_possible = false; 2879 atomic_dec(&instance->fw_outstanding); 2880 } else if (fusion->pcie_bw_limitation && 2881 ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2882 (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) { 2883 fp_possible = false; 2884 atomic_dec(&instance->fw_outstanding); 2885 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) 2886 atomic_set(&mrdev_priv->r1_ldio_hint, 2887 instance->r1_ldio_hint_default); 2888 } 2889 } 2890 2891 if (!fp_possible || 2892 (io_info.isRead && io_info.ra_capable)) { 2893 spin_lock_irqsave(&instance->stream_lock, 2894 spinlock_flags); 2895 megasas_stream_detect(instance, cmd, &io_info); 2896 spin_unlock_irqrestore(&instance->stream_lock, 2897 spinlock_flags); 2898 /* In ventura if stream detected for a read and it is 2899 * read ahead capable make this IO as LDIO 2900 */ 2901 if (is_stream_detected(rctx_g35)) 2902 fp_possible = false; 2903 } 2904 2905 /* If raid is NULL, set CPU affinity to default CPU0 */ 2906 if (raid) 2907 megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext, 2908 raid, fp_possible, io_info.isRead, 2909 scsi_buff_len); 2910 else 2911 rctx_g35->routing_flags |= 2912 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2913 } 2914 2915 if (fp_possible) { 2916 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 2917 local_map_ptr, start_lba_lo); 2918 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2919 cmd->request_desc->SCSIIO.RequestFlags = 2920 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2921 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2922 if (instance->adapter_type == INVADER_SERIES) { 2923 rctx->type = MPI2_TYPE_CUDA; 2924 rctx->nseg = 0x1; 2925 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2926 rctx->reg_lock_flags |= 2927 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2928 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2929 } else if (instance->adapter_type >= VENTURA_SERIES) { 2930 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2931 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2932 rctx_g35->routing_flags |= (1 << 
MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2933 io_request->IoFlags |= 2934 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2935 } 2936 if (fusion->load_balance_info && 2937 (fusion->load_balance_info[device_id].loadBalanceFlag) && 2938 (io_info.isRead)) { 2939 io_info.devHandle = 2940 get_updated_dev_handle(instance, 2941 &fusion->load_balance_info[device_id], 2942 &io_info, local_map_ptr); 2943 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2944 cmd->pd_r1_lb = io_info.pd_after_lb; 2945 if (instance->adapter_type >= VENTURA_SERIES) 2946 rctx_g35->span_arm = io_info.span_arm; 2947 else 2948 rctx->span_arm = io_info.span_arm; 2949 2950 } else 2951 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2952 2953 if (instance->adapter_type >= VENTURA_SERIES) 2954 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 2955 else 2956 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2957 2958 if ((raidLUN[0] == 1) && 2959 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { 2960 instance->dev_handle = !(instance->dev_handle); 2961 io_info.devHandle = 2962 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; 2963 } 2964 2965 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 2966 io_request->DevHandle = io_info.devHandle; 2967 cmd->pd_interface = io_info.pd_interface; 2968 /* populate the LUN field */ 2969 memcpy(io_request->LUN, raidLUN, 8); 2970 } else { 2971 rctx->timeout_value = 2972 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 2973 cmd->request_desc->SCSIIO.RequestFlags = 2974 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2975 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2976 if (instance->adapter_type == INVADER_SERIES) { 2977 if (io_info.do_fp_rlbypass || 2978 (rctx->reg_lock_flags == REGION_TYPE_UNUSED)) 2979 cmd->request_desc->SCSIIO.RequestFlags = 2980 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2981 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2982 rctx->type = MPI2_TYPE_CUDA; 2983 rctx->reg_lock_flags |= 2984 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2985 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2986 rctx->nseg = 0x1; 2987 } else if (instance->adapter_type >= VENTURA_SERIES) { 2988 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2989 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2990 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2991 } 2992 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2993 io_request->DevHandle = cpu_to_le16(device_id); 2994 2995 } /* Not FP */ 2996 } 2997 2998 /** 2999 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk 3000 * @instance: Adapter soft state 3001 * @scp: SCSI command 3002 * @cmd: Command to be prepared 3003 * 3004 * Prepares the io_request frame for non-rw io cmds for vd. 
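 * Non-RW commands can still use the fast path when the target LD is a
 * single-disk RAID 0 that reports fpNonRWCapable; otherwise they are
 * routed through firmware.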
3005 */ 3006 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, 3007 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) 3008 { 3009 u32 device_id; 3010 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 3011 u16 ld; 3012 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 3013 struct fusion_context *fusion = instance->ctrl_context; 3014 u8 span, physArm; 3015 __le16 devHandle; 3016 u32 arRef, pd; 3017 struct MR_LD_RAID *raid; 3018 struct RAID_CONTEXT *pRAID_Context; 3019 u8 fp_possible = 1; 3020 3021 io_request = cmd->io_request; 3022 device_id = MEGASAS_DEV_INDEX(scmd); 3023 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 3024 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 3025 /* get RAID_Context pointer */ 3026 pRAID_Context = &io_request->RaidContext.raid_context; 3027 /* Check with FW team */ 3028 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3029 pRAID_Context->reg_lock_row_lba = 0; 3030 pRAID_Context->reg_lock_length = 0; 3031 3032 if (fusion->fast_path_io && ( 3033 device_id < instance->fw_supported_vd_count)) { 3034 3035 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 3036 if (ld >= instance->fw_supported_vd_count - 1) 3037 fp_possible = 0; 3038 else { 3039 raid = MR_LdRaidGet(ld, local_map_ptr); 3040 if (!(raid->capability.fpNonRWCapable)) 3041 fp_possible = 0; 3042 } 3043 } else 3044 fp_possible = 0; 3045 3046 if (!fp_possible) { 3047 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 3048 io_request->DevHandle = cpu_to_le16(device_id); 3049 io_request->LUN[1] = scmd->device->lun; 3050 pRAID_Context->timeout_value = 3051 cpu_to_le16 (scmd->request->timeout / HZ); 3052 cmd->request_desc->SCSIIO.RequestFlags = 3053 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3054 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3055 } else { 3056 3057 /* set RAID context values */ 3058 pRAID_Context->config_seq_num = raid->seqNum; 3059 if (instance->adapter_type < VENTURA_SERIES) 3060 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; 3061 pRAID_Context->timeout_value = 3062 cpu_to_le16(raid->fpIoTimeoutForLd); 3063 3064 /* get the DevHandle for the PD (since this is 3065 fpNonRWCapable, this is a single disk RAID0) */ 3066 span = physArm = 0; 3067 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 3068 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 3069 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 3070 3071 /* build request descriptor */ 3072 cmd->request_desc->SCSIIO.RequestFlags = 3073 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 3074 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3075 cmd->request_desc->SCSIIO.DevHandle = devHandle; 3076 3077 /* populate the LUN field */ 3078 memcpy(io_request->LUN, raid->LUN, 8); 3079 3080 /* build the raidScsiIO structure */ 3081 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3082 io_request->DevHandle = devHandle; 3083 } 3084 } 3085 3086 /** 3087 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd 3088 * @instance: Adapter soft state 3089 * @scp: SCSI command 3090 * @cmd: Command to be prepared 3091 * @fp_possible: parameter to detect fast path or firmware path io. 
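 *               (true builds a fast path request, false routes the IO
 *               through firmware)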
3092 * 3093 * Prepares the io_request frame for rw/non-rw io cmds for syspds 3094 */ 3095 static void 3096 megasas_build_syspd_fusion(struct megasas_instance *instance, 3097 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, 3098 bool fp_possible) 3099 { 3100 u32 device_id; 3101 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 3102 u16 pd_index = 0; 3103 u16 os_timeout_value; 3104 u16 timeout_limit; 3105 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 3106 struct RAID_CONTEXT *pRAID_Context; 3107 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 3108 struct MR_PRIV_DEVICE *mr_device_priv_data; 3109 struct fusion_context *fusion = instance->ctrl_context; 3110 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; 3111 3112 device_id = MEGASAS_DEV_INDEX(scmd); 3113 pd_index = MEGASAS_PD_INDEX(scmd); 3114 os_timeout_value = scmd->request->timeout / HZ; 3115 mr_device_priv_data = scmd->device->hostdata; 3116 cmd->pd_interface = mr_device_priv_data->interface_type; 3117 3118 io_request = cmd->io_request; 3119 /* get RAID_Context pointer */ 3120 pRAID_Context = &io_request->RaidContext.raid_context; 3121 pRAID_Context->reg_lock_flags = 0; 3122 pRAID_Context->reg_lock_row_lba = 0; 3123 pRAID_Context->reg_lock_length = 0; 3124 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 3125 io_request->LUN[1] = scmd->device->lun; 3126 pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 3127 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 3128 3129 /* If FW supports PD sequence number */ 3130 if (instance->support_seqnum_jbod_fp) { 3131 if (instance->use_seqnum_jbod_fp && 3132 instance->pd_list[pd_index].driveType == TYPE_DISK) { 3133 3134 /* More than 256 PD/JBOD support for Ventura */ 3135 if (instance->support_morethan256jbod) 3136 pRAID_Context->virtual_disk_tgt_id = 3137 pd_sync->seq[pd_index].pd_target_id; 3138 else 3139 pRAID_Context->virtual_disk_tgt_id = 3140 cpu_to_le16(device_id + 3141 (MAX_PHYSICAL_DEVICES - 1)); 3142 pRAID_Context->config_seq_num = 3143 pd_sync->seq[pd_index].seqNum; 3144 io_request->DevHandle = 3145 pd_sync->seq[pd_index].devHandle; 3146 if (instance->adapter_type >= VENTURA_SERIES) { 3147 io_request->RaidContext.raid_context_g35.routing_flags |= 3148 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 3149 io_request->RaidContext.raid_context_g35.nseg_type |= 3150 (1 << RAID_CONTEXT_NSEG_SHIFT); 3151 io_request->RaidContext.raid_context_g35.nseg_type |= 3152 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 3153 } else { 3154 pRAID_Context->type = MPI2_TYPE_CUDA; 3155 pRAID_Context->nseg = 0x1; 3156 pRAID_Context->reg_lock_flags |= 3157 (MR_RL_FLAGS_SEQ_NUM_ENABLE | 3158 MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 3159 } 3160 } else { 3161 pRAID_Context->virtual_disk_tgt_id = 3162 cpu_to_le16(device_id + 3163 (MAX_PHYSICAL_DEVICES - 1)); 3164 pRAID_Context->config_seq_num = 0; 3165 io_request->DevHandle = cpu_to_le16(0xFFFF); 3166 } 3167 } else { 3168 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3169 pRAID_Context->config_seq_num = 0; 3170 3171 if (fusion->fast_path_io) { 3172 local_map_ptr = 3173 fusion->ld_drv_map[(instance->map_id & 1)]; 3174 io_request->DevHandle = 3175 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 3176 } else { 3177 io_request->DevHandle = cpu_to_le16(0xFFFF); 3178 } 3179 } 3180 3181 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 3182 3183 megasas_get_msix_index(instance, scmd, cmd, 1); 3184 3185 if (!fp_possible) { 3186 /* system pd firmware path */ 3187 io_request->Function = 
MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 3188 cmd->request_desc->SCSIIO.RequestFlags = 3189 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3190 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3191 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); 3192 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3193 } else { 3194 if (os_timeout_value) 3195 os_timeout_value++; 3196 3197 /* system pd Fast Path */ 3198 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3199 timeout_limit = (scmd->device->type == TYPE_DISK) ? 3200 255 : 0xFFFF; 3201 pRAID_Context->timeout_value = 3202 cpu_to_le16((os_timeout_value > timeout_limit) ? 3203 timeout_limit : os_timeout_value); 3204 if (instance->adapter_type >= INVADER_SERIES) 3205 io_request->IoFlags |= 3206 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 3207 3208 cmd->request_desc->SCSIIO.RequestFlags = 3209 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 3210 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3211 } 3212 } 3213 3214 /** 3215 * megasas_build_io_fusion - Prepares IOs to devices 3216 * @instance: Adapter soft state 3217 * @scp: SCSI command 3218 * @cmd: Command to be prepared 3219 * 3220 * Invokes helper functions to prepare request frames 3221 * and sets flags appropriate for IO/Non-IO cmd 3222 */ 3223 static int 3224 megasas_build_io_fusion(struct megasas_instance *instance, 3225 struct scsi_cmnd *scp, 3226 struct megasas_cmd_fusion *cmd) 3227 { 3228 int sge_count; 3229 u8 cmd_type; 3230 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 3231 struct MR_PRIV_DEVICE *mr_device_priv_data; 3232 mr_device_priv_data = scp->device->hostdata; 3233 3234 /* Zero out some fields so they don't get reused */ 3235 memset(io_request->LUN, 0x0, 8); 3236 io_request->CDB.EEDP32.PrimaryReferenceTag = 0; 3237 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; 3238 io_request->EEDPFlags = 0; 3239 io_request->Control = 0; 3240 io_request->EEDPBlockSize = 0; 3241 io_request->ChainOffset = 0; 3242 io_request->RaidContext.raid_context.raid_flags = 0; 3243 io_request->RaidContext.raid_context.type = 0; 3244 io_request->RaidContext.raid_context.nseg = 0; 3245 3246 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 3247 /* 3248 * Just the CDB length,rest of the Flags are zero 3249 * This will be modified for FP in build_ldio_fusion 3250 */ 3251 io_request->IoFlags = cpu_to_le16(scp->cmd_len); 3252 3253 switch (cmd_type = megasas_cmd_type(scp)) { 3254 case READ_WRITE_LDIO: 3255 megasas_build_ldio_fusion(instance, scp, cmd); 3256 break; 3257 case NON_READ_WRITE_LDIO: 3258 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 3259 break; 3260 case READ_WRITE_SYSPDIO: 3261 megasas_build_syspd_fusion(instance, scp, cmd, true); 3262 break; 3263 case NON_READ_WRITE_SYSPDIO: 3264 if (instance->secure_jbod_support || 3265 mr_device_priv_data->is_tm_capable) 3266 megasas_build_syspd_fusion(instance, scp, cmd, false); 3267 else 3268 megasas_build_syspd_fusion(instance, scp, cmd, true); 3269 break; 3270 default: 3271 break; 3272 } 3273 3274 /* 3275 * Construct SGL 3276 */ 3277 3278 sge_count = megasas_make_sgl(instance, scp, cmd); 3279 3280 if (sge_count > instance->max_num_sge || (sge_count < 0)) { 3281 dev_err(&instance->pdev->dev, 3282 "%s %d sge_count (%d) is out of range. 
Range is: 0-%d\n",
			__func__, __LINE__, sge_count, instance->max_num_sge);
		return 1;
	}

	if (instance->adapter_type >= VENTURA_SERIES) {
		set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
	} else {
		/* numSGE stores the lower 8 bits of sge_count;
		 * numSGEExt stores the upper 8 bits.
		 */
		io_request->RaidContext.raid_context.num_sge = sge_count;
		io_request->RaidContext.raid_context.num_sge_ext =
			(u8)(sge_count >> 8);
	}

	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);

	io_request->SGLOffset0 =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;

	io_request->SenseBufferLowAddress =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;

	cmd->scmd = scp;
	scp->SCp.ptr = (char *)cmd;

	return 0;
}

static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
{
	u8 *p;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc +
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
}

/**
 * megasas_prepare_secondRaid1_IO - Prepares the second (peer) IO for a
 * RAID 1 fast path write
 * @instance: Adapter soft state
 * @cmd: primary fusion command
 * @r1_cmd: peer fusion command to be prepared
 */
static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
					   struct megasas_cmd_fusion *cmd,
					   struct megasas_cmd_fusion *r1_cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	req_desc = cmd->request_desc;
	/* copy the io request frame as well as 8 SGEs data for r1 command */
	memcpy(r1_cmd->io_request, cmd->io_request,
	       (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
	       (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
	/* sense buffer is different for the r1 command */
	r1_cmd->io_request->SenseBufferLowAddress =
		cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
	r1_cmd->scmd = cmd->scmd;
	req_desc2 = megasas_get_request_descriptor(instance,
						   (r1_cmd->index - 1));
	req_desc2->Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
		cpu_to_le16(r1_cmd->index);
	r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
		cpu_to_le16(cmd->index);
	/* MSIxIndex of both commands' request descriptors should be the same */
	r1_cmd->request_desc->SCSIIO.MSIxIndex =
		cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for the r1 command */
	r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
		cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
}

/**
 * megasas_build_and_issue_cmd_fusion - Main routine for building and
 * issuing non-IOCTL commands
 * @instance: Adapter soft state
 * @scmd: pointer to scsi cmd from OS
 */
static u32
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
				   struct scsi_cmnd *scmd)
{
	struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 index;

	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
	    instance->ldio_threshold &&
	    (atomic_inc_return(&instance->ldio_outstanding) >
	     instance->ldio_threshold)) {
		atomic_dec(&instance->ldio_outstanding);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	if (atomic_inc_return(&instance->fw_outstanding) >
	    instance->host->can_queue) {
		atomic_dec(&instance->fw_outstanding);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);

	if (!cmd) {
		atomic_dec(&instance->fw_outstanding);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	index = cmd->index;

	req_desc = megasas_get_request_descriptor(instance, index-1);

	req_desc->Words = 0;
	cmd->request_desc = req_desc;

	if (megasas_build_io_fusion(instance, scmd, cmd)) {
		megasas_return_cmd_fusion(instance, cmd);
		dev_err(&instance->pdev->dev, "Error building command\n");
		cmd->request_desc = NULL;
		atomic_dec(&instance->fw_outstanding);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cpu_to_le16(index);

	if (cmd->io_request->ChainOffset != 0 &&
	    cmd->io_request->ChainOffset != 0xF)
		dev_err(&instance->pdev->dev, "The chain offset value is not "
			"correct : %x\n", cmd->io_request->ChainOffset);
	/*
	 * If the IO is RAID 1/10 fast path write capable, try to get a
	 * second command from the pool and construct it as the peer IO.
	 * FW has confirmed that the LBA values on the two PDs that make up
	 * a single R1/10 LD are always the same.
	 */
	/*
	 * The driver-side tag count is always less than max_fw_cmds, so the
	 * peer command can be fetched at (tag + max_fw_cmds).
	 */
	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
		r1_cmd = megasas_get_cmd_fusion(instance,
				(scmd->request->tag + instance->max_fw_cmds));
		megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
	}

	/*
	 * Issue the command to the FW
	 */
	megasas_fire_cmd_fusion(instance, req_desc);

	if (r1_cmd)
		megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);

	return 0;
}

/**
 * megasas_complete_r1_command -
 * completes R1 FP write commands which have a valid peer SMID
 * @instance: Adapter soft state
 * @cmd: Fusion command frame
 */
static inline void
megasas_complete_r1_command(struct megasas_instance *instance,
			    struct megasas_cmd_fusion *cmd)
{
	u8 *sense, status, ex_status;
	u32 data_length;
	u16 peer_smid;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *r1_cmd = NULL;
	struct scsi_cmnd *scmd_local = NULL;
	struct RAID_CONTEXT_G35 *rctx_g35;

	rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
	fusion = instance->ctrl_context;
	peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);

	r1_cmd = fusion->cmd_list[peer_smid - 1];
	scmd_local = cmd->scmd;
	status = rctx_g35->status;
	ex_status = rctx_g35->ex_status;
	data_length = cmd->io_request->DataLength;
	sense = cmd->sense;

	cmd->cmd_completed = true;

	/* Check if the peer command is completed or not */
	if (r1_cmd->cmd_completed) {
		rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
		if (rctx_g35->status != MFI_STAT_OK) {
			status = rctx_g35->status;
			ex_status = rctx_g35->ex_status;
			data_length = r1_cmd->io_request->DataLength;
			sense = r1_cmd->sense;
		}

		megasas_return_cmd_fusion(instance, r1_cmd);
		map_cmd_status(fusion, scmd_local, status, ex_status,
			       le32_to_cpu(data_length), sense);
		if (instance->ldio_threshold &&
		    megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
			atomic_dec(&instance->ldio_outstanding);
		scmd_local->SCp.ptr = NULL;
		megasas_return_cmd_fusion(instance, cmd);
		scsi_dma_unmap(scmd_local);
		scmd_local->scsi_done(scmd_local);
	}
}

/**
 * complete_cmd_fusion - Completes commands
 * @instance: Adapter soft state
 * @MSIxIndex: MSI-X reply queue index
 * @irq_context: IRQ context for this reply queue
 *
 * Completes all commands that are in the reply descriptor queue.
 */
static int
complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
		    struct megasas_irq_context *irq_context)
{
	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	u16 smid, num_completed;
	u8 reply_descript_type, *sense, status, extStatus;
	u32 device_id, data_length;
	union desc_value d_val;
	struct LD_LOAD_BALANCE_INFO *lbinfo;
	int threshold_reply_count = 0;
	struct scsi_cmnd *scmd_local = NULL;
	struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;

	fusion = instance->ctrl_context;

	if
3544 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3545 return IRQ_HANDLED;
3546
3547 desc = fusion->reply_frames_desc[MSIxIndex] +
3548 fusion->last_reply_idx[MSIxIndex];
3549
3550 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3551
3552 d_val.word = desc->Words;
3553
3554 reply_descript_type = reply_desc->ReplyFlags &
3555 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3556
3557 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3558 return IRQ_NONE;
3559
3560 num_completed = 0;
3561
3562 while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
3563 d_val.u.high != cpu_to_le32(UINT_MAX)) {
3564
3565 smid = le16_to_cpu(reply_desc->SMID);
3566 cmd_fusion = fusion->cmd_list[smid - 1];
3567 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
3568 cmd_fusion->io_request;
3569
3570 scmd_local = cmd_fusion->scmd;
3571 status = scsi_io_req->RaidContext.raid_context.status;
3572 extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
3573 sense = cmd_fusion->sense;
3574 data_length = scsi_io_req->DataLength;
3575
3576 switch (scsi_io_req->Function) {
3577 case MPI2_FUNCTION_SCSI_TASK_MGMT:
3578 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
3579 cmd_fusion->io_request;
3580 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
3581 &mr_tm_req->TmRequest;
3582 dev_dbg(&instance->pdev->dev, "TM completion: "
3583 "type: 0x%x TaskMID: 0x%x\n",
3584 mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
3585 complete(&cmd_fusion->done);
3586 break;
3587 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO */
3588 /* Update load balancing info */
3589 if (fusion->load_balance_info &&
3590 (cmd_fusion->scmd->SCp.Status &
3591 MEGASAS_LOAD_BALANCE_FLAG)) {
3592 device_id = MEGASAS_DEV_INDEX(scmd_local);
3593 lbinfo = &fusion->load_balance_info[device_id];
3594 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
3595 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
3596 }
3597 /* Fall through - and complete IO */
3598 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
3599 atomic_dec(&instance->fw_outstanding);
3600 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
3601 map_cmd_status(fusion, scmd_local, status,
3602 extStatus, le32_to_cpu(data_length),
3603 sense);
3604 if (instance->ldio_threshold &&
3605 (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
3606 atomic_dec(&instance->ldio_outstanding);
3607 scmd_local->SCp.ptr = NULL;
3608 megasas_return_cmd_fusion(instance, cmd_fusion);
3609 scsi_dma_unmap(scmd_local);
3610 scmd_local->scsi_done(scmd_local);
3611 } else /* Optimal VD - R1 FP command completion. */
3612 megasas_complete_r1_command(instance, cmd_fusion);
3613 break;
3614 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
3615 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3616 /* Poll mode: the frame is only returned to the pool ("dummy free").
3617 * In interrupt mode, the caller performs the reverse check.
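 * (Note: for DRV_DCMD_POLLED_MODE frames the issuing path polls the
 * frame status itself, so calling megasas_complete_cmd() here as well
 * would signal the same command twice.)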
3618 */
3619 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
3620 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
3621 megasas_return_cmd(instance, cmd_mfi);
3622 } else
3623 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
3624 break;
3625 }
3626
3627 fusion->last_reply_idx[MSIxIndex]++;
3628 if (fusion->last_reply_idx[MSIxIndex] >=
3629 fusion->reply_q_depth)
3630 fusion->last_reply_idx[MSIxIndex] = 0;
3631
3632 desc->Words = cpu_to_le64(ULLONG_MAX);
3633 num_completed++;
3634 threshold_reply_count++;
3635
3636 /* Get the next reply descriptor */
3637 if (!fusion->last_reply_idx[MSIxIndex])
3638 desc = fusion->reply_frames_desc[MSIxIndex];
3639 else
3640 desc++;
3641
3642 reply_desc =
3643 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3644
3645 d_val.word = desc->Words;
3646
3647 reply_descript_type = reply_desc->ReplyFlags &
3648 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3649
3650 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3651 break;
3652 /*
3653 * Write to the reply post host index register after completing
3654 * the threshold number of replies while more replies are still
3655 * pending in the reply queue.
3656 */
3657 if (threshold_reply_count >= instance->threshold_reply_count) {
3658 if (instance->msix_combined)
3659 writel(((MSIxIndex & 0x7) << 24) |
3660 fusion->last_reply_idx[MSIxIndex],
3661 instance->reply_post_host_index_addr[MSIxIndex/8]);
3662 else
3663 writel((MSIxIndex << 24) |
3664 fusion->last_reply_idx[MSIxIndex],
3665 instance->reply_post_host_index_addr[0]);
3666 threshold_reply_count = 0;
3667 if (irq_context) {
3668 if (!irq_context->irq_poll_scheduled) {
3669 irq_context->irq_poll_scheduled = true;
3670 irq_context->irq_line_enable = true;
3671 irq_poll_sched(&irq_context->irqpoll);
3672 }
3673 return num_completed;
3674 }
3675 }
3676 }
3677
3678 if (num_completed) {
3679 wmb();
3680 if (instance->msix_combined)
3681 writel(((MSIxIndex & 0x7) << 24) |
3682 fusion->last_reply_idx[MSIxIndex],
3683 instance->reply_post_host_index_addr[MSIxIndex/8]);
3684 else
3685 writel((MSIxIndex << 24) |
3686 fusion->last_reply_idx[MSIxIndex],
3687 instance->reply_post_host_index_addr[0]);
3688 megasas_check_and_restore_queue_depth(instance);
3689 }
3690 return num_completed;
3691 }
3692
3693 /**
3694 * megasas_enable_irq_poll() - enable irqpoll
3695 */
3696 static void megasas_enable_irq_poll(struct megasas_instance *instance)
3697 {
3698 u32 count, i;
3699 struct megasas_irq_context *irq_ctx;
3700
3701 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3702
3703 for (i = 0; i < count; i++) {
3704 irq_ctx = &instance->irq_context[i];
3705 irq_poll_enable(&irq_ctx->irqpoll);
3706 }
3707 }
3708
3709 /**
3710 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3711 * @instance_addr: Adapter soft state (cast to unsigned long)
3712 */
3713 static void megasas_sync_irqs(unsigned long instance_addr)
3714 {
3715 u32 count, i;
3716 struct megasas_instance *instance =
3717 (struct megasas_instance *)instance_addr;
3718 struct megasas_irq_context *irq_ctx;
3719
3720 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3721
3722 for (i = 0; i < count; i++) {
3723 synchronize_irq(pci_irq_vector(instance->pdev, i));
3724 irq_ctx = &instance->irq_context[i];
3725 irq_poll_disable(&irq_ctx->irqpoll);
3726 if (irq_ctx->irq_poll_scheduled) {
3727 irq_ctx->irq_poll_scheduled = false;
3728 enable_irq(irq_ctx->os_irq);
3729 }
3730 }
3731 }
3732
3733 /**
3734 * megasas_irqpoll() - process a queue for completed reply descriptors
3735 * @irqpoll: IRQ poll structure associated with queue to poll.
3736 * @budget: Threshold of reply descriptors to process per poll.
3737 *
3738 * Return: The number of entries processed.
3739 */
3740
3741 int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
3742 {
3743 struct megasas_irq_context *irq_ctx;
3744 struct megasas_instance *instance;
3745 int num_entries;
3746
3747 irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
3748 instance = irq_ctx->instance;
3749
3750 if (irq_ctx->irq_line_enable) {
3751 disable_irq(irq_ctx->os_irq);
3752 irq_ctx->irq_line_enable = false;
3753 }
3754
3755 num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
3756 if (num_entries < budget) {
3757 irq_poll_complete(irqpoll);
3758 irq_ctx->irq_poll_scheduled = false;
3759 enable_irq(irq_ctx->os_irq);
3760 }
3761
3762 return num_entries;
3763 }
3764
3765 /**
3766 * megasas_complete_cmd_dpc_fusion - Completes commands
3767 * @instance_addr: Adapter soft state (cast to unsigned long)
3768 *
3769 * Tasklet to complete cmds
3770 */
3771 static void
3772 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3773 {
3774 struct megasas_instance *instance =
3775 (struct megasas_instance *)instance_addr;
3776 u32 count, MSIxIndex;
3777
3778 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3779
3780 /* If we have already declared the adapter dead, do not complete cmds */
3781 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3782 return;
3783
3784 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
3785 complete_cmd_fusion(instance, MSIxIndex, NULL);
3786 }
3787
3788 /**
3789 * megasas_isr_fusion - isr entry point
3790 */
3791 static irqreturn_t megasas_isr_fusion(int irq, void *devp)
3792 {
3793 struct megasas_irq_context *irq_context = devp;
3794 struct megasas_instance *instance = irq_context->instance;
3795 u32 mfiStatus;
3796
3797 if (instance->mask_interrupts)
3798 return IRQ_NONE;
3799
3800 #if defined(ENABLE_IRQ_POLL)
3801 if (irq_context->irq_poll_scheduled)
3802 return IRQ_HANDLED;
3803 #endif
3804
3805 if (!instance->msix_vectors) {
3806 mfiStatus = instance->instancet->clear_intr(instance);
3807 if (!mfiStatus)
3808 return IRQ_NONE;
3809 }
3810
3811 /* If we are resetting, bail */
3812 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
3813 instance->instancet->clear_intr(instance);
3814 return IRQ_HANDLED;
3815 }
3816
3817 return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
3818 ? IRQ_HANDLED : IRQ_NONE;
3819 }
3820
3821 /**
3822 * build_mpt_mfi_pass_thru - builds a cmd for MFI pass-thru
3823 * @instance: Adapter soft state
3824 * @mfi_cmd: megasas_cmd pointer
3825 *
3826 */
3827 static void
3828 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3829 struct megasas_cmd *mfi_cmd)
3830 {
3831 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3832 struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
3833 struct megasas_cmd_fusion *cmd;
3834 struct fusion_context *fusion;
3835 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
3836
3837 fusion = instance->ctrl_context;
3838
3839 cmd = megasas_get_cmd_fusion(instance,
3840 instance->max_scsi_cmds + mfi_cmd->index);
3841
3842 /* Save the smid. To be used for returning the cmd */
3843 mfi_cmd->context.smid = cmd->index;
3844
3845 /*
3846 * For cmds where the flag is set, store the flag and check
3847 * on completion. For cmds with this flag, don't call
3848 * megasas_complete_cmd
3849 */
3850
3851 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
3852 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
3853
3854 io_req = cmd->io_request;
3855
3856 if (instance->adapter_type >= INVADER_SERIES) {
3857 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
3858 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
3859 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
3860 sgl_ptr_end->Flags = 0;
3861 }
3862
3863 mpi25_ieee_chain =
3864 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
3865
3866 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3867 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
3868 SGL) / 4;
3869 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
3870
3871 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
3872
3873 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3874 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3875
3876 mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
3877 }
3878
3879 /**
3880 * build_mpt_cmd - Calls helper function to build an MPT frame for an MFI pass-thru cmd
3881 * @instance: Adapter soft state
3882 * @cmd: mfi cmd to build
3883 *
3884 */
3885 static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3886 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3887 {
3888 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
3889 u16 index;
3890
3891 build_mpt_mfi_pass_thru(instance, cmd);
3892 index = cmd->context.smid;
3893
3894 req_desc = megasas_get_request_descriptor(instance, index - 1);
3895
3896 req_desc->Words = 0;
3897 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3898 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3899
3900 req_desc->SCSIIO.SMID = cpu_to_le16(index);
3901
3902 return req_desc;
3903 }
3904
3905 /**
3906 * megasas_issue_dcmd_fusion - Issues an MFI pass-thru cmd
3907 * @instance: Adapter soft state
3908 * @cmd: mfi cmd pointer
3909 *
3910 */
3911 static void
3912 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3913 struct megasas_cmd *cmd)
3914 {
3915 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3916
3917 req_desc = build_mpt_cmd(instance, cmd);
3918
3919 megasas_fire_cmd_fusion(instance, req_desc);
3920 return;
3921 }
3922
3923 /**
3924 * megasas_release_fusion - Reverses the FW initialization
3925 * @instance: Adapter soft state
3926 */
3927 void
3928 megasas_release_fusion(struct megasas_instance *instance)
3929 {
3930 megasas_free_ioc_init_cmd(instance);
3931 megasas_free_cmds(instance);
3932 megasas_free_cmds_fusion(instance);
3933
3934 iounmap(instance->reg_set);
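/* The (1 << bar) mask below selects only the single BAR region held
 * for the register set; other BARs are left untouched.
 */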
3935
3936 pci_release_selected_regions(instance->pdev, 1 << instance->bar);
3937 }
3938
3939 /**
3940 * megasas_read_fw_status_reg_fusion - returns the current FW status value
3941 * @instance: Adapter soft state
3942 */
3943 static u32
3944 megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
3945 {
3946 return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
3947 }
3948
3949 /**
3950 * megasas_alloc_host_crash_buffer - Allocate host buffers for crash dump collection from firmware
3951 * @instance: Controller's soft instance
3952 * The number of successfully allocated buffers is stored in instance->drv_buf_alloc.
3953 */
3954 static void
3955 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
3956 {
3957 unsigned int i;
3958
3959 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
3960 instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
3961 if (!instance->crash_buf[i]) {
3962 dev_info(&instance->pdev->dev, "Firmware crash dump "
3963 "memory allocation failed at index %d\n", i);
3964 break;
3965 }
3966 }
3967 instance->drv_buf_alloc = i;
3968 }
3969
3970 /**
3971 * megasas_free_host_crash_buffer - Free host buffers used for crash dump collection from firmware
3972 * @instance: Controller's soft instance
3973 */
3974 void
3975 megasas_free_host_crash_buffer(struct megasas_instance *instance)
3976 {
3977 unsigned int i;
3978 for (i = 0; i < instance->drv_buf_alloc; i++) {
3979 if (instance->crash_buf[i])
3980 vfree(instance->crash_buf[i]);
3981 }
3982 instance->drv_buf_index = 0;
3983 instance->drv_buf_alloc = 0;
3984 instance->fw_crash_state = UNAVAILABLE;
3985 instance->fw_crash_buffer_size = 0;
3986 }
3987
3988 /**
3989 * megasas_adp_reset_fusion - For controller reset
3990 * @regs: MFI register set
3991 */
3992 static int
3993 megasas_adp_reset_fusion(struct megasas_instance *instance,
3994 struct megasas_register_set __iomem *regs)
3995 {
3996 u32 host_diag, abs_state, retry;
3997
3998 /* Now try to reset the chip */
3999 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4000 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4001 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4002 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4003 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4004 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4005 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
4006
4007 /* Check that the diag write enable (DRWE) bit is on */
4008 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
4009 retry = 0;
4010 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
4011 msleep(100);
4012 host_diag = megasas_readl(instance,
4013 &instance->reg_set->fusion_host_diag);
4014 if (retry++ == 100) {
4015 dev_warn(&instance->pdev->dev,
4016 "Host diag unlock failed from %s %d\n",
4017 __func__, __LINE__);
4018 break;
4019 }
4020 }
4021 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
4022 return -1;
4023
4024 /* Send chip reset command */
4025 writel(host_diag | HOST_DIAG_RESET_ADAPTER,
4026 &instance->reg_set->fusion_host_diag);
4027 msleep(3000);
4028
4029 /* Make sure the reset adapter bit is cleared */
4030 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
4031 retry = 0;
4032 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
4033 msleep(100);
4034 host_diag = megasas_readl(instance,
4035 &instance->reg_set->fusion_host_diag);
4036 if (retry++ == 1000) {
4037 dev_warn(&instance->pdev->dev,
"Diag reset adapter never cleared %s %d\n", 4039 __func__, __LINE__); 4040 break; 4041 } 4042 } 4043 if (host_diag & HOST_DIAG_RESET_ADAPTER) 4044 return -1; 4045 4046 abs_state = instance->instancet->read_fw_status_reg(instance) 4047 & MFI_STATE_MASK; 4048 retry = 0; 4049 4050 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 4051 msleep(100); 4052 abs_state = instance->instancet-> 4053 read_fw_status_reg(instance) & MFI_STATE_MASK; 4054 } 4055 if (abs_state <= MFI_STATE_FW_INIT) { 4056 dev_warn(&instance->pdev->dev, 4057 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n", 4058 abs_state, __func__, __LINE__); 4059 return -1; 4060 } 4061 4062 return 0; 4063 } 4064 4065 /** 4066 * megasas_check_reset_fusion - For controller reset check 4067 * @regs: MFI register set 4068 */ 4069 static int 4070 megasas_check_reset_fusion(struct megasas_instance *instance, 4071 struct megasas_register_set __iomem *regs) 4072 { 4073 return 0; 4074 } 4075 4076 /** 4077 * megasas_trigger_snap_dump - Trigger snap dump in FW 4078 * @instance: Soft instance of adapter 4079 */ 4080 static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) 4081 { 4082 int j; 4083 u32 fw_state, abs_state; 4084 4085 if (!instance->disableOnlineCtrlReset) { 4086 dev_info(&instance->pdev->dev, "Trigger snap dump\n"); 4087 writel(MFI_ADP_TRIGGER_SNAP_DUMP, 4088 &instance->reg_set->doorbell); 4089 readl(&instance->reg_set->doorbell); 4090 } 4091 4092 for (j = 0; j < instance->snapdump_wait_time; j++) { 4093 abs_state = instance->instancet->read_fw_status_reg(instance); 4094 fw_state = abs_state & MFI_STATE_MASK; 4095 if (fw_state == MFI_STATE_FAULT) { 4096 dev_printk(KERN_ERR, &instance->pdev->dev, 4097 "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", 4098 abs_state & MFI_STATE_FAULT_CODE, 4099 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4100 return; 4101 } 4102 msleep(1000); 4103 } 4104 } 4105 4106 /* This function waits for outstanding commands on fusion to complete */ 4107 static int 4108 megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 4109 int reason, int *convert) 4110 { 4111 int i, outstanding, retval = 0, hb_seconds_missed = 0; 4112 u32 fw_state, abs_state; 4113 u32 waittime_for_io_completion; 4114 4115 waittime_for_io_completion = 4116 min_t(u32, resetwaittime, 4117 (resetwaittime - instance->snapdump_wait_time)); 4118 4119 if (reason == MFI_IO_TIMEOUT_OCR) { 4120 dev_info(&instance->pdev->dev, 4121 "MFI command is timed out\n"); 4122 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 4123 if (instance->snapdump_wait_time) 4124 megasas_trigger_snap_dump(instance); 4125 retval = 1; 4126 goto out; 4127 } 4128 4129 for (i = 0; i < waittime_for_io_completion; i++) { 4130 /* Check if firmware is in fault state */ 4131 abs_state = instance->instancet->read_fw_status_reg(instance); 4132 fw_state = abs_state & MFI_STATE_MASK; 4133 if (fw_state == MFI_STATE_FAULT) { 4134 dev_printk(KERN_ERR, &instance->pdev->dev, 4135 "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", 4136 abs_state & MFI_STATE_FAULT_CODE, 4137 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4138 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 4139 if (instance->requestorId && reason) { 4140 dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" 4141 " state while polling during" 4142 " I/O timeout handling for %d\n", 4143 instance->host->host_no); 4144 *convert = 1; 4145 } 4146 4147 retval = 1; 4148 goto out; 4149 } 4150 4151 4152 /* If SR-IOV VF mode & heartbeat 
timeout, don't wait */
4153 if (instance->requestorId && !reason) {
4154 retval = 1;
4155 goto out;
4156 }
4157
4158 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
4159 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
4160 if (instance->hb_host_mem->HB.fwCounter !=
4161 instance->hb_host_mem->HB.driverCounter) {
4162 instance->hb_host_mem->HB.driverCounter =
4163 instance->hb_host_mem->HB.fwCounter;
4164 hb_seconds_missed = 0;
4165 } else {
4166 hb_seconds_missed++;
4167 if (hb_seconds_missed ==
4168 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
4169 dev_warn(&instance->pdev->dev, "SR-IOV:"
4170 " Heartbeat never completed"
4171 " while polling during I/O"
4172 " timeout handling for"
4173 " scsi%d.\n",
4174 instance->host->host_no);
4175 *convert = 1;
4176 retval = 1;
4177 goto out;
4178 }
4179 }
4180 }
4181
4182 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4183 outstanding = atomic_read(&instance->fw_outstanding);
4184 if (!outstanding)
4185 goto out;
4186
4187 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4188 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
4189 "commands to complete for scsi%d\n", i,
4190 outstanding, instance->host->host_no);
4191 }
4192 msleep(1000);
4193 }
4194
4195 if (instance->snapdump_wait_time) {
4196 megasas_trigger_snap_dump(instance);
4197 retval = 1;
4198 goto out;
4199 }
4200
4201 if (atomic_read(&instance->fw_outstanding)) {
4202 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
4203 "will reset adapter scsi%d.\n",
4204 instance->host->host_no);
4205 *convert = 1;
4206 retval = 1;
4207 }
4208
4209 out:
4210 return retval;
4211 }
4212
4213 void megasas_reset_reply_desc(struct megasas_instance *instance)
4214 {
4215 int i, j, count;
4216 struct fusion_context *fusion;
4217 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
4218
4219 fusion = instance->ctrl_context;
4220 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
4221 for (i = 0 ; i < count ; i++) {
4222 fusion->last_reply_idx[i] = 0;
4223 reply_desc = fusion->reply_frames_desc[i];
4224 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
4225 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
4226 }
4227 }
4228
4229 /*
4230 * megasas_refire_mgmt_cmd : Re-fire management commands
4231 * @instance: Controller's soft instance
4232 */
4233 void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
4234 bool return_ioctl)
4235 {
4236 int j;
4237 struct megasas_cmd_fusion *cmd_fusion;
4238 struct fusion_context *fusion;
4239 struct megasas_cmd *cmd_mfi;
4240 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4241 u16 smid;
4242 bool refire_cmd = false;
4243 u8 result;
4244 u32 opcode = 0;
4245
4246 fusion = instance->ctrl_context;
4247
4248 /* Re-fire management commands.
4249 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
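 * (Slots [max_scsi_cmds, max_fw_cmds) of the MPT pool are the ones
 * backing MFI frames; a saved SMID of 0 marks an idle slot, which is
 * skipped below.)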
4250 */ 4251 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { 4252 cmd_fusion = fusion->cmd_list[j]; 4253 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 4254 smid = le16_to_cpu(cmd_mfi->context.smid); 4255 result = REFIRE_CMD; 4256 4257 if (!smid) 4258 continue; 4259 4260 req_desc = megasas_get_request_descriptor(instance, smid - 1); 4261 4262 switch (cmd_mfi->frame->hdr.cmd) { 4263 case MFI_CMD_DCMD: 4264 opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode); 4265 /* Do not refire shutdown command */ 4266 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 4267 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; 4268 result = COMPLETE_CMD; 4269 break; 4270 } 4271 4272 refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) && 4273 (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 4274 !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); 4275 4276 if (!refire_cmd) 4277 result = RETURN_CMD; 4278 4279 break; 4280 case MFI_CMD_NVME: 4281 if (!instance->support_nvme_passthru) { 4282 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; 4283 result = COMPLETE_CMD; 4284 } 4285 4286 break; 4287 case MFI_CMD_TOOLBOX: 4288 if (!instance->support_pci_lane_margining) { 4289 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; 4290 result = COMPLETE_CMD; 4291 } 4292 4293 break; 4294 default: 4295 break; 4296 } 4297 4298 if (return_ioctl && cmd_mfi->sync_cmd && 4299 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { 4300 dev_err(&instance->pdev->dev, 4301 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", 4302 __func__, __LINE__, cmd_mfi->frame->hdr.cmd, 4303 le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); 4304 cmd_mfi->cmd_status_drv = DCMD_BUSY; 4305 result = COMPLETE_CMD; 4306 } 4307 4308 switch (result) { 4309 case REFIRE_CMD: 4310 megasas_fire_cmd_fusion(instance, req_desc); 4311 break; 4312 case RETURN_CMD: 4313 megasas_return_cmd(instance, cmd_mfi); 4314 break; 4315 case COMPLETE_CMD: 4316 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 4317 break; 4318 } 4319 } 4320 } 4321 4322 /* 4323 * megasas_return_polled_cmds: Return polled mode commands back to the pool 4324 * before initiating an OCR. 
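 * Polled-mode DCMD frames never get a reply posted once the reply
 * queues are re-initialized, so they are handed back here to keep the
 * MFI command pool from leaking across an OCR.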
4325 * @instance: Controller's soft instance
4326 */
4327 static void
4328 megasas_return_polled_cmds(struct megasas_instance *instance)
4329 {
4330 int i;
4331 struct megasas_cmd_fusion *cmd_fusion;
4332 struct fusion_context *fusion;
4333 struct megasas_cmd *cmd_mfi;
4334
4335 fusion = instance->ctrl_context;
4336
4337 for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) {
4338 cmd_fusion = fusion->cmd_list[i];
4339 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
4340
4341 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
4342 if (megasas_dbg_lvl & OCR_DEBUG)
4343 dev_info(&instance->pdev->dev,
4344 "%s %d return cmd 0x%x opcode 0x%x\n",
4345 __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
4346 le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
4347 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
4348 megasas_return_cmd(instance, cmd_mfi);
4349 }
4350 }
4351 }
4352
4353 /*
4354 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
4355 * @instance: per adapter struct
4356 * @channel: the channel assigned by the OS
4357 * @id: the id assigned by the OS
4358 *
4359 * Returns SUCCESS if no IOs are pending on the SCSI device, else FAILED
4360 */
4361
4362 static int megasas_track_scsiio(struct megasas_instance *instance,
4363 int id, int channel)
4364 {
4365 int i, found = 0;
4366 struct megasas_cmd_fusion *cmd_fusion;
4367 struct fusion_context *fusion;
4368 fusion = instance->ctrl_context;
4369
4370 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4371 cmd_fusion = fusion->cmd_list[i];
4372 if (cmd_fusion->scmd &&
4373 (cmd_fusion->scmd->device->id == id &&
4374 cmd_fusion->scmd->device->channel == channel)) {
4375 dev_info(&instance->pdev->dev,
4376 "SCSI commands pending to target "
4377 "channel %d id %d \tSMID: 0x%x\n",
4378 channel, id, cmd_fusion->index);
4379 scsi_print_command(cmd_fusion->scmd);
4380 found = 1;
4381 break;
4382 }
4383 }
4384
4385 return found ? FAILED : SUCCESS;
4386 }
4387
4388 /**
4389 * megasas_tm_response_code - translation of device response code
4390 * @instance: per adapter object
4391 * @mpi_reply: MPI reply returned by firmware
4392 *
4393 * Return nothing.
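 * (Note: response code 0xA is matched as a raw value in the switch
 * below, presumably because no MPI2_SCSITASKMGMT_RSP_* define for the
 * "overlapped tag attempted" status is available to this driver.)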
4394 */
4395 static void
4396 megasas_tm_response_code(struct megasas_instance *instance,
4397 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
4398 {
4399 char *desc;
4400
4401 switch (mpi_reply->ResponseCode) {
4402 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
4403 desc = "task management request completed";
4404 break;
4405 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
4406 desc = "invalid frame";
4407 break;
4408 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
4409 desc = "task management request not supported";
4410 break;
4411 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
4412 desc = "task management request failed";
4413 break;
4414 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
4415 desc = "task management request succeeded";
4416 break;
4417 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
4418 desc = "invalid lun";
4419 break;
4420 case 0xA:
4421 desc = "overlapped tag attempted";
4422 break;
4423 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
4424 desc = "task queued, however not sent to target";
4425 break;
4426 default:
4427 desc = "unknown";
4428 break;
4429 }
4430 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
4431 mpi_reply->ResponseCode, desc);
4432 dev_dbg(&instance->pdev->dev,
4433 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
4434 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
4435 mpi_reply->TerminationCount, mpi_reply->DevHandle,
4436 mpi_reply->Function, mpi_reply->TaskType,
4437 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
4438 }
4439
4440 /**
4441 * megasas_issue_tm - main routine for sending tm requests
4442 * @instance: per adapter struct
4443 * @device_handle: device handle
4444 * @channel: the channel assigned by the OS
4445 * @id: the id assigned by the OS
4446 * @type: MPI2_SCSITASKMGMT_TASKTYPE_xxx (defined in megaraid_sas_fusion.h)
4447 * @smid_task: smid assigned to the task
4448 * @mr_device_priv_data: private data of the SCSI device
4449 * Context: user
4450 *
4451 * MegaRAID uses the MPT interface for task management requests;
4452 * this is a generic API for sending such requests to firmware.
4453 *
4454 * Return SUCCESS or FAILED.
4455 */
4456 static int
4457 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
4458 uint channel, uint id, u16 smid_task, u8 type,
4459 struct MR_PRIV_DEVICE *mr_device_priv_data)
4460 {
4461 struct MR_TASK_MANAGE_REQUEST *mr_request;
4462 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
4463 unsigned long timeleft;
4464 struct megasas_cmd_fusion *cmd_fusion;
4465 struct megasas_cmd *cmd_mfi;
4466 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4467 struct fusion_context *fusion = NULL;
4468 struct megasas_cmd_fusion *scsi_lookup;
4469 int rc;
4470 int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
4471 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
4472
4473 fusion = instance->ctrl_context;
4474
4475 cmd_mfi = megasas_get_cmd(instance);
4476
4477 if (!cmd_mfi) {
4478 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
4479 __func__, __LINE__);
4480 return -ENOMEM;
4481 }
4482
4483 cmd_fusion = megasas_get_cmd_fusion(instance,
4484 instance->max_scsi_cmds + cmd_mfi->index);
4485
4486 /* Save the smid.
To be used for returning the cmd */
4487 cmd_mfi->context.smid = cmd_fusion->index;
4488
4489 req_desc = megasas_get_request_descriptor(instance,
4490 (cmd_fusion->index - 1));
4491
4492 cmd_fusion->request_desc = req_desc;
4493 req_desc->Words = 0;
4494
4495 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
4496 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
4497 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
4498 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4499 mpi_request->DevHandle = cpu_to_le16(device_handle);
4500 mpi_request->TaskType = type;
4501 mpi_request->TaskMID = cpu_to_le16(smid_task);
4502 mpi_request->LUN[1] = 0;
4503
4504
4505 req_desc = cmd_fusion->request_desc;
4506 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
4507 req_desc->HighPriority.RequestFlags =
4508 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
4509 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
4510 req_desc->HighPriority.MSIxIndex = 0;
4511 req_desc->HighPriority.LMID = 0;
4512 req_desc->HighPriority.Reserved1 = 0;
4513
4514 if (channel < MEGASAS_MAX_PD_CHANNELS)
4515 mr_request->tmReqFlags.isTMForPD = 1;
4516 else
4517 mr_request->tmReqFlags.isTMForLD = 1;
4518
4519 init_completion(&cmd_fusion->done);
4520 megasas_fire_cmd_fusion(instance, req_desc);
4521
4522 switch (type) {
4523 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4524 timeout = mr_device_priv_data->task_abort_tmo;
4525 break;
4526 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4527 timeout = mr_device_priv_data->target_reset_tmo;
4528 break;
4529 }
4530
4531 timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
4532
4533 if (!timeleft) {
4534 dev_err(&instance->pdev->dev,
4535 "task mgmt type 0x%x timed out\n", type);
4536 cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
4537 mutex_unlock(&instance->reset_mutex);
4538 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
4539 mutex_lock(&instance->reset_mutex);
4540 return rc;
4541 }
4542
4543 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
4544 megasas_tm_response_code(instance, mpi_reply);
4545
4546 megasas_return_cmd(instance, cmd_mfi);
4547 rc = SUCCESS;
4548 switch (type) {
4549 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4550 scsi_lookup = fusion->cmd_list[smid_task - 1];
4551
4552 if (scsi_lookup->scmd == NULL)
4553 break;
4554 else {
4555 instance->instancet->disable_intr(instance);
4556 megasas_sync_irqs((unsigned long)instance);
4557 instance->instancet->enable_intr(instance);
4558 megasas_enable_irq_poll(instance);
4559 if (scsi_lookup->scmd == NULL)
4560 break;
4561 }
4562 rc = FAILED;
4563 break;
4564
4565 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4566 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
4567 break;
4568 instance->instancet->disable_intr(instance);
4569 megasas_sync_irqs((unsigned long)instance);
4570 rc = megasas_track_scsiio(instance, id, channel);
4571 instance->instancet->enable_intr(instance);
4572 megasas_enable_irq_poll(instance);
4573
4574 break;
4575 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
4576 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
4577 break;
4578 default:
4579 rc = FAILED;
4580 break;
4581 }
4582
4583 return rc;
4584
4585 }
4586
4587 /*
4588 * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI cmd
4589 * @scmd: SCSI command for which the SMID is looked up
4590 *
4591 * Returns a non-zero SMID if found among outstanding commands
4592 */
4593 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
4594 {
4595 int i, ret = 0;
4596 struct megasas_instance *instance;
4597 struct megasas_cmd_fusion *cmd_fusion;
4598 struct fusion_context *fusion;
4599
4600 instance = (struct megasas_instance *)scmd->device->host->hostdata;
4601
4602 fusion = instance->ctrl_context;
4603
4604 for (i = 0; i < instance->max_scsi_cmds; i++) {
4605 cmd_fusion = fusion->cmd_list[i];
4606 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
4607 scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
4608 " SMID: %d\n", cmd_fusion->index);
4609 ret = cmd_fusion->index;
4610 break;
4611 }
4612 }
4613
4614 return ret;
4615 }
4616
4617 /*
4618 * megasas_get_tm_devhandle - Get devhandle for TM request
4619 * @sdev: OS provided scsi device
4620 *
4621 * Returns: devhandle/targetID of SCSI device
4622 */
4623 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
4624 {
4625 u16 pd_index = 0;
4626 u32 device_id;
4627 struct megasas_instance *instance;
4628 struct fusion_context *fusion;
4629 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
4630 u16 devhandle = (u16)ULONG_MAX;
4631
4632 instance = (struct megasas_instance *)sdev->host->hostdata;
4633 fusion = instance->ctrl_context;
4634
4635 if (!MEGASAS_IS_LOGICAL(sdev)) {
4636 if (instance->use_seqnum_jbod_fp) {
4637 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
4638 + sdev->id;
4639 pd_sync = (void *)fusion->pd_seq_sync
4640 [(instance->pd_seq_map_id - 1) & 1];
4641 devhandle = pd_sync->seq[pd_index].devHandle;
4642 } else
4643 sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
4644 " without JBOD MAP support from %s %d\n", __func__, __LINE__);
4645 } else {
4646 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
4647 + sdev->id;
4648 devhandle = device_id;
4649 }
4650
4651 return devhandle;
4652 }
4653
4654 /*
4655 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
4656 * @scmd : pointer to scsi command object
4657 *
4658 * Returns SUCCESS if the command was aborted, else FAILED
4659 */
4660
4661 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
4662 {
4663 struct megasas_instance *instance;
4664 u16 smid, devhandle;
4665 int ret;
4666 struct MR_PRIV_DEVICE *mr_device_priv_data;
4667 mr_device_priv_data = scmd->device->hostdata;
4668
4669 instance = (struct megasas_instance *)scmd->device->host->hostdata;
4670
4671 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4672 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
4673 "SCSI host:%d\n", instance->host->host_no);
4674 ret = FAILED;
4675 return ret;
4676 }
4677
4678 if (!mr_device_priv_data) {
4679 sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
4680 "scmd(%p)\n", scmd);
4681 scmd->result = DID_NO_CONNECT << 16;
4682 ret = SUCCESS;
4683 goto out;
4684 }
4685
4686 if (!mr_device_priv_data->is_tm_capable) {
4687 ret = FAILED;
4688 goto out;
4689 }
4690
4691 mutex_lock(&instance->reset_mutex);
4692
4693 smid = megasas_fusion_smid_lookup(scmd);
4694
4695 if (!smid) {
4696 ret = SUCCESS;
4697 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
4698 " issued is not found in outstanding commands\n");
4699 mutex_unlock(&instance->reset_mutex);
4700 goto out;
4701 }
4702
4703 devhandle = megasas_get_tm_devhandle(scmd->device);
4704
4705 if (devhandle == (u16)ULONG_MAX) {
4706 ret = SUCCESS;
4707 sdev_printk(KERN_INFO, scmd->device,
4708 "task abort issued for invalid devhandle\n");
4709 mutex_unlock(&instance->reset_mutex);
4710 goto out;
4711 }
4712 sdev_printk(KERN_INFO, scmd->device,
4713 "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
4714 scmd, devhandle);
4715
4716 mr_device_priv_data->tm_busy = 1;
4717 ret = megasas_issue_tm(instance, devhandle,
4718 scmd->device->channel, scmd->device->id, smid,
4719 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4720 mr_device_priv_data);
4721 mr_device_priv_data->tm_busy = 0;
4722
4723 mutex_unlock(&instance->reset_mutex);
4724 scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
4725 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4726 out:
4727 scsi_print_command(scmd);
4728 if (megasas_dbg_lvl & TM_DEBUG)
4729 megasas_dump_fusion_io(scmd);
4730
4731 return ret;
4732 }
4733
4734 /*
4735 * megasas_reset_target_fusion : target reset function for fusion adapters
4736 * @scmd: SCSI command pointer
4737 *
4738 * Returns SUCCESS if all commands associated with the target are aborted, else FAILED
4739 */
4740
4741 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
4742 {
4743
4744 struct megasas_instance *instance;
4745 int ret = FAILED;
4746 u16 devhandle;
4747 struct MR_PRIV_DEVICE *mr_device_priv_data;
4748 mr_device_priv_data = scmd->device->hostdata;
4749
4750 instance = (struct megasas_instance *)scmd->device->host->hostdata;
4751
4752 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4753 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
4754 "SCSI host:%d\n", instance->host->host_no);
4755 ret = FAILED;
4756 return ret;
4757 }
4758
4759 if (!mr_device_priv_data) {
4760 sdev_printk(KERN_INFO, scmd->device,
4761 "device has been deleted! scmd: (0x%p)\n", scmd);
4762 scmd->result = DID_NO_CONNECT << 16;
4763 ret = SUCCESS;
4764 goto out;
4765 }
4766
4767 if (!mr_device_priv_data->is_tm_capable) {
4768 ret = FAILED;
4769 goto out;
4770 }
4771
4772 mutex_lock(&instance->reset_mutex);
4773 devhandle = megasas_get_tm_devhandle(scmd->device);
4774
4775 if (devhandle == (u16)ULONG_MAX) {
4776 ret = SUCCESS;
4777 sdev_printk(KERN_INFO, scmd->device,
4778 "target reset issued for invalid devhandle\n");
4779 mutex_unlock(&instance->reset_mutex);
4780 goto out;
4781 }
4782
4783 sdev_printk(KERN_INFO, scmd->device,
4784 "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
4785 scmd, devhandle);
4786 mr_device_priv_data->tm_busy = 1;
4787 ret = megasas_issue_tm(instance, devhandle,
4788 scmd->device->channel, scmd->device->id, 0,
4789 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
4790 mr_device_priv_data);
4791 mr_device_priv_data->tm_busy = 0;
4792 mutex_unlock(&instance->reset_mutex);
4793 scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
"SUCCESS" : "FAILED"); 4795 4796 out: 4797 return ret; 4798 } 4799 4800 /*SRIOV get other instance in cluster if any*/ 4801 static struct 4802 megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) 4803 { 4804 int i; 4805 4806 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { 4807 if (megasas_mgmt_info.instance[i] && 4808 (megasas_mgmt_info.instance[i] != instance) && 4809 megasas_mgmt_info.instance[i]->requestorId && 4810 megasas_mgmt_info.instance[i]->peerIsPresent && 4811 (memcmp((megasas_mgmt_info.instance[i]->clusterId), 4812 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) 4813 return megasas_mgmt_info.instance[i]; 4814 } 4815 return NULL; 4816 } 4817 4818 /* Check for a second path that is currently UP */ 4819 int megasas_check_mpio_paths(struct megasas_instance *instance, 4820 struct scsi_cmnd *scmd) 4821 { 4822 struct megasas_instance *peer_instance = NULL; 4823 int retval = (DID_REQUEUE << 16); 4824 4825 if (instance->peerIsPresent) { 4826 peer_instance = megasas_get_peer_instance(instance); 4827 if ((peer_instance) && 4828 (atomic_read(&peer_instance->adprecovery) == 4829 MEGASAS_HBA_OPERATIONAL)) 4830 retval = (DID_NO_CONNECT << 16); 4831 } 4832 return retval; 4833 } 4834 4835 /* Core fusion reset function */ 4836 int megasas_reset_fusion(struct Scsi_Host *shost, int reason) 4837 { 4838 int retval = SUCCESS, i, j, convert = 0; 4839 struct megasas_instance *instance; 4840 struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; 4841 struct fusion_context *fusion; 4842 u32 abs_state, status_reg, reset_adapter, fpio_count = 0; 4843 u32 io_timeout_in_crash_mode = 0; 4844 struct scsi_cmnd *scmd_local = NULL; 4845 struct scsi_device *sdev; 4846 int ret_target_prop = DCMD_FAILED; 4847 bool is_target_prop = false; 4848 bool do_adp_reset = true; 4849 int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES; 4850 4851 instance = (struct megasas_instance *)shost->hostdata; 4852 fusion = instance->ctrl_context; 4853 4854 mutex_lock(&instance->reset_mutex); 4855 4856 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 4857 dev_warn(&instance->pdev->dev, "Hardware critical error, " 4858 "returning FAILED for scsi%d.\n", 4859 instance->host->host_no); 4860 mutex_unlock(&instance->reset_mutex); 4861 return FAILED; 4862 } 4863 status_reg = instance->instancet->read_fw_status_reg(instance); 4864 abs_state = status_reg & MFI_STATE_MASK; 4865 4866 /* IO timeout detected, forcibly put FW in FAULT state */ 4867 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && 4868 instance->crash_dump_app_support && reason) { 4869 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " 4870 "forcibly FAULT Firmware\n"); 4871 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4872 status_reg = megasas_readl(instance, &instance->reg_set->doorbell); 4873 writel(status_reg | MFI_STATE_FORCE_OCR, 4874 &instance->reg_set->doorbell); 4875 readl(&instance->reg_set->doorbell); 4876 mutex_unlock(&instance->reset_mutex); 4877 do { 4878 ssleep(3); 4879 io_timeout_in_crash_mode++; 4880 dev_dbg(&instance->pdev->dev, "waiting for [%d] " 4881 "seconds for crash dump collection and OCR " 4882 "to be done\n", (io_timeout_in_crash_mode * 3)); 4883 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && 4884 (io_timeout_in_crash_mode < 80)); 4885 4886 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 4887 dev_info(&instance->pdev->dev, "OCR done for IO " 4888 "timeout case\n"); 4889 retval = SUCCESS; 4890 } else { 4891 
4891 dev_info(&instance->pdev->dev, "Controller is not "
4892 "operational after 240 seconds wait for IO "
4893 "timeout case in FW crash dump mode, do "
4894 "OCR/kill adapter\n");
4895 retval = megasas_reset_fusion(shost, 0);
4896 }
4897 return retval;
4898 }
4899
4900 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
4901 del_timer_sync(&instance->sriov_heartbeat_timer);
4902 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4903 set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
4904 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
4905 instance->instancet->disable_intr(instance);
4906 megasas_sync_irqs((unsigned long)instance);
4907
4908 /* First try waiting for commands to complete */
4909 if (megasas_wait_for_outstanding_fusion(instance, reason,
4910 &convert)) {
4911 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4912 dev_warn(&instance->pdev->dev, "resetting fusion "
4913 "adapter scsi%d.\n", instance->host->host_no);
4914 if (convert)
4915 reason = 0;
4916
4917 if (megasas_dbg_lvl & OCR_DEBUG)
4918 dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
4919
4920 /* Now return commands back to the OS */
4921 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4922 cmd_fusion = fusion->cmd_list[i];
4923 /* check for extra commands issued by the driver */
4924 if (instance->adapter_type >= VENTURA_SERIES) {
4925 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4926 megasas_return_cmd_fusion(instance, r1_cmd);
4927 }
4928 scmd_local = cmd_fusion->scmd;
4929 if (cmd_fusion->scmd) {
4930 if (megasas_dbg_lvl & OCR_DEBUG) {
4931 sdev_printk(KERN_INFO,
4932 cmd_fusion->scmd->device, "SMID: 0x%x\n",
4933 cmd_fusion->index);
4934 megasas_dump_fusion_io(cmd_fusion->scmd);
4935 }
4936
4937 if (cmd_fusion->io_request->Function ==
4938 MPI2_FUNCTION_SCSI_IO_REQUEST)
4939 fpio_count++;
4940
4941 scmd_local->result =
4942 megasas_check_mpio_paths(instance,
4943 scmd_local);
4944 if (instance->ldio_threshold &&
4945 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
4946 atomic_dec(&instance->ldio_outstanding);
4947 megasas_return_cmd_fusion(instance, cmd_fusion);
4948 scsi_dma_unmap(scmd_local);
4949 scmd_local->scsi_done(scmd_local);
4950 }
4951 }
4952
4953 dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
4954 fpio_count);
4955
4956 atomic_set(&instance->fw_outstanding, 0);
4957
4958 status_reg = instance->instancet->read_fw_status_reg(instance);
4959 abs_state = status_reg & MFI_STATE_MASK;
4960 reset_adapter = status_reg & MFI_RESET_ADAPTER;
4961 if (instance->disableOnlineCtrlReset ||
4962 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
4963 /* Reset not supported, kill adapter */
4964 dev_warn(&instance->pdev->dev, "Reset not supported"
4965 ", killing adapter scsi%d.\n",
4966 instance->host->host_no);
4967 goto kill_hba;
4968 }
4969
4970 /* Let SR-IOV VF & PF sync up if there was a HB failure */
4971 if (instance->requestorId && !reason) {
4972 msleep(MEGASAS_OCR_SETTLE_TIME_VF);
4973 do_adp_reset = false;
4974 max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
4975 }
4976
4977 /* Now try to reset the chip */
4978 for (i = 0; i < max_reset_tries; i++) {
4979 /*
4980 * Do adp reset and wait for
4981 * controller to transition to ready
4982 */
4983 if (megasas_adp_reset_wait_for_ready(instance,
4984 do_adp_reset, 1) == FAILED)
4985 continue;
4986
4987 /* Wait for FW to become ready */
4988 if (megasas_transition_to_ready(instance, 1)) {
4989 dev_warn(&instance->pdev->dev,
4990 "Failed to transition
controller to ready for " 4991 "scsi%d.\n", instance->host->host_no); 4992 continue; 4993 } 4994 megasas_reset_reply_desc(instance); 4995 megasas_fusion_update_can_queue(instance, OCR_CONTEXT); 4996 4997 if (megasas_ioc_init_fusion(instance)) { 4998 continue; 4999 } 5000 5001 if (megasas_get_ctrl_info(instance)) { 5002 dev_info(&instance->pdev->dev, 5003 "Failed from %s %d\n", 5004 __func__, __LINE__); 5005 goto kill_hba; 5006 } 5007 5008 megasas_refire_mgmt_cmd(instance, 5009 (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) 5010 ? 1 : 0)); 5011 5012 /* Reset load balance info */ 5013 if (fusion->load_balance_info) 5014 memset(fusion->load_balance_info, 0, 5015 (sizeof(struct LD_LOAD_BALANCE_INFO) * 5016 MAX_LOGICAL_DRIVES_EXT)); 5017 5018 if (!megasas_get_map_info(instance)) { 5019 megasas_sync_map_info(instance); 5020 } else { 5021 /* 5022 * Return pending polled mode cmds before 5023 * retrying OCR 5024 */ 5025 megasas_return_polled_cmds(instance); 5026 continue; 5027 } 5028 5029 megasas_setup_jbod_map(instance); 5030 5031 /* reset stream detection array */ 5032 if (instance->adapter_type >= VENTURA_SERIES) { 5033 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 5034 memset(fusion->stream_detect_by_ld[j], 5035 0, sizeof(struct LD_STREAM_DETECT)); 5036 fusion->stream_detect_by_ld[j]->mru_bit_map 5037 = MR_STREAM_BITMAP; 5038 } 5039 } 5040 5041 clear_bit(MEGASAS_FUSION_IN_RESET, 5042 &instance->reset_flags); 5043 instance->instancet->enable_intr(instance); 5044 megasas_enable_irq_poll(instance); 5045 shost_for_each_device(sdev, shost) { 5046 if ((instance->tgt_prop) && 5047 (instance->nvme_page_size)) 5048 ret_target_prop = megasas_get_target_prop(instance, sdev); 5049 5050 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 5051 megasas_set_dynamic_target_properties(sdev, is_target_prop); 5052 } 5053 5054 status_reg = instance->instancet->read_fw_status_reg 5055 (instance); 5056 abs_state = status_reg & MFI_STATE_MASK; 5057 if (abs_state != MFI_STATE_OPERATIONAL) { 5058 dev_info(&instance->pdev->dev, 5059 "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", 5060 abs_state, instance->host->host_no); 5061 goto out; 5062 } 5063 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 5064 5065 dev_info(&instance->pdev->dev, 5066 "Adapter is OPERATIONAL for scsi:%d\n", 5067 instance->host->host_no); 5068 5069 /* Restart SR-IOV heartbeat */ 5070 if (instance->requestorId) { 5071 if (!megasas_sriov_start_heartbeat(instance, 0)) 5072 megasas_start_timer(instance); 5073 else 5074 instance->skip_heartbeat_timer_del = 1; 5075 } 5076 5077 if (instance->crash_dump_drv_support && 5078 instance->crash_dump_app_support) 5079 megasas_set_crash_dump_params(instance, 5080 MR_CRASH_BUF_TURN_ON); 5081 else 5082 megasas_set_crash_dump_params(instance, 5083 MR_CRASH_BUF_TURN_OFF); 5084 5085 if (instance->snapdump_wait_time) { 5086 megasas_get_snapdump_properties(instance); 5087 dev_info(&instance->pdev->dev, 5088 "Snap dump wait time\t: %d\n", 5089 instance->snapdump_wait_time); 5090 } 5091 5092 retval = SUCCESS; 5093 5094 /* Adapter reset completed successfully */ 5095 dev_warn(&instance->pdev->dev, 5096 "Reset successful for scsi%d.\n", 5097 instance->host->host_no); 5098 5099 goto out; 5100 } 5101 /* Reset failed, kill the adapter */ 5102 dev_warn(&instance->pdev->dev, "Reset failed, killing " 5103 "adapter scsi%d.\n", instance->host->host_no); 5104 goto kill_hba; 5105 } else { 5106 /* For VF: Restart HB timer if we didn't OCR */ 5107 if (instance->requestorId) { 5108 
megasas_start_timer(instance);
5109 }
5110 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
5111 instance->instancet->enable_intr(instance);
5112 megasas_enable_irq_poll(instance);
5113 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5114 goto out;
5115 }
5116 kill_hba:
5117 megaraid_sas_kill_hba(instance);
5118 megasas_enable_irq_poll(instance);
5119 instance->skip_heartbeat_timer_del = 1;
5120 retval = FAILED;
5121 out:
5122 clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
5123 mutex_unlock(&instance->reset_mutex);
5124 return retval;
5125 }
5126
5127 /* Fusion Crash dump collection */
5128 static void megasas_fusion_crash_dump(struct megasas_instance *instance)
5129 {
5130 u32 status_reg;
5131 u8 partial_copy = 0;
5132 int wait = 0;
5133
5134
5135 status_reg = instance->instancet->read_fw_status_reg(instance);
5136
5137 /*
5138 * Allocate host crash buffers to copy data from the 1 MB DMA crash buffer
5139 * to host crash buffers
5140 */
5141 if (instance->drv_buf_index == 0) {
5142 /* Buffers are already allocated for an old crash dump.
5143 * Do OCR and do not wait for crash dump collection
5144 */
5145 if (instance->drv_buf_alloc) {
5146 dev_info(&instance->pdev->dev, "earlier crash dump is "
5147 "not yet copied by application, ignoring this "
5148 "crash dump and initiating OCR\n");
5149 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5150 writel(status_reg,
5151 &instance->reg_set->outbound_scratch_pad_0);
5152 readl(&instance->reg_set->outbound_scratch_pad_0);
5153 return;
5154 }
5155 megasas_alloc_host_crash_buffer(instance);
5156 dev_info(&instance->pdev->dev, "Number of host crash buffers "
5157 "allocated: %d\n", instance->drv_buf_alloc);
5158 }
5159
5160 while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
5161 (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
5162 if (!(status_reg & MFI_STATE_DMADONE)) {
5163 /*
5164 * The next crash dump buffer is not yet DMA'd by FW.
5165 * Check again after a short sleep; wait up to 1 second
5166 * for FW to post the next buffer, otherwise bail out.
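 * (The handshake, as implemented below: FW sets MFI_STATE_DMADONE in
 * the scratch pad when a chunk is ready; the driver copies it into a
 * host buffer, clears DMADONE and writes the status back as an ack.)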
5167 */
5168 wait++;
5169 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5170 status_reg = instance->instancet->read_fw_status_reg(
5171 instance);
5172 continue;
5173 }
5174
5175 wait = 0;
5176 if (instance->drv_buf_index >= instance->drv_buf_alloc) {
5177 dev_info(&instance->pdev->dev,
5178 "Driver is done copying the buffer: %d\n",
5179 instance->drv_buf_alloc);
5180 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5181 partial_copy = 1;
5182 break;
5183 } else {
5184 memcpy(instance->crash_buf[instance->drv_buf_index],
5185 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
5186 instance->drv_buf_index++;
5187 status_reg &= ~MFI_STATE_DMADONE;
5188 }
5189
5190 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5191 readl(&instance->reg_set->outbound_scratch_pad_0);
5192
5193 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5194 status_reg = instance->instancet->read_fw_status_reg(instance);
5195 }
5196
5197 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
5198 dev_info(&instance->pdev->dev, "Crash Dump is available, number "
5199 "of copied buffers: %d\n", instance->drv_buf_index);
5200 instance->fw_crash_buffer_size = instance->drv_buf_index;
5201 instance->fw_crash_state = AVAILABLE;
5202 instance->drv_buf_index = 0;
5203 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5204 readl(&instance->reg_set->outbound_scratch_pad_0);
5205 if (!partial_copy)
5206 megasas_reset_fusion(instance->host, 0);
5207 }
5208 }
5209
5210
5211 /* Fusion OCR work queue */
5212 void megasas_fusion_ocr_wq(struct work_struct *work)
5213 {
5214 struct megasas_instance *instance =
5215 container_of(work, struct megasas_instance, work_init);
5216
5217 megasas_reset_fusion(instance->host, 0);
5218 }
5219
5220 /* Allocate fusion context */
5221 int
5222 megasas_alloc_fusion_context(struct megasas_instance *instance)
5223 {
5224 struct fusion_context *fusion;
5225
5226 instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
5227 GFP_KERNEL);
5228 if (!instance->ctrl_context) {
5229 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5230 __func__, __LINE__);
5231 return -ENOMEM;
5232 }
5233
5234 fusion = instance->ctrl_context;
5235
5236 fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5237 sizeof(LD_SPAN_INFO));
5238 fusion->log_to_span =
5239 (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5240 fusion->log_to_span_pages);
5241 if (!fusion->log_to_span) {
5242 fusion->log_to_span =
5243 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5244 sizeof(LD_SPAN_INFO)));
5245 if (!fusion->log_to_span) {
5246 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5247 __func__, __LINE__);
5248 return -ENOMEM;
5249 }
5250 }
5251
5252 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5253 sizeof(struct LD_LOAD_BALANCE_INFO));
5254 fusion->load_balance_info =
5255 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5256 fusion->load_balance_info_pages);
5257 if (!fusion->load_balance_info) {
5258 fusion->load_balance_info =
5259 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5260 sizeof(struct LD_LOAD_BALANCE_INFO)));
5261 if (!fusion->load_balance_info)
5262 dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
5263 "continuing without Load Balance support\n");
5264 }
5265
5266 return 0;
5267 }
5268
5269 void
5270 megasas_free_fusion_context(struct megasas_instance *instance)
5271 {
5272 struct fusion_context *fusion = instance->ctrl_context;
5273
5274 if (fusion) {
5275 if (fusion->load_balance_info) {
5276 if
(is_vmalloc_addr(fusion->load_balance_info)) 5277 vfree(fusion->load_balance_info); 5278 else 5279 free_pages((ulong)fusion->load_balance_info, 5280 fusion->load_balance_info_pages); 5281 } 5282 5283 if (fusion->log_to_span) { 5284 if (is_vmalloc_addr(fusion->log_to_span)) 5285 vfree(fusion->log_to_span); 5286 else 5287 free_pages((ulong)fusion->log_to_span, 5288 fusion->log_to_span_pages); 5289 } 5290 5291 kfree(fusion); 5292 } 5293 } 5294 5295 struct megasas_instance_template megasas_instance_template_fusion = { 5296 .enable_intr = megasas_enable_intr_fusion, 5297 .disable_intr = megasas_disable_intr_fusion, 5298 .clear_intr = megasas_clear_intr_fusion, 5299 .read_fw_status_reg = megasas_read_fw_status_reg_fusion, 5300 .adp_reset = megasas_adp_reset_fusion, 5301 .check_reset = megasas_check_reset_fusion, 5302 .service_isr = megasas_isr_fusion, 5303 .tasklet = megasas_complete_cmd_dpc_fusion, 5304 .init_adapter = megasas_init_adapter_fusion, 5305 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, 5306 .issue_dcmd = megasas_issue_dcmd_fusion, 5307 }; 5308
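
/*
 * Illustrative sketch (not compiled into the driver): a minimal,
 * self-contained model of the reply-queue bookkeeping performed by
 * complete_cmd_fusion() above. The helper names and the plain C types
 * here are hypothetical; only the bit layout and the wrap-around rule
 * are taken from the code in this file.
 */
#if 0
#include <stdint.h>

/* Advance a reply-queue index, wrapping at the queue depth, exactly
 * as complete_cmd_fusion() does with fusion->last_reply_idx[].
 */
static uint32_t next_reply_idx(uint32_t idx, uint32_t reply_q_depth)
{
	return (idx + 1 >= reply_q_depth) ? 0 : idx + 1;
}

/* Value written to the reply post host index register: bits 31:24
 * carry the MSI-x queue number (masked to 3 bits when 8 queues share
 * one combined register), bits 23:0 carry the first free reply slot.
 */
static uint32_t reply_post_index(uint32_t msix_index, uint32_t last_reply_idx,
				 int msix_combined)
{
	uint32_t q = msix_combined ? (msix_index & 0x7) : msix_index;

	return (q << 24) | last_reply_idx;
}
#endif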