// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2016  Avago Technologies
 *  Copyright (c) 2016-2018  Broadcom Inc.
 *
 *  FILE: megaraid_sas_fusion.c
 *
 *  Authors: Broadcom Inc.
 *           Sumant Patro
 *           Adam Radford
 *           Kashyap Desai <kashyap.desai@broadcom.com>
 *           Sumit Saxena <sumit.saxena@broadcom.com>
 *
 *  Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dbg.h>
#include <linux/dmi.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"


extern void
megasas_complete_cmd(struct megasas_instance *instance,
		     struct megasas_cmd *cmd, u8 alt_status);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);

int
megasas_clear_intr_fusion(struct megasas_instance *instance);

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);

extern u32 megasas_dbg_lvl;
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
				  int initial);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
static void megasas_fusion_crash_dump(struct megasas_instance *instance);

/**
 * megasas_adp_reset_wait_for_ready - initiate chip reset and wait for
 *				      controller to come to ready state
 * @instance: adapter's soft state
 * @do_adp_reset: If true, do a chip reset
 * @ocr_context: If called from OCR context this will
 *		 be set to 1, else 0
 *
 * This function initiates a chip reset followed by a wait for the controller
 * to transition to ready state.
 * During this, the driver blocks all access to the PCI config space from
 * userspace.
 */
int
megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
				 bool do_adp_reset,
				 int ocr_context)
{
	int ret = FAILED;

	/*
	 * Block access to PCI config space from userspace
	 * when diag reset is initiated from driver
	 */
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Block access to PCI config space %s %d\n",
			 __func__, __LINE__);

	pci_cfg_access_lock(instance->pdev);

	if (do_adp_reset) {
		if (instance->instancet->adp_reset
			(instance, instance->reg_set))
			goto out;
	}

	/* Wait for FW to become ready */
	if (megasas_transition_to_ready(instance, ocr_context)) {
		dev_warn(&instance->pdev->dev,
			 "Failed to transition controller to ready for scsi%d.\n",
			 instance->host->host_no);
		goto out;
	}

	ret = SUCCESS;
out:
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Unlock access to PCI config space %s %d\n",
			 __func__, __LINE__);

	pci_cfg_access_unlock(instance->pdev);

	return ret;
}

/**
 * megasas_check_same_4gb_region - check if allocation
 *				   crosses same 4GB boundary or not
 * @instance: adapter's soft instance
 * @start_addr: start address of DMA allocation
 * @size: size of allocation in bytes
 * @return: true:  allocation does not cross a 4GB boundary
 *	    false: allocation crosses a 4GB boundary
 */
static inline bool megasas_check_same_4gb_region
	(struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
{
	dma_addr_t end_addr;

	end_addr = start_addr + size;

	if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
		dev_err(&instance->pdev->dev,
			"Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
			(unsigned long long)start_addr,
			(unsigned long long)end_addr);
		return false;
	}

	return true;
}

/**
 * megasas_enable_intr_fusion - Enables interrupts
 * @instance: adapter's soft instance
 */
static void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	instance->mask_interrupts = 0;
	/* For Thunderbolt/Invader also clear intr on enable */
	writel(~0, &regs->outbound_intr_status);
	readl(&regs->outbound_intr_status);

	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
		 __func__, readl(&regs->outbound_intr_mask));
}

/**
 * megasas_disable_intr_fusion - Disables interrupt
 * @instance: adapter's soft instance
 */
static void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
	u32 mask = 0xFFFFFFFF;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	instance->mask_interrupts = 1;

	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
		 __func__, readl(&regs->outbound_intr_mask));
}
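
/**
 * megasas_clear_intr_fusion - Check and acknowledge the controller interrupt
 * @instance: adapter's soft state
 *
 * Returns 0 if the interrupt was not raised by this controller; otherwise
 * returns 1, acknowledging the interrupt (by writing the status back) when
 * the reply-interrupt bit is set.
 */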
int
megasas_clear_intr_fusion(struct megasas_instance *instance)
{
	u32 status;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	/*
	 * Check if it is our interrupt
	 */
	status = megasas_readl(instance,
			       &regs->outbound_intr_status);

	if (status & 1) {
		writel(status, &regs->outbound_intr_status);
		readl(&regs->outbound_intr_status);
		return 1;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	return 1;
}

/**
 * megasas_get_cmd_fusion - Get a command from the free pool
 * @instance: Adapter soft state
 * @blk_tag: Command tag
 *
 * Returns a blk_tag indexed mpt frame
 */
inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
						  *instance, u32 blk_tag)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	return fusion->cmd_list[blk_tag];
}

/**
 * megasas_return_cmd_fusion - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
	struct megasas_cmd_fusion *cmd)
{
	cmd->scmd = NULL;
	memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->cmd_completed = false;
}

/**
 * megasas_write_64bit_req_desc - PCI writes 64bit request descriptor
 * @instance: Adapter soft state
 * @req_desc: 64bit Request descriptor
 */
static void
megasas_write_64bit_req_desc(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
		le32_to_cpu(req_desc->u.low));
	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
	unsigned long flags;
	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(le32_to_cpu(req_desc->u.low),
		&instance->reg_set->inbound_low_queue_port);
	writel(le32_to_cpu(req_desc->u.high),
		&instance->reg_set->inbound_high_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
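
/*
 * Note: without writeq(), the two 32-bit writes above are paired under
 * hba_lock so that descriptor halves posted from different CPUs cannot
 * interleave at the inbound queue ports.
 */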

/**
 * megasas_fire_cmd_fusion - Sends command to the FW
 * @instance: Adapter soft state
 * @req_desc: 32bit or 64bit Request descriptor
 *
 * Perform PCI Write. AERO SERIES supports 32 bit Descriptor.
 * Prior to AERO_SERIES support 64 bit Descriptor.
 */
static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	if (instance->atomic_desc_support)
		writel(le32_to_cpu(req_desc->u.low),
			&instance->reg_set->inbound_single_queue_port);
	else
		megasas_write_64bit_req_desc(instance, req_desc);
}

/**
 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
 * @instance: Adapter soft state
 * @fw_boot_context: Whether this function is called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * Update host can_queue if the firmware has downgraded the number of max
 * supported firmware commands.
 * The firmware upgrade case is skipped, because the underlying firmware has
 * more resources than are exposed to the OS.
 */
static void
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
	u16 cur_max_fw_cmds = 0;
	u16 ldio_threshold = 0;

	/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
	if (instance->adapter_type < VENTURA_SERIES)
		cur_max_fw_cmds =
		megasas_readl(instance,
			      &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;

	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
	else
		ldio_threshold =
			(instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;

	dev_info(&instance->pdev->dev,
		 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
		 cur_max_fw_cmds, ldio_threshold);

	if (fw_boot_context == OCR_CONTEXT) {
		cur_max_fw_cmds = cur_max_fw_cmds - 1;
		if (cur_max_fw_cmds < instance->max_fw_cmds) {
			instance->cur_can_queue =
				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
						   MEGASAS_FUSION_IOCTL_CMDS);
			instance->host->can_queue = instance->cur_can_queue;
			instance->ldio_threshold = ldio_threshold;
		}
	} else {
		instance->max_fw_cmds = cur_max_fw_cmds;
		instance->ldio_threshold = ldio_threshold;

		if (reset_devices)
			instance->max_fw_cmds = min(instance->max_fw_cmds,
						    (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
		/*
		 * Reduce the max supported cmds by 1. This is to ensure that the
		 * reply_q_sz (1 more than the max cmd that driver may send)
		 * does not exceed max cmds that the FW can support
		 */
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}
}
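
/**
 * megasas_get_msix_index - Choose the MSI-x reply queue for an IO
 * @instance: Adapter soft state
 * @scmd: SCSI command being queued
 * @cmd: Fusion command frame for this IO
 * @data_arms: Number of data arms serving the IO
 */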
static inline void
megasas_get_msix_index(struct megasas_instance *instance,
		       struct scsi_cmnd *scmd,
		       struct megasas_cmd_fusion *cmd,
		       u8 data_arms)
{
	int sdev_busy;

	/* nr_hw_queue = 1 for MegaRAID */
	struct blk_mq_hw_ctx *hctx =
		scmd->device->request_queue->queue_hw_ctx[0];

	sdev_busy = atomic_read(&hctx->nr_active);

	if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
	    sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
		cmd->request_desc->SCSIIO.MSIxIndex =
			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
					MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
	else if (instance->msix_load_balance)
		cmd->request_desc->SCSIIO.MSIxIndex =
			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
				instance->msix_vectors));
	else
		cmd->request_desc->SCSIIO.MSIxIndex =
			instance->reply_map[raw_smp_processor_id()];
}

/**
 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
 * @instance: Adapter soft state
 */
void
megasas_free_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *cmd;

	if (fusion->sense)
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);

	/* SG */
	if (fusion->cmd_list) {
		for (i = 0; i < instance->max_mpt_cmds; i++) {
			cmd = fusion->cmd_list[i];
			if (cmd) {
				if (cmd->sg_frame)
					dma_pool_free(fusion->sg_dma_pool,
						      cmd->sg_frame,
						      cmd->sg_frame_phys_addr);
			}
			kfree(cmd);
		}
		kfree(fusion->cmd_list);
	}

	if (fusion->sg_dma_pool) {
		dma_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	}
	if (fusion->sense_dma_pool) {
		dma_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;
	}


	/* Reply Frame, Desc*/
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	else
		megasas_free_reply_fusion(instance);

	/* Request Frame, Desc*/
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
				  fusion->request_alloc_sz, fusion->req_frames_desc,
				  fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		dma_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;
	}
}
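
/*
 * The allocators below share a pattern: carve buffers from a pool with
 * default alignment, verify with megasas_check_same_4gb_region() that a
 * buffer does not straddle a 4GB boundary, and on failure recreate the
 * pool with the allocation size rounded up to a power of two. A buffer of
 * size S aligned to roundup_pow_of_two(S) can never cross a 4GB boundary;
 * e.g. a 96KB buffer aligned to 128KB keeps the same upper 32 address
 * bits from start to end.
 */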

/**
 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
 * @instance: Adapter soft state
 *
 */
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
	int i;
	u16 max_cmd;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	int sense_sz;
	u32 offset;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;
	sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;

	fusion->sg_dma_pool =
			dma_pool_create("mr_sg", &instance->pdev->dev,
				instance->max_chain_frame_sz,
				MR_DEFAULT_NVME_PAGE_SIZE, 0);
	/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
	fusion->sense_dma_pool =
			dma_pool_create("mr_sense", &instance->pdev->dev,
				sense_sz, 64, 0);

	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
				       GFP_KERNEL, &fusion->sense_phys_addr);
	if (!fusion->sense) {
		dev_err(&instance->pdev->dev,
			"failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/* The sense buffer, request frame and reply desc pool must each lie
	 * within one 4GB region; megasas_check_same_4gb_region() verifies
	 * this.
	 * On failure, a new pci pool is created with updated alignment and
	 * the older allocation and pool are destroyed.
	 * The alignment is chosen so that the next allocation, if it
	 * succeeds, is guaranteed to meet the same-4GB-region requirement.
	 * The actual requirement is not alignment as such: the start and end
	 * of the DMA address range must share the same upper 32 bits.
	 */

	if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
					   sense_sz)) {
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);
		fusion->sense = NULL;
		dma_pool_destroy(fusion->sense_dma_pool);

		fusion->sense_dma_pool =
			dma_pool_create("mr_sense_align", &instance->pdev->dev,
					sense_sz, roundup_pow_of_two(sense_sz),
					0);
		if (!fusion->sense_dma_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
					       GFP_KERNEL,
					       &fusion->sense_phys_addr);
		if (!fusion->sense) {
			dev_err(&instance->pdev->dev,
				"failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = fusion->cmd_list[i];
		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
					GFP_KERNEL, &cmd->sg_frame_phys_addr);

		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;

		if (!cmd->sg_frame) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	/* create sense buffer for the raid 1/10 fp */
	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
	}

	return 0;
}

static int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
	u32 max_mpt_cmd, i, j;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	max_mpt_cmd = instance->max_mpt_cmds;

	/*
	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	fusion->cmd_list =
		kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
			GFP_KERNEL);
	if (!fusion->cmd_list) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	for (i = 0; i < max_mpt_cmd; i++) {
		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
					      GFP_KERNEL);
		if (!fusion->cmd_list[i]) {
			for (j = 0; j < i; j++)
				kfree(fusion->cmd_list[j]);
			kfree(fusion->cmd_list);
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	return 0;
}

static int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

retry_alloc:
	fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq", &instance->pdev->dev,
				fusion->io_frames_alloc_sz, 16, 0);

	if (!fusion->io_request_frames_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				GFP_KERNEL | __GFP_NOWARN,
				&fusion->io_request_frames_phys);
	if (!fusion->io_request_frames) {
		if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
			instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
			dma_pool_destroy(fusion->io_request_frames_pool);
			megasas_configure_queue_sizes(instance);
			goto retry_alloc;
		} else {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->io_request_frames_phys,
					   fusion->io_frames_alloc_sz)) {
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
		fusion->io_request_frames = NULL;
		dma_pool_destroy(fusion->io_request_frames_pool);

		fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq_align",
					&instance->pdev->dev,
					fusion->io_frames_alloc_sz,
					roundup_pow_of_two(fusion->io_frames_alloc_sz),
					0);

		if (!fusion->io_request_frames_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				       GFP_KERNEL | __GFP_NOWARN,
				       &fusion->io_request_frames_phys);

		if (!fusion->io_request_frames) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	fusion->req_frames_desc =
		dma_alloc_coherent(&instance->pdev->dev,
				   fusion->request_alloc_sz,
				   &fusion->req_frames_desc_phys, GFP_KERNEL);
	if (!fusion->req_frames_desc) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	return 0;
}
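
/**
 * megasas_alloc_reply_fusion - Allocate reply queues (non-RDPQ mode)
 * @instance: Adapter soft state
 *
 * A single contiguous buffer holds reply_alloc_sz bytes per MSI-x vector;
 * the reply_frames_desc[] entries point at the per-vector slices of it.
 */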
static int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	fusion = instance->ctrl_context;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply", &instance->pdev->dev,
				fusion->reply_alloc_sz * count, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->reply_frames_desc[0] =
		dma_pool_alloc(fusion->reply_frames_desc_pool,
			       GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
	if (!fusion->reply_frames_desc[0]) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->reply_frames_desc_phys[0],
					   (fusion->reply_alloc_sz * count))) {
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);
		fusion->reply_frames_desc[0] = NULL;
		dma_pool_destroy(fusion->reply_frames_desc_pool);

		fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply_align",
					&instance->pdev->dev,
					fusion->reply_alloc_sz * count,
					roundup_pow_of_two(fusion->reply_alloc_sz * count),
					0);

		if (!fusion->reply_frames_desc_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->reply_frames_desc[0] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL,
				       &fusion->reply_frames_desc_phys[0]);

		if (!fusion->reply_frames_desc[0]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	reply_desc = fusion->reply_frames_desc[0];
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = cpu_to_le64(ULLONG_MAX);

	/* This is not an rdpq mode, but the driver still populates the
	 * reply_frames_desc array to use the same msix index in the ISR path.
	 */
	for (i = 0; i < (count - 1); i++)
		fusion->reply_frames_desc[i + 1] =
			fusion->reply_frames_desc[i] +
			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);

	return 0;
}
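
/**
 * megasas_alloc_rdpq_fusion - Allocate reply queues in RDPQ mode
 * @instance: Adapter soft state
 *
 * Reply queues are carved out of DMA chunks holding
 * RDPQ_MAX_INDEX_IN_ONE_CHUNK queues each; the rdpq_virt array handed to
 * the firmware records the base address of every individual queue.
 */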
static int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
	int i, j, k, msix_count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
	dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
	u8 dma_alloc_count, abs_index;
	u32 chunk_size, array_size, offset;

	fusion = instance->ctrl_context;
	chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
		     MAX_MSIX_QUEUES_FUSION;

	fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
					       array_size, &fusion->rdpq_phys,
					       GFP_KERNEL);
	if (!fusion->rdpq_virt) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
							 &instance->pdev->dev,
							 chunk_size, 16, 0);
	fusion->reply_frames_desc_pool_align =
				dma_pool_create("mr_rdpq_align",
						&instance->pdev->dev,
						chunk_size,
						roundup_pow_of_two(chunk_size),
						0);

	if (!fusion->reply_frames_desc_pool ||
	    !fusion->reply_frames_desc_pool_align) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/*
	 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
	 * for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..)
	 * should be within a 4GB boundary, and the reply queues in a set must
	 * have the same upper 32 bits in their memory address. So here the
	 * driver allocates the DMA'able memory for reply queues accordingly.
	 * The driver applies the VENTURA_SERIES limitation to INVADER_SERIES
	 * as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);

	for (i = 0; i < dma_alloc_count; i++) {
		rdpq_chunk_virt[i] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL, &rdpq_chunk_phys[i]);
		if (!rdpq_chunk_virt[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		/* The reply desc pool must lie within one 4GB region;
		 * megasas_check_same_4gb_region() verifies this.
		 * On failure, a new pci pool is created with updated
		 * alignment.
		 * For RDPQ buffers, the driver always allocates two separate
		 * pci pools.
		 * The alignment is chosen so that the next allocation, if it
		 * succeeds, is guaranteed to meet the same-4GB-region
		 * requirement.
		 * rdpq_tracker keeps track of each buffer's physical and
		 * virtual address and its pci pool descriptor, which helps
		 * the driver while freeing the resources.
		 */
		if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
						   chunk_size)) {
			dma_pool_free(fusion->reply_frames_desc_pool,
				      rdpq_chunk_virt[i],
				      rdpq_chunk_phys[i]);

			rdpq_chunk_virt[i] =
				dma_pool_alloc(fusion->reply_frames_desc_pool_align,
					       GFP_KERNEL, &rdpq_chunk_phys[i]);
			if (!rdpq_chunk_virt[i]) {
				dev_err(&instance->pdev->dev,
					"Failed from %s %d\n",
					__func__, __LINE__);
				return -ENOMEM;
			}
			fusion->rdpq_tracker[i].dma_pool_ptr =
					fusion->reply_frames_desc_pool_align;
		} else {
			fusion->rdpq_tracker[i].dma_pool_ptr =
					fusion->reply_frames_desc_pool;
		}

		fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
		fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
	}

	for (k = 0; k < dma_alloc_count; k++) {
		for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
			abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;

			if (abs_index == msix_count)
				break;
			offset = fusion->reply_alloc_sz * i;
			fusion->rdpq_virt[abs_index].RDPQBaseAddress =
				cpu_to_le64(rdpq_chunk_phys[k] + offset);
			fusion->reply_frames_desc_phys[abs_index] =
				rdpq_chunk_phys[k] + offset;
			fusion->reply_frames_desc[abs_index] =
				(union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);

			reply_desc = fusion->reply_frames_desc[abs_index];
			for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
				reply_desc->Words = ULLONG_MAX;
		}
	}

	return 0;
}

static void
megasas_free_rdpq_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
		if (fusion->rdpq_tracker[i].pool_entry_virt)
			dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
				      fusion->rdpq_tracker[i].pool_entry_virt,
				      fusion->rdpq_tracker[i].pool_entry_phys);
	}

	dma_pool_destroy(fusion->reply_frames_desc_pool);
	dma_pool_destroy(fusion->reply_frames_desc_pool_align);

	if (fusion->rdpq_virt)
		dma_free_coherent(&instance->pdev->dev,
				  sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
				  fusion->rdpq_virt, fusion->rdpq_phys);
}

static void
megasas_free_reply_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->reply_frames_desc[0])
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);

	dma_pool_destroy(fusion->reply_frames_desc_pool);
}

/**
 * megasas_alloc_cmds_fusion - Allocates the command packets
 * @instance: Adapter soft state
 *
 * Each frame has a 32-bit field called context. This context is used to get
 * back the megasas_cmd_fusion from the frame when a frame gets completed.
 * In this driver, the 32 bit values are the indices into an array cmd_list.
 * This array is used only to look up the megasas_cmd_fusion given the context.
 * The free commands themselves are maintained in a linked list called cmd_pool.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request descriptor
 * and is used as the SMID of the cmd.
 * SMID value range is from 1 to max_fw_cmds.
 */
static int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	u32 offset;
	dma_addr_t io_req_base_phys;
	u8 *io_req_base;


	fusion = instance->ctrl_context;

	if (megasas_alloc_request_fusion(instance))
		goto fail_exit;

	if (instance->is_rdpq) {
		if (megasas_alloc_rdpq_fusion(instance))
			goto fail_exit;
	} else
		if (megasas_alloc_reply_fusion(instance))
			goto fail_exit;

	if (megasas_alloc_cmdlist_fusion(instance))
		goto fail_exit;

	dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
		 instance->max_fw_cmds);

	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

	/*
	 * Add all the commands to command pool (fusion->cmd_pool)
	 */

	/* SMID 0 is reserved. Set SMID/index from 1 */
	for (i = 0; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
		cmd->index = i + 1;
		cmd->scmd = NULL;
		cmd->sync_cmd_idx =
			(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
				(i - instance->max_scsi_cmds) :
				(u32)ULONG_MAX; /* Set to Invalid */
		cmd->instance = instance;
		cmd->io_request =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			(io_req_base + offset);
		memset(cmd->io_request, 0,
		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	}

	if (megasas_create_sg_sense_fusion(instance))
		goto fail_exit;

	return 0;

fail_exit:
	megasas_free_cmds_fusion(instance);
	return -ENOMEM;
}
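
/*
 * wait_and_poll() below samples cmd_status every 20 ms and, once every
 * 5 seconds of polling, also reads the firmware state register so that a
 * faulted firmware ends the wait early. DCMD_TIMEOUT is reported when
 * cmd_status is still MFI_STAT_INVALID_STATUS (0xFF), i.e. the firmware
 * never completed the frame within the poll window.
 */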
/**
 * wait_and_poll - Issues a polling command
 * @instance: Adapter soft state
 * @cmd: Command packet to be issued
 * @seconds: Maximum poll time
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds)
{
	int i;
	struct megasas_header *frame_hdr = &cmd->frame->hdr;
	u32 status_reg;

	u32 msecs = seconds * 1000;

	/*
	 * Wait for cmd_status to change
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
		rmb();
		msleep(20);
		if (!(i % 5000)) {
			status_reg = instance->instancet->read_fw_status_reg(instance)
					& MFI_STATE_MASK;
			if (status_reg == MFI_STATE_FAULT)
				break;
		}
	}

	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
		return DCMD_TIMEOUT;
	else if (frame_hdr->cmd_status == MFI_STAT_OK)
		return DCMD_SUCCESS;
	else
		return DCMD_FAILED;
}

/**
 * megasas_ioc_init_fusion - Initializes the FW
 * @instance: Adapter soft state
 *
 * Issues the IOC Init cmd
 */
int
megasas_ioc_init_fusion(struct megasas_instance *instance)
{
	struct megasas_init_frame *init_frame;
	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
	dma_addr_t ioc_init_handle;
	struct megasas_cmd *cmd;
	u8 ret, cur_rdpq_mode;
	struct fusion_context *fusion;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
	int i;
	struct megasas_header *frame_hdr;
	const char *sys_info;
	MFI_CAPABILITIES *drv_ops;
	u32 scratch_pad_1;
	ktime_t time;
	bool cur_fw_64bit_dma_capable;
	bool cur_intr_coalescing;

	fusion = instance->ctrl_context;

	ioc_init_handle = fusion->ioc_init_request_phys;
	IOCInitMessage = fusion->ioc_init_request;

	cmd = fusion->ioc_init_cmd;

	scratch_pad_1 = megasas_readl
		(instance, &instance->reg_set->outbound_scratch_pad_1);

	cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;

	if (instance->adapter_type == INVADER_SERIES) {
		cur_fw_64bit_dma_capable =
			(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;

		if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
			dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
				"DMA mask, but upcoming FW does not support 64bit DMA mask\n");
			megaraid_sas_kill_hba(instance);
			ret = 1;
			goto fail_fw_init;
		}
	}

	if (instance->is_rdpq && !cur_rdpq_mode) {
		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
			" from RDPQ mode to non RDPQ mode\n");
		ret = 1;
		goto fail_fw_init;
	}

	cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
							true : false;

	if ((instance->low_latency_index_start ==
		MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
		instance->perf_mode = MR_BALANCED_PERF_MODE;

	dev_info(&instance->pdev->dev, "Performance mode :%s\n",
		MEGASAS_PERF_MODE_2STR(instance->perf_mode));

	instance->fw_sync_cache_support = (scratch_pad_1 &
		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
		 instance->fw_sync_cache_support ? "Yes" : "No");
"Yes" : "No"); 1112 1113 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); 1114 1115 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 1116 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 1117 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); 1118 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 1119 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 1120 1121 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); 1122 IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ? 1123 cpu_to_le64(fusion->rdpq_phys) : 1124 cpu_to_le64(fusion->reply_frames_desc_phys[0]); 1125 IOCInitMessage->MsgFlags = instance->is_rdpq ? 1126 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; 1127 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 1128 IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr)); 1129 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 1130 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 1131 1132 time = ktime_get_real(); 1133 /* Convert to milliseconds as per FW requirement */ 1134 IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time)); 1135 1136 init_frame = (struct megasas_init_frame *)cmd->frame; 1137 memset(init_frame, 0, IOC_INIT_FRAME_SIZE); 1138 1139 frame_hdr = &cmd->frame->hdr; 1140 frame_hdr->cmd_status = 0xFF; 1141 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1142 1143 init_frame->cmd = MFI_CMD_INIT; 1144 init_frame->cmd_status = 0xFF; 1145 1146 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); 1147 1148 /* driver support Extended MSIX */ 1149 if (instance->adapter_type >= INVADER_SERIES) 1150 drv_ops->mfi_capabilities.support_additional_msix = 1; 1151 /* driver supports HA / Remote LUN over Fast Path interface */ 1152 drv_ops->mfi_capabilities.support_fp_remote_lun = 1; 1153 1154 drv_ops->mfi_capabilities.support_max_255lds = 1; 1155 drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1; 1156 drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1; 1157 1158 if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) 1159 drv_ops->mfi_capabilities.support_ext_io_size = 1; 1160 1161 drv_ops->mfi_capabilities.support_fp_rlbypass = 1; 1162 if (!dual_qdepth_disable) 1163 drv_ops->mfi_capabilities.support_ext_queue_depth = 1; 1164 1165 drv_ops->mfi_capabilities.support_qd_throttling = 1; 1166 drv_ops->mfi_capabilities.support_pd_map_target_id = 1; 1167 drv_ops->mfi_capabilities.support_nvme_passthru = 1; 1168 drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1; 1169 1170 if (instance->consistent_mask_64bit) 1171 drv_ops->mfi_capabilities.support_64bit_mode = 1; 1172 1173 /* Convert capability to LE32 */ 1174 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 1175 1176 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); 1177 if (instance->system_info_buf && sys_info) { 1178 memcpy(instance->system_info_buf->systemId, sys_info, 1179 strlen(sys_info) > 64 ? 64 : strlen(sys_info)); 1180 instance->system_info_buf->systemIdLength = 1181 strlen(sys_info) > 64 ? 

	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(ioc_init_handle));
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(ioc_init_handle));
	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));

	/*
	 * Each bit in replyqueue_mask represents one group of MSI-x vectors
	 * (each group has 8 vectors)
	 */
	switch (instance->perf_mode) {
	case MR_BALANCED_PERF_MODE:
		init_frame->replyqueue_mask =
		       cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
		break;
	case MR_IOPS_PERF_MODE:
		init_frame->replyqueue_mask =
		       cpu_to_le16(~(~0 << instance->msix_vectors/8));
		break;
	}
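
	/*
	 * Example: in balanced mode with low_latency_index_start == 8 (one
	 * group of high-IOPS vectors), ~(~0 << 8/8) == 0x1, so only MSI-x
	 * group 0 is flagged in replyqueue_mask.
	 */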

	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
	req_desc.MFAIo.RequestFlags =
		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/*
	 * disable the intr before firing the init frame
	 */
	instance->instancet->disable_intr(instance);

	for (i = 0; i < (10 * 1000); i += 20) {
		if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
			msleep(20);
		else
			break;
	}

	/* For AERO also, IOC_INIT requires 64 bit descriptor write */
	megasas_write_64bit_req_desc(instance, &req_desc);

	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);

	frame_hdr = &cmd->frame->hdr;
	if (frame_hdr->cmd_status != 0) {
		ret = 1;
		goto fail_fw_init;
	}

	if (instance->adapter_type >= AERO_SERIES) {
		scratch_pad_1 = megasas_readl
			(instance, &instance->reg_set->outbound_scratch_pad_1);

		instance->atomic_desc_support =
			(scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;

		dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
			instance->atomic_desc_support ? "Yes" : "No");
	}

	return 0;

fail_fw_init:
	dev_err(&instance->pdev->dev,
		"Init cmd return status FAILED for SCSI host %d\n",
		instance->host->host_no);

	return ret;
}

/**
 * megasas_sync_pd_seq_num - JBOD SEQ MAP
 * @instance: Adapter soft state
 * @pend: set to 1, if it is pended jbod map.
 *
 * Issue the JBOD map to the firmware. For a pended command, issue it and
 * return. For the first instance of the JBOD map, issue the command and
 * wait for its completion.
 */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend)
{
	int ret = 0;
	size_t pd_seq_map_sz;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	dma_addr_t pd_seq_h;

	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
	pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		dev_err(&instance->pdev->dev,
			"Could not get mfi cmd. Fail from %s %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (pend) {
		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = MFI_FRAME_DIR_WRITE;
		instance->jbod_seq_cmd = cmd;
	} else {
		dcmd->flags = MFI_FRAME_DIR_READ;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);

	if (pend) {
		instance->instancet->issue_dcmd(instance, cmd);
		return 0;
	}

	/* Below code is only for non pended DCMD */
	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		dev_warn(&instance->pdev->dev,
			 "driver supports max %d JBOD, but FW reports %d\n",
			 MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
		ret = -EINVAL;
	}

	if (ret == DCMD_TIMEOUT)
		dev_warn(&instance->pdev->dev,
			 "%s DCMD timed out, continue without JBOD sequence map\n",
			 __func__);

	if (ret == DCMD_SUCCESS)
		instance->pd_seq_map_id++;

	megasas_return_cmd(instance, cmd);
	return ret;
}

/*
 * megasas_get_ld_map_info - Returns FW's ld_map structure
 * @instance: Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's LD map structure.
 * This information is mainly used to decide whether fast path IO is
 * supported (see MR_ValidateMapInfo()).
 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO:
 * dcmd.mbox.b[0]	- number of LDs being sync'd
 * dcmd.mbox.b[1]	- 0 - complete command immediately.
 *			- 1 - pend till config change
 * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
 *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
 *			      uses extended struct MR_FW_RAID_MAP_EXT
 */
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	void *ci;
	dma_addr_t ci_h = 0;
	u32 size_map_info;
	struct fusion_context *fusion;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return -ENXIO;
	}

	dcmd = &cmd->frame->dcmd;

	size_map_info = fusion->current_map_sz;

	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, fusion->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT)
		dev_warn(&instance->pdev->dev,
			 "%s DCMD timed out, RAID map is disabled\n",
			 __func__);

	megasas_return_cmd(instance, cmd);

	return ret;
}

u8
megasas_get_map_info(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;

	fusion->fast_path_io = 0;
	if (!megasas_get_ld_map_info(instance)) {
		if (MR_ValidateMapInfo(instance, instance->map_id)) {
			fusion->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * megasas_sync_map_info - Sends the LD map sync list to the FW
 * @instance: Adapter soft state
 *
 * Issues a pended internal command (DCMD) that carries the driver's list
 * of LD target ids and sequence numbers to the FW; the FW completes it
 * when the configuration changes.
 */
int
megasas_sync_map_info(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u16 num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_DRV_RAID_MAP_ALL *map;
	struct MR_LD_RAID *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;
	u32 size_map_info;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return 1;
	}

	map = fusion->ld_drv_map[instance->map_id & 1];

	num_lds = le16_to_cpu(map->raidMap.ldCount);

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	ci = (struct MR_LD_TARGET_SYNC *)
		fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, fusion->max_map_sz);

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = fusion->current_map_sz;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);

	return 0;
}

/*
 * megasas_display_intel_branding - Display branding string
 * @instance: per adapter object
 *
 * Return nothing.
 */
static void
megasas_display_intel_branding(struct megasas_instance *instance)
{
	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_INVADER:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3DC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3DC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC040_BRANDING);
			break;
		case MEGARAID_INTEL_RS3SC008_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3SC008_BRANDING);
			break;
		case MEGARAID_INTEL_RS3MC044_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3MC044_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_FURY:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3WC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3WC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC040_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RMS3BC160_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RMS3BC160_BRANDING);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * megasas_allocate_raid_maps - Allocate memory for RAID maps
 * @instance: Adapter soft state
 *
 * return: if success: return 0
 *	   failed:  return -ENOMEM
 */
static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	int i = 0;

	fusion = instance->ctrl_context;

	fusion->drv_map_pages = get_order(fusion->drv_map_sz);

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = NULL;

		fusion->ld_drv_map[i] = (void *)
			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
					 fusion->drv_map_pages);

		if (!fusion->ld_drv_map[i]) {
			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);

			if (!fusion->ld_drv_map[i]) {
				dev_err(&instance->pdev->dev,
					"Could not allocate memory for local map"
					" size requested: %d\n",
					fusion->drv_map_sz);
				goto ld_drv_map_alloc_fail;
			}
		}
	}

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
						       fusion->max_map_sz,
						       &fusion->ld_map_phys[i],
						       GFP_KERNEL);
		if (!fusion->ld_map[i]) {
			dev_err(&instance->pdev->dev,
				"Could not allocate memory for map info %s:%d\n",
				__func__, __LINE__);
			goto ld_map_alloc_fail;
		}
	}

	return 0;

ld_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_map[i])
			dma_free_coherent(&instance->pdev->dev,
					  fusion->max_map_sz,
					  fusion->ld_map[i],
					  fusion->ld_map_phys[i]);
	}
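
	/* fall through: release the driver-local maps as well */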
ld_drv_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_drv_map[i]) {
			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
				vfree(fusion->ld_drv_map[i]);
			else
				free_pages((ulong)fusion->ld_drv_map[i],
					   fusion->drv_map_pages);
		}
	}

	return -ENOMEM;
}

/**
 * megasas_configure_queue_sizes - Calculate size of request desc queue,
 *				   reply desc queue,
 *				   IO request frame queue, set can_queue.
 * @instance: Adapter soft state
 * @return: void
 */
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u16 max_cmd;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;

	if (instance->adapter_type >= VENTURA_SERIES)
		instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
	else
		instance->max_mpt_cmds = instance->max_fw_cmds;

	instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
	instance->cur_can_queue = instance->max_scsi_cmds;
	instance->host->can_queue = instance->cur_can_queue;
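
	/*
	 * Reply queue depth: (max_cmd + 1) rounded up to the next multiple
	 * of 16, then doubled; e.g. max_cmd = 1008 gives 2 * 1024 = 2048
	 * descriptors per queue.
	 */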
	fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;

	fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
					  instance->max_mpt_cmds;
	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
					(fusion->reply_q_depth);
	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
		 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
}

static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	struct megasas_cmd *cmd;

	fusion = instance->ctrl_context;

	cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
					IOC_INIT_FRAME_SIZE,
					&cmd->frame_phys_addr, GFP_KERNEL);

	if (!cmd->frame) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		kfree(cmd);
		return -ENOMEM;
	}

	fusion->ioc_init_cmd = cmd;
	return 0;
}

/**
 * megasas_free_ioc_init_cmd - Free IOC INIT command frame
 * @instance: Adapter soft state
 */
static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
		dma_free_coherent(&instance->pdev->dev,
				  IOC_INIT_FRAME_SIZE,
				  fusion->ioc_init_cmd->frame,
				  fusion->ioc_init_cmd->frame_phys_addr);

	kfree(fusion->ioc_init_cmd);
}

/**
 * megasas_init_adapter_fusion - Initializes the FW
 * @instance: Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u32 scratch_pad_1;
	int i = 0, count;
	u32 status_reg;

	fusion = instance->ctrl_context;

	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);

	/*
	 * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
	 */
	instance->max_mfi_cmds =
		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;

	megasas_configure_queue_sizes(instance);

	scratch_pad_1 = megasas_readl(instance,
				      &instance->reg_set->outbound_scratch_pad_1);
	/* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * the firmware supports an extended IO chain frame which is 4 times
	 * larger than the legacy firmware's.
	 * Legacy Firmware - Frame size is (8 * 128) = 1K
	 * 1M IO Firmware  - Frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
	else
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;

	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
			instance->max_chain_frame_sz,
			MEGASAS_CHAIN_FRAME_SZ_MIN);
		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
	}

	fusion->max_sge_in_main_msg =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;

	fusion->max_sge_in_chain =
		instance->max_chain_frame_sz
			/ sizeof(union MPI2_SGE_IO_UNION);

	instance->max_num_sge =
		rounddown_pow_of_two(fusion->max_sge_in_main_msg
					+ fusion->max_sge_in_chain - 2);
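
	/*
	 * Illustration (assuming the 256-byte default IO frame with the SGL
	 * at offset 128 and 16-byte SGEs): with the minimum 1K chain frame,
	 * max_sge_in_main_msg = (256 - 128)/16 = 8 and max_sge_in_chain =
	 * 1024/16 = 64, so max_num_sge = rounddown_pow_of_two(8 + 64 - 2)
	 * = 64.
	 */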
1829 */ 1830 instance->max_scsi_cmds = instance->max_fw_cmds - 1831 (MEGASAS_FUSION_INTERNAL_CMDS + 1832 MEGASAS_FUSION_IOCTL_CMDS); 1833 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); 1834 1835 if (megasas_alloc_ioc_init_frame(instance)) 1836 return 1; 1837 1838 /* 1839 * Allocate memory for descriptors 1840 * Create a pool of commands 1841 */ 1842 if (megasas_alloc_cmds(instance)) 1843 goto fail_alloc_mfi_cmds; 1844 if (megasas_alloc_cmds_fusion(instance)) 1845 goto fail_alloc_cmds; 1846 1847 if (megasas_ioc_init_fusion(instance)) { 1848 status_reg = instance->instancet->read_fw_status_reg(instance); 1849 if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) && 1850 (status_reg & MFI_RESET_ADAPTER)) { 1851 /* Do a chip reset and then retry IOC INIT once */ 1852 if (megasas_adp_reset_wait_for_ready 1853 (instance, true, 0) == FAILED) 1854 goto fail_ioc_init; 1855 1856 if (megasas_ioc_init_fusion(instance)) 1857 goto fail_ioc_init; 1858 } else { 1859 goto fail_ioc_init; 1860 } 1861 } 1862 1863 megasas_display_intel_branding(instance); 1864 if (megasas_get_ctrl_info(instance)) { 1865 dev_err(&instance->pdev->dev, 1866 "Could not get controller info. Fail from %s %d\n", 1867 __func__, __LINE__); 1868 goto fail_ioc_init; 1869 } 1870 1871 instance->flag_ieee = 1; 1872 instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT; 1873 instance->threshold_reply_count = instance->max_fw_cmds / 4; 1874 fusion->fast_path_io = 0; 1875 1876 if (megasas_allocate_raid_maps(instance)) 1877 goto fail_ioc_init; 1878 1879 if (!megasas_get_map_info(instance)) 1880 megasas_sync_map_info(instance); 1881 1882 return 0; 1883 1884 fail_ioc_init: 1885 megasas_free_cmds_fusion(instance); 1886 fail_alloc_cmds: 1887 megasas_free_cmds(instance); 1888 fail_alloc_mfi_cmds: 1889 megasas_free_ioc_init_cmd(instance); 1890 return 1; 1891 } 1892 1893 /** 1894 * megasas_fault_detect_work - Worker function of 1895 * FW fault handling workqueue. 
1896 * @work: FW fault work struct 1897 */ 1898 static void 1899 megasas_fault_detect_work(struct work_struct *work) 1900 { 1901 struct megasas_instance *instance = 1902 container_of(work, struct megasas_instance, 1903 fw_fault_work.work); 1904 u32 fw_state, dma_state, status; 1905 1906 /* Check the fw state */ 1907 fw_state = instance->instancet->read_fw_status_reg(instance) & 1908 MFI_STATE_MASK; 1909 1910 if (fw_state == MFI_STATE_FAULT) { 1911 dma_state = instance->instancet->read_fw_status_reg(instance) & 1912 MFI_STATE_DMADONE; 1913 /* Start collecting crash, if DMA bit is done */ 1914 if (instance->crash_dump_drv_support && 1915 instance->crash_dump_app_support && dma_state) { 1916 megasas_fusion_crash_dump(instance); 1917 } else { 1918 if (instance->unload == 0) { 1919 status = megasas_reset_fusion(instance->host, 0); 1920 if (status != SUCCESS) { 1921 dev_err(&instance->pdev->dev, 1922 "Failed from %s %d, do not re-arm timer\n", 1923 __func__, __LINE__); 1924 return; 1925 } 1926 } 1927 } 1928 } 1929 1930 if (instance->fw_fault_work_q) 1931 queue_delayed_work(instance->fw_fault_work_q, 1932 &instance->fw_fault_work, 1933 msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); 1934 } 1935 1936 int 1937 megasas_fusion_start_watchdog(struct megasas_instance *instance) 1938 { 1939 /* Check if the Fault WQ is already started */ 1940 if (instance->fw_fault_work_q) 1941 return SUCCESS; 1942 1943 INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work); 1944 1945 snprintf(instance->fault_handler_work_q_name, 1946 sizeof(instance->fault_handler_work_q_name), 1947 "poll_megasas%d_status", instance->host->host_no); 1948 1949 instance->fw_fault_work_q = 1950 create_singlethread_workqueue(instance->fault_handler_work_q_name); 1951 if (!instance->fw_fault_work_q) { 1952 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1953 __func__, __LINE__); 1954 return FAILED; 1955 } 1956 1957 queue_delayed_work(instance->fw_fault_work_q, 1958 &instance->fw_fault_work, 1959 msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL)); 1960 1961 return SUCCESS; 1962 } 1963 1964 void 1965 megasas_fusion_stop_watchdog(struct megasas_instance *instance) 1966 { 1967 struct workqueue_struct *wq; 1968 1969 if (instance->fw_fault_work_q) { 1970 wq = instance->fw_fault_work_q; 1971 instance->fw_fault_work_q = NULL; 1972 if (!cancel_delayed_work_sync(&instance->fw_fault_work)) 1973 flush_workqueue(wq); 1974 destroy_workqueue(wq); 1975 } 1976 } 1977 1978 /** 1979 * map_cmd_status - Maps FW cmd status to OS cmd status 1980 * @fusion: fusion context 1981 * @scmd: Pointer to cmd 1982 * @status: status of cmd returned by FW 1983 * @ext_status: ext status of cmd returned by FW 1984 * @data_length: command data length 1985 * @sense: command sense data 1986 */ 1987 static void 1988 map_cmd_status(struct fusion_context *fusion, 1989 struct scsi_cmnd *scmd, u8 status, u8 ext_status, 1990 u32 data_length, u8 *sense) 1991 { 1992 u8 cmd_type; 1993 int resid; 1994 1995 cmd_type = megasas_cmd_type(scmd); 1996 switch (status) { 1997 1998 case MFI_STAT_OK: 1999 scmd->result = DID_OK << 16; 2000 break; 2001 2002 case MFI_STAT_SCSI_IO_FAILED: 2003 case MFI_STAT_LD_INIT_IN_PROGRESS: 2004 scmd->result = (DID_ERROR << 16) | ext_status; 2005 break; 2006 2007 case MFI_STAT_SCSI_DONE_WITH_ERROR: 2008 2009 scmd->result = (DID_OK << 16) | ext_status; 2010 if (ext_status == SAM_STAT_CHECK_CONDITION) { 2011 memset(scmd->sense_buffer, 0, 2012 SCSI_SENSE_BUFFERSIZE); 2013 memcpy(scmd->sense_buffer, sense, 2014 SCSI_SENSE_BUFFERSIZE); 2015 
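/* DRIVER_SENSE in the driver byte (bits 31:24 of scmd->result) marks the sense data just copied as valid */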
scmd->result |= DRIVER_SENSE << 24;
2016 }
2017
2018 /*
2019 * If the IO request is partially completed, then MR FW will
2020 * update "io_request->DataLength" with the actual number of
2021 * bytes transferred. The driver then sets the residual byte count
2022 * in the SCSI command structure.
2023 */
2024 resid = (scsi_bufflen(scmd) - data_length);
2025 scsi_set_resid(scmd, resid);
2026
2027 if (resid &&
2028 ((cmd_type == READ_WRITE_LDIO) ||
2029 (cmd_type == READ_WRITE_SYSPDIO)))
2030 scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
2031 " requested/completed 0x%x/0x%x\n",
2032 status, scsi_bufflen(scmd), data_length);
2033 break;
2034
2035 case MFI_STAT_LD_OFFLINE:
2036 case MFI_STAT_DEVICE_NOT_FOUND:
2037 scmd->result = DID_BAD_TARGET << 16;
2038 break;
2039 case MFI_STAT_CONFIG_SEQ_MISMATCH:
2040 scmd->result = DID_IMM_RETRY << 16;
2041 break;
2042 default:
2043 scmd->result = DID_ERROR << 16;
2044 break;
2045 }
2046 }
2047
2048 /**
2049 * megasas_is_prp_possible -
2050 * Checks if native NVMe PRPs can be built for the IO
2051 *
2052 * @instance: Adapter soft state
2053 * @scmd: SCSI command from the mid-layer
2054 * @sge_count: scatter gather element count.
2055 *
2056 * Returns: true: PRPs can be built
2057 * false: IEEE SGLs need to be built
2058 */
2059 static bool
2060 megasas_is_prp_possible(struct megasas_instance *instance,
2061 struct scsi_cmnd *scmd, int sge_count)
2062 {
2063 u32 data_length = 0;
2064 struct scatterlist *sg_scmd;
2065 bool build_prp = false;
2066 u32 mr_nvme_pg_size;
2067
2068 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
2069 MR_DEFAULT_NVME_PAGE_SIZE);
2070 data_length = scsi_bufflen(scmd);
2071 sg_scmd = scsi_sglist(scmd);
2072
2073 /*
2074 * NVMe uses one PRP entry for each page (or partial page).
2075 * Look at the data length: if 4 pages or less, IEEE SGLs are OK;
2076 * if more than 5 pages, build native NVMe PRPs;
2077 * if more than 4 but at most 5 pages, check the first SG entry:
2078 * if its length within the page is >= the residual beyond 4 pages,
2079 * use IEEE SGLs, otherwise build native NVMe PRPs.
2080 */
2081
2082 if (data_length > (mr_nvme_pg_size * 5)) {
2083 build_prp = true;
2084 } else if ((data_length > (mr_nvme_pg_size * 4)) &&
2085 (data_length <= (mr_nvme_pg_size * 5))) {
2086 /* check if 1st SG entry size is < residual beyond 4 pages */
2087 if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
2088 build_prp = true;
2089 }
2090
2091 return build_prp;
2092 }
2093
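/*
 * Worked example for the heuristic above, with an illustrative
 * mr_nvme_pg_size of 4096: a 24 KiB transfer spans more than 5 pages, so
 * PRPs are always built; an 18 KiB transfer falls between 4 and 5 pages,
 * so PRPs are built only if the first SGE is shorter than the residual
 * beyond the 4th page (18432 - 16384 = 2048 bytes).
 */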
2103 * 2104 * Returns: true: PRPs are built 2105 * false: IEEE SGLs needs to be built 2106 */ 2107 static bool 2108 megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, 2109 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 2110 struct megasas_cmd_fusion *cmd, int sge_count) 2111 { 2112 int sge_len, offset, num_prp_in_chain = 0; 2113 struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; 2114 u64 *ptr_sgl; 2115 dma_addr_t ptr_sgl_phys; 2116 u64 sge_addr; 2117 u32 page_mask, page_mask_result; 2118 struct scatterlist *sg_scmd; 2119 u32 first_prp_len; 2120 bool build_prp = false; 2121 int data_len = scsi_bufflen(scmd); 2122 u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 2123 MR_DEFAULT_NVME_PAGE_SIZE); 2124 2125 build_prp = megasas_is_prp_possible(instance, scmd, sge_count); 2126 2127 if (!build_prp) 2128 return false; 2129 2130 /* 2131 * Nvme has a very convoluted prp format. One prp is required 2132 * for each page or partial page. Driver need to split up OS sg_list 2133 * entries if it is longer than one page or cross a page 2134 * boundary. Driver also have to insert a PRP list pointer entry as 2135 * the last entry in each physical page of the PRP list. 2136 * 2137 * NOTE: The first PRP "entry" is actually placed in the first 2138 * SGL entry in the main message as IEEE 64 format. The 2nd 2139 * entry in the main message is the chain element, and the rest 2140 * of the PRP entries are built in the contiguous pcie buffer. 2141 */ 2142 page_mask = mr_nvme_pg_size - 1; 2143 ptr_sgl = (u64 *)cmd->sg_frame; 2144 ptr_sgl_phys = cmd->sg_frame_phys_addr; 2145 memset(ptr_sgl, 0, instance->max_chain_frame_sz); 2146 2147 /* Build chain frame element which holds all prps except first*/ 2148 main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) 2149 ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); 2150 2151 main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); 2152 main_chain_element->NextChainOffset = 0; 2153 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2154 IEEE_SGE_FLAGS_SYSTEM_ADDR | 2155 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 2156 2157 /* Build first prp, sge need not to be page aligned*/ 2158 ptr_first_sgl = sgl_ptr; 2159 sg_scmd = scsi_sglist(scmd); 2160 sge_addr = sg_dma_address(sg_scmd); 2161 sge_len = sg_dma_len(sg_scmd); 2162 2163 offset = (u32)(sge_addr & page_mask); 2164 first_prp_len = mr_nvme_pg_size - offset; 2165 2166 ptr_first_sgl->Address = cpu_to_le64(sge_addr); 2167 ptr_first_sgl->Length = cpu_to_le32(first_prp_len); 2168 2169 data_len -= first_prp_len; 2170 2171 if (sge_len > first_prp_len) { 2172 sge_addr += first_prp_len; 2173 sge_len -= first_prp_len; 2174 } else if (sge_len == first_prp_len) { 2175 sg_scmd = sg_next(sg_scmd); 2176 sge_addr = sg_dma_address(sg_scmd); 2177 sge_len = sg_dma_len(sg_scmd); 2178 } 2179 2180 for (;;) { 2181 offset = (u32)(sge_addr & page_mask); 2182 2183 /* Put PRP pointer due to page boundary*/ 2184 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; 2185 if (unlikely(!page_mask_result)) { 2186 scmd_printk(KERN_NOTICE, 2187 scmd, "page boundary ptr_sgl: 0x%p\n", 2188 ptr_sgl); 2189 ptr_sgl_phys += 8; 2190 *ptr_sgl = cpu_to_le64(ptr_sgl_phys); 2191 ptr_sgl++; 2192 num_prp_in_chain++; 2193 } 2194 2195 *ptr_sgl = cpu_to_le64(sge_addr); 2196 ptr_sgl++; 2197 ptr_sgl_phys += 8; 2198 num_prp_in_chain++; 2199 2200 sge_addr += mr_nvme_pg_size; 2201 sge_len -= mr_nvme_pg_size; 2202 data_len -= mr_nvme_pg_size; 2203 2204 if (data_len <= 0) 2205 break; 2206 2207 if (sge_len > 0) 2208 
2220
2221 /**
2222 * megasas_make_sgl_fusion - Prepares IEEE SGLs (64-bit SGEs)
2223 * @instance: Adapter soft state
2224 * @scp: SCSI command from the mid-layer
2225 * @sgl_ptr: SGL to be filled in
2226 * @cmd: cmd we are working on
2227 * @sge_count: sge count
2228 *
2229 */
2230 static void
2231 megasas_make_sgl_fusion(struct megasas_instance *instance,
2232 struct scsi_cmnd *scp,
2233 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
2234 struct megasas_cmd_fusion *cmd, int sge_count)
2235 {
2236 int i, sg_processed;
2237 struct scatterlist *os_sgl;
2238 struct fusion_context *fusion;
2239
2240 fusion = instance->ctrl_context;
2241
2242 if (instance->adapter_type >= INVADER_SERIES) {
2243 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
2244 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2245 sgl_ptr_end->Flags = 0;
2246 }
2247
2248 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
2249 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
2250 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
2251 sgl_ptr->Flags = 0;
2252 if (instance->adapter_type >= INVADER_SERIES)
2253 if (i == sge_count - 1)
2254 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
2255 sgl_ptr++;
2256 sg_processed = i + 1;
2257
2258 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
2259 (sge_count > fusion->max_sge_in_main_msg)) {
2260
2261 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
2262 if (instance->adapter_type >= INVADER_SERIES) {
2263 if ((le16_to_cpu(cmd->io_request->IoFlags) &
2264 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
2265 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
2266 cmd->io_request->ChainOffset =
2267 fusion->
2268 chain_offset_io_request;
2269 else
2270 cmd->io_request->ChainOffset = 0;
2271 } else
2272 cmd->io_request->ChainOffset =
2273 fusion->chain_offset_io_request;
2274
2275 sg_chain = sgl_ptr;
2276 /* Prepare chain element */
2277 sg_chain->NextChainOffset = 0;
2278 if (instance->adapter_type >= INVADER_SERIES)
2279 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
2280 else
2281 sg_chain->Flags =
2282 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2283 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
2284 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
2285 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
2286
2287 sgl_ptr =
2288 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
2289 memset(sgl_ptr, 0, instance->max_chain_frame_sz);
2290 }
2291 }
2292 }
2293
2294 /**
2295 * megasas_make_sgl - Build Scatter Gather List(SGLs)
2296 * @scp: SCSI command pointer
2297 * @instance: Soft instance of controller
2298 * @cmd: Fusion command pointer
2299 *
2300 * This function will build SGLs based on device type.
2301 * For NVMe drives, there is a different way of building SGLs, in the
2302 * NVMe-native format: PRPs (Physical Region Pages).
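 * (The per-IO choice is made below: only fast-path capable commands to an
 * NVME_PD interface attempt PRPs; everything else gets IEEE SGLs.)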
2303 * 2304 * Returns the number of sg lists actually used, zero if the sg lists 2305 * is NULL, or -ENOMEM if the mapping failed 2306 */ 2307 static 2308 int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, 2309 struct megasas_cmd_fusion *cmd) 2310 { 2311 int sge_count; 2312 bool build_prp = false; 2313 struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64; 2314 2315 sge_count = scsi_dma_map(scp); 2316 2317 if ((sge_count > instance->max_num_sge) || (sge_count <= 0)) 2318 return sge_count; 2319 2320 sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL; 2321 if ((le16_to_cpu(cmd->io_request->IoFlags) & 2322 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) && 2323 (cmd->pd_interface == NVME_PD)) 2324 build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64, 2325 cmd, sge_count); 2326 2327 if (!build_prp) 2328 megasas_make_sgl_fusion(instance, scp, sgl_chain64, 2329 cmd, sge_count); 2330 2331 return sge_count; 2332 } 2333 2334 /** 2335 * megasas_set_pd_lba - Sets PD LBA 2336 * @io_request: IO request 2337 * @cdb_len: cdb length 2338 * @io_info: IO information 2339 * @scp: SCSI command 2340 * @local_map_ptr: Raid map 2341 * @ref_tag: Primary reference tag 2342 * 2343 * Used to set the PD LBA in CDB for FP IOs 2344 */ 2345 static void 2346 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 2347 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 2348 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 2349 { 2350 struct MR_LD_RAID *raid; 2351 u16 ld; 2352 u64 start_blk = io_info->pdBlock; 2353 u8 *cdb = io_request->CDB.CDB32; 2354 u32 num_blocks = io_info->numBlocks; 2355 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; 2356 2357 /* Check if T10 PI (DIF) is enabled for this LD */ 2358 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 2359 raid = MR_LdRaidGet(ld, local_map_ptr); 2360 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 2361 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 2362 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; 2363 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; 2364 2365 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2366 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 2367 else 2368 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 2369 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; 2370 2371 /* LBA */ 2372 cdb[12] = (u8)((start_blk >> 56) & 0xff); 2373 cdb[13] = (u8)((start_blk >> 48) & 0xff); 2374 cdb[14] = (u8)((start_blk >> 40) & 0xff); 2375 cdb[15] = (u8)((start_blk >> 32) & 0xff); 2376 cdb[16] = (u8)((start_blk >> 24) & 0xff); 2377 cdb[17] = (u8)((start_blk >> 16) & 0xff); 2378 cdb[18] = (u8)((start_blk >> 8) & 0xff); 2379 cdb[19] = (u8)(start_blk & 0xff); 2380 2381 /* Logical block reference tag */ 2382 io_request->CDB.EEDP32.PrimaryReferenceTag = 2383 cpu_to_be32(ref_tag); 2384 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); 2385 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 2386 2387 /* Transfer length */ 2388 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 2389 cdb[29] = (u8)((num_blocks >> 16) & 0xff); 2390 cdb[30] = (u8)((num_blocks >> 8) & 0xff); 2391 cdb[31] = (u8)(num_blocks & 0xff); 2392 2393 /* set SCSI IO EEDPFlags */ 2394 if (scp->sc_data_direction == DMA_FROM_DEVICE) { 2395 io_request->EEDPFlags = cpu_to_le16( 2396 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2397 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2398 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 2399 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 2400 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE | 2401 
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2402 } else {
2403 io_request->EEDPFlags = cpu_to_le16(
2404 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2405 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
2406 }
2407 io_request->Control |= cpu_to_le32((0x4 << 26));
2408 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
2409 } else {
2410 /* Some drives don't support 16/12 byte CDBs, convert to 10 */
2411 if (((cdb_len == 12) || (cdb_len == 16)) &&
2412 (start_blk <= 0xffffffff)) {
2413 if (cdb_len == 16) {
2414 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2415 flagvals = cdb[1];
2416 groupnum = cdb[14];
2417 control = cdb[15];
2418 } else {
2419 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2420 flagvals = cdb[1];
2421 groupnum = cdb[10];
2422 control = cdb[11];
2423 }
2424
2425 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2426
2427 cdb[0] = opcode;
2428 cdb[1] = flagvals;
2429 cdb[6] = groupnum;
2430 cdb[9] = control;
2431
2432 /* Transfer length */
2433 cdb[8] = (u8)(num_blocks & 0xff);
2434 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
2435
2436 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
2437 cdb_len = 10;
2438 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
2439 /* Convert to a 16 byte CDB for large LBAs */
2440 switch (cdb_len) {
2441 case 6:
2442 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
2443 control = cdb[5];
2444 break;
2445 case 10:
2446 opcode =
2447 cdb[0] == READ_10 ? READ_16 : WRITE_16;
2448 flagvals = cdb[1];
2449 groupnum = cdb[6];
2450 control = cdb[9];
2451 break;
2452 case 12:
2453 opcode =
2454 cdb[0] == READ_12 ? READ_16 : WRITE_16;
2455 flagvals = cdb[1];
2456 groupnum = cdb[10];
2457 control = cdb[11];
2458 break;
2459 }
2460
2461 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2462
2463 cdb[0] = opcode;
2464 cdb[1] = flagvals;
2465 cdb[14] = groupnum;
2466 cdb[15] = control;
2467
2468 /* Transfer length */
2469 cdb[13] = (u8)(num_blocks & 0xff);
2470 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
2471 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
2472 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
2473
2474 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
2475 cdb_len = 16;
2476 }
2477
2478 /* Normal case: just load the LBA here */
2479 switch (cdb_len) {
2480 case 6:
2481 {
2482 u8 val = cdb[1] & 0xE0;
2483 cdb[3] = (u8)(start_blk & 0xff);
2484 cdb[2] = (u8)((start_blk >> 8) & 0xff);
2485 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
2486 break;
2487 }
2488 case 10:
2489 cdb[5] = (u8)(start_blk & 0xff);
2490 cdb[4] = (u8)((start_blk >> 8) & 0xff);
2491 cdb[3] = (u8)((start_blk >> 16) & 0xff);
2492 cdb[2] = (u8)((start_blk >> 24) & 0xff);
2493 break;
2494 case 12:
2495 cdb[5] = (u8)(start_blk & 0xff);
2496 cdb[4] = (u8)((start_blk >> 8) & 0xff);
2497 cdb[3] = (u8)((start_blk >> 16) & 0xff);
2498 cdb[2] = (u8)((start_blk >> 24) & 0xff);
2499 break;
2500 case 16:
2501 cdb[9] = (u8)(start_blk & 0xff);
2502 cdb[8] = (u8)((start_blk >> 8) & 0xff);
2503 cdb[7] = (u8)((start_blk >> 16) & 0xff);
2504 cdb[6] = (u8)((start_blk >> 24) & 0xff);
2505 cdb[5] = (u8)((start_blk >> 32) & 0xff);
2506 cdb[4] = (u8)((start_blk >> 40) & 0xff);
2507 cdb[3] = (u8)((start_blk >> 48) & 0xff);
2508 cdb[2] = (u8)((start_blk >> 56) & 0xff);
2509 break;
2510 }
2511 }
2512 }
2513
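/*
 * The per-LD stream slots are kept most-recently-used-first in the packed
 * mru_bit_map: MAX_STREAMS_TRACKED fields of BITS_PER_INDEX_STREAM bits
 * each (4-bit fields, judging by the << 4 used below). On a hit at
 * position i, fields 0..i-1 shift up one slot and the hit stream's index
 * is re-inserted at the bottom; on a miss, the stream in the top, least
 * recently used, field is recycled. Illustrative reading of the bit
 * manipulation below.
 */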
2514 /**
2515 * megasas_stream_detect - stream detection on read and write IOs
2516 * @instance: Adapter soft state
2517 * @cmd: Command to be prepared
2518 * @io_info: IO Request info
2519 *
2520 */
2521
2522 static void
2523 megasas_stream_detect(struct megasas_instance *instance,
2524 struct megasas_cmd_fusion *cmd,
2525 struct IO_REQUEST_INFO *io_info)
2526 {
2527 struct fusion_context *fusion = instance->ctrl_context;
2528 u32 device_id = io_info->ldTgtId;
2529 struct LD_STREAM_DETECT *current_ld_sd
2530 = fusion->stream_detect_by_ld[device_id];
2531 u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
2532 u32 shifted_values, unshifted_values;
2533 u32 index_value_mask, shifted_values_mask;
2534 int i;
2535 bool is_read_ahead = false;
2536 struct STREAM_DETECT *current_sd;
2537 /* find possible stream */
2538 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
2539 stream_num = (*track_stream >>
2540 (i * BITS_PER_INDEX_STREAM)) &
2541 STREAM_MASK;
2542 current_sd = &current_ld_sd->stream_track[stream_num];
2543 /* if we found a stream, update the raid
2544 * context and also update the mruBitMap
2545 */
2546 /* boundary condition */
2547 if ((current_sd->next_seq_lba) &&
2548 (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
2549 (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
2550 (current_sd->is_read == io_info->isRead)) {
2551
2552 if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
2553 ((!io_info->isRead) || (!is_read_ahead)))
2554 /*
2555 * Once the API is available we need to change this.
2556 * At this point we are not allowing any gap.
2557 */
2558 continue;
2559
2560 SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
2561 current_sd->next_seq_lba =
2562 io_info->ldStartBlock + io_info->numBlocks;
2563 /*
2564 * update the mruBitMap LRU
2565 */
2566 shifted_values_mask =
2567 (1 << i * BITS_PER_INDEX_STREAM) - 1;
2568 shifted_values = ((*track_stream & shifted_values_mask)
2569 << BITS_PER_INDEX_STREAM);
2570 index_value_mask =
2571 STREAM_MASK << i * BITS_PER_INDEX_STREAM;
2572 unshifted_values =
2573 *track_stream & ~(shifted_values_mask |
2574 index_value_mask);
2575 *track_stream =
2576 unshifted_values | shifted_values | stream_num;
2577 return;
2578 }
2579 }
2580 /*
2581 * if we did not find any stream, create a new one
2582 * from the least recently used
2583 */
2584 stream_num = (*track_stream >>
2585 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
2586 STREAM_MASK;
2587 current_sd = &current_ld_sd->stream_track[stream_num];
2588 current_sd->is_read = io_info->isRead;
2589 current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
2590 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
2591 return;
2592 }
2593
2594 /**
2595 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2596 * affinity (cpu of the controller) and raid_flags in the raid context
2597 * based on IO type.
2598 *
2599 * @fusion: Fusion context
2600 * @praid_context: IO RAID context
2601 * @raid: LD raid map
2602 * @fp_possible: Is fast path possible?
2603 * @is_read: Is read IO?
2604 * @scsi_buff_len: SCSI command buffer length 2605 * 2606 */ 2607 static void 2608 megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, 2609 union RAID_CONTEXT_UNION *praid_context, 2610 struct MR_LD_RAID *raid, bool fp_possible, 2611 u8 is_read, u32 scsi_buff_len) 2612 { 2613 u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; 2614 struct RAID_CONTEXT_G35 *rctx_g35; 2615 2616 rctx_g35 = &praid_context->raid_context_g35; 2617 if (fp_possible) { 2618 if (is_read) { 2619 if ((raid->cpuAffinity.pdRead.cpu0) && 2620 (raid->cpuAffinity.pdRead.cpu1)) 2621 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2622 else if (raid->cpuAffinity.pdRead.cpu1) 2623 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2624 } else { 2625 if ((raid->cpuAffinity.pdWrite.cpu0) && 2626 (raid->cpuAffinity.pdWrite.cpu1)) 2627 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2628 else if (raid->cpuAffinity.pdWrite.cpu1) 2629 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2630 /* Fast path cache by pass capable R0/R1 VD */ 2631 if ((raid->level <= 1) && 2632 (raid->capability.fp_cache_bypass_capable)) { 2633 rctx_g35->routing_flags |= 2634 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT); 2635 rctx_g35->raid_flags = 2636 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS 2637 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 2638 } 2639 } 2640 } else { 2641 if (is_read) { 2642 if ((raid->cpuAffinity.ldRead.cpu0) && 2643 (raid->cpuAffinity.ldRead.cpu1)) 2644 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2645 else if (raid->cpuAffinity.ldRead.cpu1) 2646 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2647 } else { 2648 if ((raid->cpuAffinity.ldWrite.cpu0) && 2649 (raid->cpuAffinity.ldWrite.cpu1)) 2650 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2651 else if (raid->cpuAffinity.ldWrite.cpu1) 2652 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2653 2654 if (is_stream_detected(rctx_g35) && 2655 ((raid->level == 5) || (raid->level == 6)) && 2656 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && 2657 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) 2658 cpu_sel = MR_RAID_CTX_CPUSEL_0; 2659 } 2660 } 2661 2662 rctx_g35->routing_flags |= 2663 (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2664 2665 /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2666 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. 2667 * IO Subtype is not bitmap. 
2668 */ 2669 if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) && 2670 (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) { 2671 praid_context->raid_context_g35.raid_flags = 2672 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2673 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 2674 } 2675 } 2676 2677 /** 2678 * megasas_build_ldio_fusion - Prepares IOs to devices 2679 * @instance: Adapter soft state 2680 * @scp: SCSI command 2681 * @cmd: Command to be prepared 2682 * 2683 * Prepares the io_request and chain elements (sg_frame) for IO 2684 * The IO can be for PD (Fast Path) or LD 2685 */ 2686 static void 2687 megasas_build_ldio_fusion(struct megasas_instance *instance, 2688 struct scsi_cmnd *scp, 2689 struct megasas_cmd_fusion *cmd) 2690 { 2691 bool fp_possible; 2692 u16 ld; 2693 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 2694 u32 scsi_buff_len; 2695 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2696 struct IO_REQUEST_INFO io_info; 2697 struct fusion_context *fusion; 2698 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2699 u8 *raidLUN; 2700 unsigned long spinlock_flags; 2701 struct MR_LD_RAID *raid = NULL; 2702 struct MR_PRIV_DEVICE *mrdev_priv; 2703 struct RAID_CONTEXT *rctx; 2704 struct RAID_CONTEXT_G35 *rctx_g35; 2705 2706 device_id = MEGASAS_DEV_INDEX(scp); 2707 2708 fusion = instance->ctrl_context; 2709 2710 io_request = cmd->io_request; 2711 rctx = &io_request->RaidContext.raid_context; 2712 rctx_g35 = &io_request->RaidContext.raid_context_g35; 2713 2714 rctx->virtual_disk_tgt_id = cpu_to_le16(device_id); 2715 rctx->status = 0; 2716 rctx->ex_status = 0; 2717 2718 start_lba_lo = 0; 2719 start_lba_hi = 0; 2720 fp_possible = false; 2721 2722 /* 2723 * 6-byte READ(0x08) or WRITE(0x0A) cdb 2724 */ 2725 if (scp->cmd_len == 6) { 2726 datalength = (u32) scp->cmnd[4]; 2727 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 2728 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 2729 2730 start_lba_lo &= 0x1FFFFF; 2731 } 2732 2733 /* 2734 * 10-byte READ(0x28) or WRITE(0x2A) cdb 2735 */ 2736 else if (scp->cmd_len == 10) { 2737 datalength = (u32) scp->cmnd[8] | 2738 ((u32) scp->cmnd[7] << 8); 2739 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2740 ((u32) scp->cmnd[3] << 16) | 2741 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2742 } 2743 2744 /* 2745 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 2746 */ 2747 else if (scp->cmd_len == 12) { 2748 datalength = ((u32) scp->cmnd[6] << 24) | 2749 ((u32) scp->cmnd[7] << 16) | 2750 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2751 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2752 ((u32) scp->cmnd[3] << 16) | 2753 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2754 } 2755 2756 /* 2757 * 16-byte READ(0x88) or WRITE(0x8A) cdb 2758 */ 2759 else if (scp->cmd_len == 16) { 2760 datalength = ((u32) scp->cmnd[10] << 24) | 2761 ((u32) scp->cmnd[11] << 16) | 2762 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 2763 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 2764 ((u32) scp->cmnd[7] << 16) | 2765 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2766 2767 start_lba_hi = ((u32) scp->cmnd[2] << 24) | 2768 ((u32) scp->cmnd[3] << 16) | 2769 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2770 } 2771 2772 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 2773 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 2774 io_info.numBlocks = datalength; 2775 io_info.ldTgtId = device_id; 2776 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2777 scsi_buff_len = scsi_bufflen(scp); 2778 io_request->DataLength = cpu_to_le32(scsi_buff_len); 2779 io_info.data_arms = 
1; 2780 2781 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2782 io_info.isRead = 1; 2783 2784 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2785 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2786 2787 if (ld < instance->fw_supported_vd_count) 2788 raid = MR_LdRaidGet(ld, local_map_ptr); 2789 2790 if (!raid || (!fusion->fast_path_io)) { 2791 rctx->reg_lock_flags = 0; 2792 fp_possible = false; 2793 } else { 2794 if (MR_BuildRaidContext(instance, &io_info, rctx, 2795 local_map_ptr, &raidLUN)) 2796 fp_possible = (io_info.fpOkForIo > 0) ? true : false; 2797 } 2798 2799 megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); 2800 2801 if (instance->adapter_type >= VENTURA_SERIES) { 2802 /* FP for Optimal raid level 1. 2803 * All large RAID-1 writes (> 32 KiB, both WT and WB modes) 2804 * are built by the driver as LD I/Os. 2805 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os 2806 * (there is never a reason to process these as buffered writes) 2807 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os 2808 * with the SLD bit asserted. 2809 */ 2810 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 2811 mrdev_priv = scp->device->hostdata; 2812 2813 if (atomic_inc_return(&instance->fw_outstanding) > 2814 (instance->host->can_queue)) { 2815 fp_possible = false; 2816 atomic_dec(&instance->fw_outstanding); 2817 } else if (fusion->pcie_bw_limitation && 2818 ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2819 (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) { 2820 fp_possible = false; 2821 atomic_dec(&instance->fw_outstanding); 2822 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) 2823 atomic_set(&mrdev_priv->r1_ldio_hint, 2824 instance->r1_ldio_hint_default); 2825 } 2826 } 2827 2828 if (!fp_possible || 2829 (io_info.isRead && io_info.ra_capable)) { 2830 spin_lock_irqsave(&instance->stream_lock, 2831 spinlock_flags); 2832 megasas_stream_detect(instance, cmd, &io_info); 2833 spin_unlock_irqrestore(&instance->stream_lock, 2834 spinlock_flags); 2835 /* In ventura if stream detected for a read and it is 2836 * read ahead capable make this IO as LDIO 2837 */ 2838 if (is_stream_detected(rctx_g35)) 2839 fp_possible = false; 2840 } 2841 2842 /* If raid is NULL, set CPU affinity to default CPU0 */ 2843 if (raid) 2844 megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext, 2845 raid, fp_possible, io_info.isRead, 2846 scsi_buff_len); 2847 else 2848 rctx_g35->routing_flags |= 2849 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2850 } 2851 2852 if (fp_possible) { 2853 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 2854 local_map_ptr, start_lba_lo); 2855 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2856 cmd->request_desc->SCSIIO.RequestFlags = 2857 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2858 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2859 if (instance->adapter_type == INVADER_SERIES) { 2860 rctx->type = MPI2_TYPE_CUDA; 2861 rctx->nseg = 0x1; 2862 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2863 rctx->reg_lock_flags |= 2864 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2865 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2866 } else if (instance->adapter_type >= VENTURA_SERIES) { 2867 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2868 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2869 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2870 io_request->IoFlags |= 2871 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2872 } 2873 if (fusion->load_balance_info && 
2874 (fusion->load_balance_info[device_id].loadBalanceFlag) && 2875 (io_info.isRead)) { 2876 io_info.devHandle = 2877 get_updated_dev_handle(instance, 2878 &fusion->load_balance_info[device_id], 2879 &io_info, local_map_ptr); 2880 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2881 cmd->pd_r1_lb = io_info.pd_after_lb; 2882 if (instance->adapter_type >= VENTURA_SERIES) 2883 rctx_g35->span_arm = io_info.span_arm; 2884 else 2885 rctx->span_arm = io_info.span_arm; 2886 2887 } else 2888 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2889 2890 if (instance->adapter_type >= VENTURA_SERIES) 2891 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 2892 else 2893 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2894 2895 if ((raidLUN[0] == 1) && 2896 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { 2897 instance->dev_handle = !(instance->dev_handle); 2898 io_info.devHandle = 2899 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; 2900 } 2901 2902 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 2903 io_request->DevHandle = io_info.devHandle; 2904 cmd->pd_interface = io_info.pd_interface; 2905 /* populate the LUN field */ 2906 memcpy(io_request->LUN, raidLUN, 8); 2907 } else { 2908 rctx->timeout_value = 2909 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 2910 cmd->request_desc->SCSIIO.RequestFlags = 2911 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2912 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2913 if (instance->adapter_type == INVADER_SERIES) { 2914 if (io_info.do_fp_rlbypass || 2915 (rctx->reg_lock_flags == REGION_TYPE_UNUSED)) 2916 cmd->request_desc->SCSIIO.RequestFlags = 2917 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2918 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2919 rctx->type = MPI2_TYPE_CUDA; 2920 rctx->reg_lock_flags |= 2921 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2922 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2923 rctx->nseg = 0x1; 2924 } else if (instance->adapter_type >= VENTURA_SERIES) { 2925 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2926 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2927 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2928 } 2929 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2930 io_request->DevHandle = cpu_to_le16(device_id); 2931 2932 } /* Not FP */ 2933 } 2934 2935 /** 2936 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk 2937 * @instance: Adapter soft state 2938 * @scmd: SCSI command 2939 * @cmd: Command to be prepared 2940 * 2941 * Prepares the io_request frame for non-rw io cmds for vd. 
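 * (e.g. an INQUIRY or TEST UNIT READY to an LD; the fast path is used
 * only when the target LD is fpNonRWCapable, see below)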
2942 */ 2943 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, 2944 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) 2945 { 2946 u32 device_id; 2947 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2948 u16 ld; 2949 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2950 struct fusion_context *fusion = instance->ctrl_context; 2951 u8 span, physArm; 2952 __le16 devHandle; 2953 u32 arRef, pd; 2954 struct MR_LD_RAID *raid; 2955 struct RAID_CONTEXT *pRAID_Context; 2956 u8 fp_possible = 1; 2957 2958 io_request = cmd->io_request; 2959 device_id = MEGASAS_DEV_INDEX(scmd); 2960 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2961 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 2962 /* get RAID_Context pointer */ 2963 pRAID_Context = &io_request->RaidContext.raid_context; 2964 /* Check with FW team */ 2965 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 2966 pRAID_Context->reg_lock_row_lba = 0; 2967 pRAID_Context->reg_lock_length = 0; 2968 2969 if (fusion->fast_path_io && ( 2970 device_id < instance->fw_supported_vd_count)) { 2971 2972 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2973 if (ld >= instance->fw_supported_vd_count - 1) 2974 fp_possible = 0; 2975 else { 2976 raid = MR_LdRaidGet(ld, local_map_ptr); 2977 if (!(raid->capability.fpNonRWCapable)) 2978 fp_possible = 0; 2979 } 2980 } else 2981 fp_possible = 0; 2982 2983 if (!fp_possible) { 2984 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2985 io_request->DevHandle = cpu_to_le16(device_id); 2986 io_request->LUN[1] = scmd->device->lun; 2987 pRAID_Context->timeout_value = 2988 cpu_to_le16 (scmd->request->timeout / HZ); 2989 cmd->request_desc->SCSIIO.RequestFlags = 2990 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2991 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2992 } else { 2993 2994 /* set RAID context values */ 2995 pRAID_Context->config_seq_num = raid->seqNum; 2996 if (instance->adapter_type < VENTURA_SERIES) 2997 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; 2998 pRAID_Context->timeout_value = 2999 cpu_to_le16(raid->fpIoTimeoutForLd); 3000 3001 /* get the DevHandle for the PD (since this is 3002 fpNonRWCapable, this is a single disk RAID0) */ 3003 span = physArm = 0; 3004 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 3005 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 3006 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 3007 3008 /* build request descriptor */ 3009 cmd->request_desc->SCSIIO.RequestFlags = 3010 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 3011 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3012 cmd->request_desc->SCSIIO.DevHandle = devHandle; 3013 3014 /* populate the LUN field */ 3015 memcpy(io_request->LUN, raid->LUN, 8); 3016 3017 /* build the raidScsiIO structure */ 3018 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3019 io_request->DevHandle = devHandle; 3020 } 3021 } 3022 3023 /** 3024 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd 3025 * @instance: Adapter soft state 3026 * @scmd: SCSI command 3027 * @cmd: Command to be prepared 3028 * @fp_possible: parameter to detect fast path or firmware path io. 
3029 * 3030 * Prepares the io_request frame for rw/non-rw io cmds for syspds 3031 */ 3032 static void 3033 megasas_build_syspd_fusion(struct megasas_instance *instance, 3034 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, 3035 bool fp_possible) 3036 { 3037 u32 device_id; 3038 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 3039 u16 pd_index = 0; 3040 u16 os_timeout_value; 3041 u16 timeout_limit; 3042 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 3043 struct RAID_CONTEXT *pRAID_Context; 3044 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 3045 struct MR_PRIV_DEVICE *mr_device_priv_data; 3046 struct fusion_context *fusion = instance->ctrl_context; 3047 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; 3048 3049 device_id = MEGASAS_DEV_INDEX(scmd); 3050 pd_index = MEGASAS_PD_INDEX(scmd); 3051 os_timeout_value = scmd->request->timeout / HZ; 3052 mr_device_priv_data = scmd->device->hostdata; 3053 cmd->pd_interface = mr_device_priv_data->interface_type; 3054 3055 io_request = cmd->io_request; 3056 /* get RAID_Context pointer */ 3057 pRAID_Context = &io_request->RaidContext.raid_context; 3058 pRAID_Context->reg_lock_flags = 0; 3059 pRAID_Context->reg_lock_row_lba = 0; 3060 pRAID_Context->reg_lock_length = 0; 3061 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 3062 io_request->LUN[1] = scmd->device->lun; 3063 pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 3064 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 3065 3066 /* If FW supports PD sequence number */ 3067 if (instance->support_seqnum_jbod_fp) { 3068 if (instance->use_seqnum_jbod_fp && 3069 instance->pd_list[pd_index].driveType == TYPE_DISK) { 3070 3071 /* More than 256 PD/JBOD support for Ventura */ 3072 if (instance->support_morethan256jbod) 3073 pRAID_Context->virtual_disk_tgt_id = 3074 pd_sync->seq[pd_index].pd_target_id; 3075 else 3076 pRAID_Context->virtual_disk_tgt_id = 3077 cpu_to_le16(device_id + 3078 (MAX_PHYSICAL_DEVICES - 1)); 3079 pRAID_Context->config_seq_num = 3080 pd_sync->seq[pd_index].seqNum; 3081 io_request->DevHandle = 3082 pd_sync->seq[pd_index].devHandle; 3083 if (instance->adapter_type >= VENTURA_SERIES) { 3084 io_request->RaidContext.raid_context_g35.routing_flags |= 3085 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 3086 io_request->RaidContext.raid_context_g35.nseg_type |= 3087 (1 << RAID_CONTEXT_NSEG_SHIFT); 3088 io_request->RaidContext.raid_context_g35.nseg_type |= 3089 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 3090 } else { 3091 pRAID_Context->type = MPI2_TYPE_CUDA; 3092 pRAID_Context->nseg = 0x1; 3093 pRAID_Context->reg_lock_flags |= 3094 (MR_RL_FLAGS_SEQ_NUM_ENABLE | 3095 MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 3096 } 3097 } else { 3098 pRAID_Context->virtual_disk_tgt_id = 3099 cpu_to_le16(device_id + 3100 (MAX_PHYSICAL_DEVICES - 1)); 3101 pRAID_Context->config_seq_num = 0; 3102 io_request->DevHandle = cpu_to_le16(0xFFFF); 3103 } 3104 } else { 3105 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3106 pRAID_Context->config_seq_num = 0; 3107 3108 if (fusion->fast_path_io) { 3109 local_map_ptr = 3110 fusion->ld_drv_map[(instance->map_id & 1)]; 3111 io_request->DevHandle = 3112 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 3113 } else { 3114 io_request->DevHandle = cpu_to_le16(0xFFFF); 3115 } 3116 } 3117 3118 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 3119 3120 megasas_get_msix_index(instance, scmd, cmd, 1); 3121 3122 if (!fp_possible) { 3123 /* system pd firmware path */ 3124 io_request->Function = 
MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 3125 cmd->request_desc->SCSIIO.RequestFlags = 3126 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3127 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3128 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); 3129 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3130 } else { 3131 if (os_timeout_value) 3132 os_timeout_value++; 3133 3134 /* system pd Fast Path */ 3135 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3136 timeout_limit = (scmd->device->type == TYPE_DISK) ? 3137 255 : 0xFFFF; 3138 pRAID_Context->timeout_value = 3139 cpu_to_le16((os_timeout_value > timeout_limit) ? 3140 timeout_limit : os_timeout_value); 3141 if (instance->adapter_type >= INVADER_SERIES) 3142 io_request->IoFlags |= 3143 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 3144 3145 cmd->request_desc->SCSIIO.RequestFlags = 3146 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 3147 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3148 } 3149 } 3150 3151 /** 3152 * megasas_build_io_fusion - Prepares IOs to devices 3153 * @instance: Adapter soft state 3154 * @scp: SCSI command 3155 * @cmd: Command to be prepared 3156 * 3157 * Invokes helper functions to prepare request frames 3158 * and sets flags appropriate for IO/Non-IO cmd 3159 */ 3160 static int 3161 megasas_build_io_fusion(struct megasas_instance *instance, 3162 struct scsi_cmnd *scp, 3163 struct megasas_cmd_fusion *cmd) 3164 { 3165 int sge_count; 3166 u8 cmd_type; 3167 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 3168 struct MR_PRIV_DEVICE *mr_device_priv_data; 3169 mr_device_priv_data = scp->device->hostdata; 3170 3171 /* Zero out some fields so they don't get reused */ 3172 memset(io_request->LUN, 0x0, 8); 3173 io_request->CDB.EEDP32.PrimaryReferenceTag = 0; 3174 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; 3175 io_request->EEDPFlags = 0; 3176 io_request->Control = 0; 3177 io_request->EEDPBlockSize = 0; 3178 io_request->ChainOffset = 0; 3179 io_request->RaidContext.raid_context.raid_flags = 0; 3180 io_request->RaidContext.raid_context.type = 0; 3181 io_request->RaidContext.raid_context.nseg = 0; 3182 3183 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 3184 /* 3185 * Just the CDB length,rest of the Flags are zero 3186 * This will be modified for FP in build_ldio_fusion 3187 */ 3188 io_request->IoFlags = cpu_to_le16(scp->cmd_len); 3189 3190 switch (cmd_type = megasas_cmd_type(scp)) { 3191 case READ_WRITE_LDIO: 3192 megasas_build_ldio_fusion(instance, scp, cmd); 3193 break; 3194 case NON_READ_WRITE_LDIO: 3195 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 3196 break; 3197 case READ_WRITE_SYSPDIO: 3198 megasas_build_syspd_fusion(instance, scp, cmd, true); 3199 break; 3200 case NON_READ_WRITE_SYSPDIO: 3201 if (instance->secure_jbod_support || 3202 mr_device_priv_data->is_tm_capable) 3203 megasas_build_syspd_fusion(instance, scp, cmd, false); 3204 else 3205 megasas_build_syspd_fusion(instance, scp, cmd, true); 3206 break; 3207 default: 3208 break; 3209 } 3210 3211 /* 3212 * Construct SGL 3213 */ 3214 3215 sge_count = megasas_make_sgl(instance, scp, cmd); 3216 3217 if (sge_count > instance->max_num_sge || (sge_count < 0)) { 3218 dev_err(&instance->pdev->dev, 3219 "%s %d sge_count (%d) is out of range. 
Range is: 0-%d\n",
3220 __func__, __LINE__, sge_count, instance->max_num_sge);
3221 return 1;
3222 }
3223
3224 if (instance->adapter_type >= VENTURA_SERIES) {
3225 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
3226 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
3227 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
3228 } else {
3229 /* num_sge stores the lower 8 bits of sge_count;
3230 * num_sge_ext stores the upper 8 bits of sge_count
3231 */
3232 io_request->RaidContext.raid_context.num_sge = sge_count;
3233 io_request->RaidContext.raid_context.num_sge_ext =
3234 (u8)(sge_count >> 8);
3235 }
3236
3237 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
3238
3239 if (scp->sc_data_direction == DMA_TO_DEVICE)
3240 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
3241 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
3242 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
3243
3244 io_request->SGLOffset0 =
3245 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
3246
3247 io_request->SenseBufferLowAddress =
3248 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
3249 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
3250
3251 cmd->scmd = scp;
3252 scp->SCp.ptr = (char *)cmd;
3253
3254 return 0;
3255 }
3256
3257 static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3258 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
3259 {
3260 u8 *p;
3261 struct fusion_context *fusion;
3262
3263 fusion = instance->ctrl_context;
3264 p = fusion->req_frames_desc +
3265 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
3266
3267 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
3268 }
3269
3270
3271 /* megasas_prepare_secondRaid1_IO
3272 * It prepares the second IO of a RAID 1 write pair
3273 */
3274 static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
3275 struct megasas_cmd_fusion *cmd,
3276 struct megasas_cmd_fusion *r1_cmd)
3277 {
3278 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
3279 struct fusion_context *fusion;
3280 fusion = instance->ctrl_context;
3281 req_desc = cmd->request_desc;
3282 /* copy the io request frame as well as 8 SGEs data for the r1 command */
3283 memcpy(r1_cmd->io_request, cmd->io_request,
3284 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
3285 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
3286 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
3287 /* the sense buffer is different for the r1 command */
3288 r1_cmd->io_request->SenseBufferLowAddress =
3289 cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
3290 r1_cmd->scmd = cmd->scmd;
3291 req_desc2 = megasas_get_request_descriptor(instance,
3292 (r1_cmd->index - 1));
3293 req_desc2->Words = 0;
3294 r1_cmd->request_desc = req_desc2;
3295 req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
3296 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
3297 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
3298 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
3299 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
3300 cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3301 cpu_to_le16(r1_cmd->index);
3302 r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3303 cpu_to_le16(cmd->index);
3304 /* the MSIxIndex of both commands' request descriptors should be the same */
3305 r1_cmd->request_desc->SCSIIO.MSIxIndex =
3306 cmd->request_desc->SCSIIO.MSIxIndex;
3307 /* the span arm is different for the r1 cmd */
3308 r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = 3309 cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; 3310 } 3311 3312 /** 3313 * megasas_build_and_issue_cmd_fusion -Main routine for building and 3314 * issuing non IOCTL cmd 3315 * @instance: Adapter soft state 3316 * @scmd: pointer to scsi cmd from OS 3317 */ 3318 static u32 3319 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, 3320 struct scsi_cmnd *scmd) 3321 { 3322 struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; 3323 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3324 u32 index; 3325 3326 if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && 3327 instance->ldio_threshold && 3328 (atomic_inc_return(&instance->ldio_outstanding) > 3329 instance->ldio_threshold)) { 3330 atomic_dec(&instance->ldio_outstanding); 3331 return SCSI_MLQUEUE_DEVICE_BUSY; 3332 } 3333 3334 if (atomic_inc_return(&instance->fw_outstanding) > 3335 instance->host->can_queue) { 3336 atomic_dec(&instance->fw_outstanding); 3337 return SCSI_MLQUEUE_HOST_BUSY; 3338 } 3339 3340 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); 3341 3342 if (!cmd) { 3343 atomic_dec(&instance->fw_outstanding); 3344 return SCSI_MLQUEUE_HOST_BUSY; 3345 } 3346 3347 index = cmd->index; 3348 3349 req_desc = megasas_get_request_descriptor(instance, index-1); 3350 3351 req_desc->Words = 0; 3352 cmd->request_desc = req_desc; 3353 3354 if (megasas_build_io_fusion(instance, scmd, cmd)) { 3355 megasas_return_cmd_fusion(instance, cmd); 3356 dev_err(&instance->pdev->dev, "Error building command\n"); 3357 cmd->request_desc = NULL; 3358 atomic_dec(&instance->fw_outstanding); 3359 return SCSI_MLQUEUE_HOST_BUSY; 3360 } 3361 3362 req_desc = cmd->request_desc; 3363 req_desc->SCSIIO.SMID = cpu_to_le16(index); 3364 3365 if (cmd->io_request->ChainOffset != 0 && 3366 cmd->io_request->ChainOffset != 0xF) 3367 dev_err(&instance->pdev->dev, "The chain offset value is not " 3368 "correct : %x\n", cmd->io_request->ChainOffset); 3369 /* 3370 * if it is raid 1/10 fp write capable. 3371 * try to get second command from pool and construct it. 
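 * The peer command is fired immediately after the primary one below;
 * completion is deferred until both SMIDs have completed, see
 * megasas_complete_r1_command.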
3372 * From FW, it has confirmed that lba values of two PDs 3373 * corresponds to single R1/10 LD are always same 3374 * 3375 */ 3376 /* driver side count always should be less than max_fw_cmds 3377 * to get new command 3378 */ 3379 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 3380 r1_cmd = megasas_get_cmd_fusion(instance, 3381 (scmd->request->tag + instance->max_fw_cmds)); 3382 megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); 3383 } 3384 3385 3386 /* 3387 * Issue the command to the FW 3388 */ 3389 3390 megasas_fire_cmd_fusion(instance, req_desc); 3391 3392 if (r1_cmd) 3393 megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); 3394 3395 3396 return 0; 3397 } 3398 3399 /** 3400 * megasas_complete_r1_command - 3401 * completes R1 FP write commands which has valid peer smid 3402 * @instance: Adapter soft state 3403 * @cmd: MPT command frame 3404 * 3405 */ 3406 static inline void 3407 megasas_complete_r1_command(struct megasas_instance *instance, 3408 struct megasas_cmd_fusion *cmd) 3409 { 3410 u8 *sense, status, ex_status; 3411 u32 data_length; 3412 u16 peer_smid; 3413 struct fusion_context *fusion; 3414 struct megasas_cmd_fusion *r1_cmd = NULL; 3415 struct scsi_cmnd *scmd_local = NULL; 3416 struct RAID_CONTEXT_G35 *rctx_g35; 3417 3418 rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; 3419 fusion = instance->ctrl_context; 3420 peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid); 3421 3422 r1_cmd = fusion->cmd_list[peer_smid - 1]; 3423 scmd_local = cmd->scmd; 3424 status = rctx_g35->status; 3425 ex_status = rctx_g35->ex_status; 3426 data_length = cmd->io_request->DataLength; 3427 sense = cmd->sense; 3428 3429 cmd->cmd_completed = true; 3430 3431 /* Check if peer command is completed or not*/ 3432 if (r1_cmd->cmd_completed) { 3433 rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; 3434 if (rctx_g35->status != MFI_STAT_OK) { 3435 status = rctx_g35->status; 3436 ex_status = rctx_g35->ex_status; 3437 data_length = r1_cmd->io_request->DataLength; 3438 sense = r1_cmd->sense; 3439 } 3440 3441 megasas_return_cmd_fusion(instance, r1_cmd); 3442 map_cmd_status(fusion, scmd_local, status, ex_status, 3443 le32_to_cpu(data_length), sense); 3444 if (instance->ldio_threshold && 3445 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 3446 atomic_dec(&instance->ldio_outstanding); 3447 scmd_local->SCp.ptr = NULL; 3448 megasas_return_cmd_fusion(instance, cmd); 3449 scsi_dma_unmap(scmd_local); 3450 scmd_local->scsi_done(scmd_local); 3451 } 3452 } 3453 3454 /** 3455 * complete_cmd_fusion - Completes command 3456 * @instance: Adapter soft state 3457 * @MSIxIndex: MSI number 3458 * @irq_context: IRQ context 3459 * 3460 * Completes all commands that is in reply descriptor queue 3461 */ 3462 static int 3463 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, 3464 struct megasas_irq_context *irq_context) 3465 { 3466 union MPI2_REPLY_DESCRIPTORS_UNION *desc; 3467 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 3468 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; 3469 struct fusion_context *fusion; 3470 struct megasas_cmd *cmd_mfi; 3471 struct megasas_cmd_fusion *cmd_fusion; 3472 u16 smid, num_completed; 3473 u8 reply_descript_type, *sense, status, extStatus; 3474 u32 device_id, data_length; 3475 union desc_value d_val; 3476 struct LD_LOAD_BALANCE_INFO *lbinfo; 3477 int threshold_reply_count = 0; 3478 struct scsi_cmnd *scmd_local = NULL; 3479 struct MR_TASK_MANAGE_REQUEST *mr_tm_req; 3480 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 3481 3482 
fusion = instance->ctrl_context; 3483 3484 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 3485 return IRQ_HANDLED; 3486 3487 desc = fusion->reply_frames_desc[MSIxIndex] + 3488 fusion->last_reply_idx[MSIxIndex]; 3489 3490 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 3491 3492 d_val.word = desc->Words; 3493 3494 reply_descript_type = reply_desc->ReplyFlags & 3495 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 3496 3497 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 3498 return IRQ_NONE; 3499 3500 num_completed = 0; 3501 3502 while (d_val.u.low != cpu_to_le32(UINT_MAX) && 3503 d_val.u.high != cpu_to_le32(UINT_MAX)) { 3504 3505 smid = le16_to_cpu(reply_desc->SMID); 3506 cmd_fusion = fusion->cmd_list[smid - 1]; 3507 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) 3508 cmd_fusion->io_request; 3509 3510 scmd_local = cmd_fusion->scmd; 3511 status = scsi_io_req->RaidContext.raid_context.status; 3512 extStatus = scsi_io_req->RaidContext.raid_context.ex_status; 3513 sense = cmd_fusion->sense; 3514 data_length = scsi_io_req->DataLength; 3515 3516 switch (scsi_io_req->Function) { 3517 case MPI2_FUNCTION_SCSI_TASK_MGMT: 3518 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *) 3519 cmd_fusion->io_request; 3520 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) 3521 &mr_tm_req->TmRequest; 3522 dev_dbg(&instance->pdev->dev, "TM completion:" 3523 "type: 0x%x TaskMID: 0x%x\n", 3524 mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 3525 complete(&cmd_fusion->done); 3526 break; 3527 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ 3528 /* Update load balancing info */ 3529 if (fusion->load_balance_info && 3530 (cmd_fusion->scmd->SCp.Status & 3531 MEGASAS_LOAD_BALANCE_FLAG)) { 3532 device_id = MEGASAS_DEV_INDEX(scmd_local); 3533 lbinfo = &fusion->load_balance_info[device_id]; 3534 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 3535 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 3536 } 3537 /* Fall through - and complete IO */ 3538 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 3539 atomic_dec(&instance->fw_outstanding); 3540 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 3541 map_cmd_status(fusion, scmd_local, status, 3542 extStatus, le32_to_cpu(data_length), 3543 sense); 3544 if (instance->ldio_threshold && 3545 (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) 3546 atomic_dec(&instance->ldio_outstanding); 3547 scmd_local->SCp.ptr = NULL; 3548 megasas_return_cmd_fusion(instance, cmd_fusion); 3549 scsi_dma_unmap(scmd_local); 3550 scmd_local->scsi_done(scmd_local); 3551 } else /* Optimal VD - R1 FP command completion. */ 3552 megasas_complete_r1_command(instance, cmd_fusion); 3553 break; 3554 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 3555 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 3556 /* Poll mode. Dummy free. 3557 * In case of Interrupt mode, caller has reverse check. 
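 * (i.e. the issuer of a polled DCMD waits for it itself, so the
 * completion path only needs to return the MFI frame to the pool)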
3558 */ 3559 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { 3560 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; 3561 megasas_return_cmd(instance, cmd_mfi); 3562 } else 3563 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 3564 break; 3565 } 3566 3567 fusion->last_reply_idx[MSIxIndex]++; 3568 if (fusion->last_reply_idx[MSIxIndex] >= 3569 fusion->reply_q_depth) 3570 fusion->last_reply_idx[MSIxIndex] = 0; 3571 3572 desc->Words = cpu_to_le64(ULLONG_MAX); 3573 num_completed++; 3574 threshold_reply_count++; 3575 3576 /* Get the next reply descriptor */ 3577 if (!fusion->last_reply_idx[MSIxIndex]) 3578 desc = fusion->reply_frames_desc[MSIxIndex]; 3579 else 3580 desc++; 3581 3582 reply_desc = 3583 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 3584 3585 d_val.word = desc->Words; 3586 3587 reply_descript_type = reply_desc->ReplyFlags & 3588 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 3589 3590 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 3591 break; 3592 /* 3593 * Write to the reply post host index register after completing the 3594 * threshold number of replies while more replies are still pending 3595 * in the reply queue 3596 */ 3597 if (threshold_reply_count >= instance->threshold_reply_count) { 3598 if (instance->msix_combined) 3599 writel(((MSIxIndex & 0x7) << 24) | 3600 fusion->last_reply_idx[MSIxIndex], 3601 instance->reply_post_host_index_addr[MSIxIndex/8]); 3602 else 3603 writel((MSIxIndex << 24) | 3604 fusion->last_reply_idx[MSIxIndex], 3605 instance->reply_post_host_index_addr[0]); 3606 threshold_reply_count = 0; 3607 if (irq_context) { 3608 if (!irq_context->irq_poll_scheduled) { 3609 irq_context->irq_poll_scheduled = true; 3610 irq_context->irq_line_enable = true; 3611 irq_poll_sched(&irq_context->irqpoll); 3612 } 3613 return num_completed; 3614 } 3615 } 3616 } 3617 3618 if (num_completed) { 3619 wmb(); 3620 if (instance->msix_combined) 3621 writel(((MSIxIndex & 0x7) << 24) | 3622 fusion->last_reply_idx[MSIxIndex], 3623 instance->reply_post_host_index_addr[MSIxIndex/8]); 3624 else 3625 writel((MSIxIndex << 24) | 3626 fusion->last_reply_idx[MSIxIndex], 3627 instance->reply_post_host_index_addr[0]); 3628 megasas_check_and_restore_queue_depth(instance); 3629 } 3630 return num_completed; 3631 } 3632 3633 /** 3634 * megasas_enable_irq_poll() - enable irqpoll 3635 * @instance: Adapter soft state 3636 */ 3637 static void megasas_enable_irq_poll(struct megasas_instance *instance) 3638 { 3639 u32 count, i; 3640 struct megasas_irq_context *irq_ctx; 3641 3642 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 3643 3644 for (i = 0; i < count; i++) { 3645 irq_ctx = &instance->irq_context[i]; 3646 irq_poll_enable(&irq_ctx->irqpoll); 3647 } 3648 } 3649 3650 /** 3651 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter 3652 * @instance_addr: Adapter soft state address 3653 */ 3654 static void megasas_sync_irqs(unsigned long instance_addr) 3655 { 3656 u32 count, i; 3657 struct megasas_instance *instance = 3658 (struct megasas_instance *)instance_addr; 3659 struct megasas_irq_context *irq_ctx; 3660 3661 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; 3662 3663 for (i = 0; i < count; i++) { 3664 synchronize_irq(pci_irq_vector(instance->pdev, i)); 3665 irq_ctx = &instance->irq_context[i]; 3666 irq_poll_disable(&irq_ctx->irqpoll); 3667 if (irq_ctx->irq_poll_scheduled) { 3668 irq_ctx->irq_poll_scheduled = false; 3669 enable_irq(irq_ctx->os_irq); 3670 } 3671 } 3672 } 3673 3674 /** 3675 * megasas_irqpoll() - process a queue for completed reply descriptors 3676 * @irqpoll: IRQ poll structure associated with queue to poll. 3677 * @budget: Threshold of reply descriptors to process per poll. 3678 * 3679 * Return: The number of entries processed. 3680 */ 3681 3682 int megasas_irqpoll(struct irq_poll *irqpoll, int budget) 3683 { 3684 struct megasas_irq_context *irq_ctx; 3685 struct megasas_instance *instance; 3686 int num_entries; 3687 3688 irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll); 3689 instance = irq_ctx->instance; 3690 3691 if (irq_ctx->irq_line_enable) { 3692 disable_irq(irq_ctx->os_irq); 3693 irq_ctx->irq_line_enable = false; 3694 } 3695 3696 num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx); 3697 if (num_entries < budget) { 3698 irq_poll_complete(irqpoll); 3699 irq_ctx->irq_poll_scheduled = false; 3700 enable_irq(irq_ctx->os_irq); 3701 } 3702 3703 return num_entries; 3704 } 3705 3706 /** 3707 * megasas_complete_cmd_dpc_fusion - Completes commands 3708 * @instance_addr: Adapter soft state address 3709 * 3710 * Tasklet to complete cmds 3711 */ 3712 static void 3713 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) 3714 { 3715 struct megasas_instance *instance = 3716 (struct megasas_instance *)instance_addr; 3717 u32 count, MSIxIndex; 3718 3719 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 3720 3721 /* If we have already declared adapter dead, do not complete cmds */ 3722 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 3723 return; 3724 3725 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) 3726 complete_cmd_fusion(instance, MSIxIndex, NULL); 3727 } 3728 3729 /** 3730 * megasas_isr_fusion - ISR entry point 3731 * @irq: IRQ number 3732 * @devp: IRQ context 3733 */ 3734 static irqreturn_t megasas_isr_fusion(int irq, void *devp) 3735 { 3736 struct megasas_irq_context *irq_context = devp; 3737 struct megasas_instance *instance = irq_context->instance; 3738 u32 mfiStatus; 3739 3740 if (instance->mask_interrupts) 3741 return IRQ_NONE; 3742 3743 if (irq_context->irq_poll_scheduled) 3744 return IRQ_HANDLED; 3745 3746 if (!instance->msix_vectors) { 3747 mfiStatus = instance->instancet->clear_intr(instance); 3748 if (!mfiStatus) 3749 return IRQ_NONE; 3750 } 3751 3752 /* If we are resetting, bail */ 3753 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { 3754 instance->instancet->clear_intr(instance); 3755 return IRQ_HANDLED; 3756 } 3757 3758 return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context) 3759 ? 
IRQ_HANDLED : IRQ_NONE; 3760 } 3761 3762 /** 3763 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru 3764 * @instance: Adapter soft state 3765 * @mfi_cmd: megasas_cmd pointer 3766 * 3767 */ 3768 static void 3769 build_mpt_mfi_pass_thru(struct megasas_instance *instance, 3770 struct megasas_cmd *mfi_cmd) 3771 { 3772 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 3773 struct MPI2_RAID_SCSI_IO_REQUEST *io_req; 3774 struct megasas_cmd_fusion *cmd; 3775 struct fusion_context *fusion; 3776 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; 3777 3778 fusion = instance->ctrl_context; 3779 3780 cmd = megasas_get_cmd_fusion(instance, 3781 instance->max_scsi_cmds + mfi_cmd->index); 3782 3783 /* Save the smid. To be used for returning the cmd */ 3784 mfi_cmd->context.smid = cmd->index; 3785 3786 /* 3787 * For cmds where the flag is set, store the flag and check 3788 * on completion. For cmds with this flag, don't call 3789 * megasas_complete_cmd 3790 */ 3791 3792 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 3793 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; 3794 3795 io_req = cmd->io_request; 3796 3797 if (instance->adapter_type >= INVADER_SERIES) { 3798 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 3799 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 3800 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 3801 sgl_ptr_end->Flags = 0; 3802 } 3803 3804 mpi25_ieee_chain = 3805 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; 3806 3807 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3808 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, 3809 SGL) / 4; 3810 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 3811 3812 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); 3813 3814 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3815 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3816 3817 mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size); 3818 } 3819 3820 /** 3821 * build_mpt_cmd - Calls helper function to build an MFI Pass thru cmd 3822 * @instance: Adapter soft state 3823 * @cmd: mfi cmd to build 3824 * 3825 */ 3826 static union MEGASAS_REQUEST_DESCRIPTOR_UNION * 3827 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 3828 { 3829 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; 3830 u16 index; 3831 3832 build_mpt_mfi_pass_thru(instance, cmd); 3833 index = cmd->context.smid; 3834 3835 req_desc = megasas_get_request_descriptor(instance, index - 1); 3836 3837 req_desc->Words = 0; 3838 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3839 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3840 3841 req_desc->SCSIIO.SMID = cpu_to_le16(index); 3842 3843 return req_desc; 3844 } 3845 3846 /** 3847 * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd 3848 * @instance: Adapter soft state 3849 * @cmd: mfi cmd pointer 3850 * 3851 */ 3852 static void 3853 megasas_issue_dcmd_fusion(struct megasas_instance *instance, 3854 struct megasas_cmd *cmd) 3855 { 3856 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3857 3858 req_desc = build_mpt_cmd(instance, cmd); 3859 3860 megasas_fire_cmd_fusion(instance, req_desc); 3861 return; 3862 } 3863 3864 /** 3865 * megasas_release_fusion - Reverses the FW initialization 3866 * @instance: Adapter soft state 3867 */ 3868 void 3869 megasas_release_fusion(struct megasas_instance *instance) 3870 { 3871 megasas_free_ioc_init_cmd(instance); 3872 megasas_free_cmds(instance); 3873 megasas_free_cmds_fusion(instance); 3874 3875 
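/* Undo the MMIO setup done at probe time: unmap the register window and release the PCI BAR. */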
iounmap(instance->reg_set); 3876 3877 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 3878 } 3879 3880 /** 3881 * megasas_read_fw_status_reg_fusion - returns the current FW status value 3882 * @instance: Adapter soft state 3883 */ 3884 static u32 3885 megasas_read_fw_status_reg_fusion(struct megasas_instance *instance) 3886 { 3887 return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0); 3888 } 3889 3890 /** 3891 * megasas_alloc_host_crash_buffer - Allocate host buffers for crash dump collection from firmware 3892 * @instance: Controller's soft instance 3893 * The number of buffers actually allocated is recorded in instance->drv_buf_alloc 3894 */ 3895 static void 3896 megasas_alloc_host_crash_buffer(struct megasas_instance *instance) 3897 { 3898 unsigned int i; 3899 3900 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { 3901 instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE); 3902 if (!instance->crash_buf[i]) { 3903 dev_info(&instance->pdev->dev, "Firmware crash dump " 3904 "memory allocation failed at index %d\n", i); 3905 break; 3906 } 3907 } 3908 instance->drv_buf_alloc = i; 3909 } 3910 3911 /** 3912 * megasas_free_host_crash_buffer - Free host buffers used for crash dump collection from firmware 3913 * @instance: Controller's soft instance 3914 */ 3915 void 3916 megasas_free_host_crash_buffer(struct megasas_instance *instance) 3917 { 3918 unsigned int i; 3919 for (i = 0; i < instance->drv_buf_alloc; i++) { 3920 if (instance->crash_buf[i]) 3921 vfree(instance->crash_buf[i]); 3922 } 3923 instance->drv_buf_index = 0; 3924 instance->drv_buf_alloc = 0; 3925 instance->fw_crash_state = UNAVAILABLE; 3926 instance->fw_crash_buffer_size = 0; 3927 } 3928 3929 /** 3930 * megasas_adp_reset_fusion - For controller reset 3931 * @instance: Controller's soft instance 3932 * @regs: MFI register set 3933 */ 3934 static int 3935 megasas_adp_reset_fusion(struct megasas_instance *instance, 3936 struct megasas_register_set __iomem *regs) 3937 { 3938 u32 host_diag, abs_state, retry; 3939 3940 /* Now try to reset the chip */ 3941 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3942 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3943 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3944 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3945 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3946 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3947 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3948 3949 /* Check that the diag write enable (DRWE) bit is on */ 3950 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); 3951 retry = 0; 3952 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3953 msleep(100); 3954 host_diag = megasas_readl(instance, 3955 &instance->reg_set->fusion_host_diag); 3956 if (retry++ == 100) { 3957 dev_warn(&instance->pdev->dev, 3958 "Host diag unlock failed from %s %d\n", 3959 __func__, __LINE__); 3960 break; 3961 } 3962 } 3963 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 3964 return -1; 3965 3966 /* Send chip reset command */ 3967 writel(host_diag | HOST_DIAG_RESET_ADAPTER, 3968 &instance->reg_set->fusion_host_diag); 3969 msleep(3000); 3970 3971 /* Make sure reset adapter bit is cleared */ 3972 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); 3973 retry = 0; 3974 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3975 msleep(100); 3976 host_diag = megasas_readl(instance, 3977 
&instance->reg_set->fusion_host_diag); 3978 if (retry++ == 1000) { 3979 dev_warn(&instance->pdev->dev, 3980 "Diag reset adapter never cleared %s %d\n", 3981 __func__, __LINE__); 3982 break; 3983 } 3984 } 3985 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3986 return -1; 3987 3988 abs_state = instance->instancet->read_fw_status_reg(instance) 3989 & MFI_STATE_MASK; 3990 retry = 0; 3991 3992 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3993 msleep(100); 3994 abs_state = instance->instancet-> 3995 read_fw_status_reg(instance) & MFI_STATE_MASK; 3996 } 3997 if (abs_state <= MFI_STATE_FW_INIT) { 3998 dev_warn(&instance->pdev->dev, 3999 "fw state <= MFI_STATE_FW_INIT, state = 0x%x %s %d\n", 4000 abs_state, __func__, __LINE__); 4001 return -1; 4002 } 4003 4004 return 0; 4005 } 4006 4007 /** 4008 * megasas_check_reset_fusion - For controller reset check 4009 * @instance: Controller's soft instance 4010 * @regs: MFI register set 4011 */ 4012 static int 4013 megasas_check_reset_fusion(struct megasas_instance *instance, 4014 struct megasas_register_set __iomem *regs) 4015 { 4016 return 0; 4017 } 4018 4019 /** 4020 * megasas_trigger_snap_dump - Trigger snap dump in FW 4021 * @instance: Soft instance of adapter 4022 */ 4023 static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) 4024 { 4025 int j; 4026 u32 fw_state, abs_state; 4027 4028 if (!instance->disableOnlineCtrlReset) { 4029 dev_info(&instance->pdev->dev, "Trigger snap dump\n"); 4030 writel(MFI_ADP_TRIGGER_SNAP_DUMP, 4031 &instance->reg_set->doorbell); 4032 readl(&instance->reg_set->doorbell); 4033 } 4034 4035 for (j = 0; j < instance->snapdump_wait_time; j++) { 4036 abs_state = instance->instancet->read_fw_status_reg(instance); 4037 fw_state = abs_state & MFI_STATE_MASK; 4038 if (fw_state == MFI_STATE_FAULT) { 4039 dev_printk(KERN_ERR, &instance->pdev->dev, 4040 "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", 4041 abs_state & MFI_STATE_FAULT_CODE, 4042 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4043 return; 4044 } 4045 msleep(1000); 4046 } 4047 } 4048 4049 /* This function waits for outstanding commands on fusion to complete */ 4050 static int 4051 megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 4052 int reason, int *convert) 4053 { 4054 int i, outstanding, retval = 0, hb_seconds_missed = 0; 4055 u32 fw_state, abs_state; 4056 u32 waittime_for_io_completion; 4057 4058 waittime_for_io_completion = 4059 min_t(u32, resetwaittime, 4060 (resetwaittime - instance->snapdump_wait_time)); 4061 4062 if (reason == MFI_IO_TIMEOUT_OCR) { 4063 dev_info(&instance->pdev->dev, 4064 "MFI command timed out\n"); 4065 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 4066 if (instance->snapdump_wait_time) 4067 megasas_trigger_snap_dump(instance); 4068 retval = 1; 4069 goto out; 4070 } 4071 4072 for (i = 0; i < waittime_for_io_completion; i++) { 4073 /* Check if firmware is in fault state */ 4074 abs_state = instance->instancet->read_fw_status_reg(instance); 4075 fw_state = abs_state & MFI_STATE_MASK; 4076 if (fw_state == MFI_STATE_FAULT) { 4077 dev_printk(KERN_ERR, &instance->pdev->dev, 4078 "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n", 4079 abs_state & MFI_STATE_FAULT_CODE, 4080 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4081 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 4082 if (instance->requestorId && reason) { 4083 dev_warn(&instance->pdev->dev, "SR-IOV: Found FW in FAULT" 4084 " state while polling during" 4085 " I/O timeout handling for %d\n", 
4086 instance->host->host_no); 4087 *convert = 1; 4088 } 4089 4090 retval = 1; 4091 goto out; 4092 } 4093 4094 4095 /* If SR-IOV VF mode & heartbeat timeout, don't wait */ 4096 if (instance->requestorId && !reason) { 4097 retval = 1; 4098 goto out; 4099 } 4100 4101 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ 4102 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { 4103 if (instance->hb_host_mem->HB.fwCounter != 4104 instance->hb_host_mem->HB.driverCounter) { 4105 instance->hb_host_mem->HB.driverCounter = 4106 instance->hb_host_mem->HB.fwCounter; 4107 hb_seconds_missed = 0; 4108 } else { 4109 hb_seconds_missed++; 4110 if (hb_seconds_missed == 4111 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { 4112 dev_warn(&instance->pdev->dev, "SR-IOV:" 4113 " Heartbeat never completed" 4114 " while polling during I/O" 4115 " timeout handling for " 4116 "scsi%d.\n", 4117 instance->host->host_no); 4118 *convert = 1; 4119 retval = 1; 4120 goto out; 4121 } 4122 } 4123 } 4124 4125 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 4126 outstanding = atomic_read(&instance->fw_outstanding); 4127 if (!outstanding) 4128 goto out; 4129 4130 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 4131 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d " 4132 "commands to complete for scsi%d\n", i, 4133 outstanding, instance->host->host_no); 4134 } 4135 msleep(1000); 4136 } 4137 4138 if (instance->snapdump_wait_time) { 4139 megasas_trigger_snap_dump(instance); 4140 retval = 1; 4141 goto out; 4142 } 4143 4144 if (atomic_read(&instance->fw_outstanding)) { 4145 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 4146 "will reset adapter scsi%d.\n", 4147 instance->host->host_no); 4148 *convert = 1; 4149 retval = 1; 4150 } 4151 4152 out: 4153 return retval; 4154 } 4155 4156 void megasas_reset_reply_desc(struct megasas_instance *instance) 4157 { 4158 int i, j, count; 4159 struct fusion_context *fusion; 4160 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 4161 4162 fusion = instance->ctrl_context; 4163 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 4164 for (i = 0 ; i < count ; i++) { 4165 fusion->last_reply_idx[i] = 0; 4166 reply_desc = fusion->reply_frames_desc[i]; 4167 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++) 4168 reply_desc->Words = cpu_to_le64(ULLONG_MAX); 4169 } 4170 } 4171 4172 /* 4173 * megasas_refire_mgmt_cmd : Re-fire management commands 4174 * @instance: Controller's soft instance 4175 */ 4176 static void megasas_refire_mgmt_cmd(struct megasas_instance *instance, 4177 bool return_ioctl) 4178 { 4179 int j; 4180 struct megasas_cmd_fusion *cmd_fusion; 4181 struct fusion_context *fusion; 4182 struct megasas_cmd *cmd_mfi; 4183 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 4184 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; 4185 u16 smid; 4186 bool refire_cmd = false; 4187 u8 result; 4188 u32 opcode = 0; 4189 4190 fusion = instance->ctrl_context; 4191 4192 /* Re-fire management commands. 4193 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds. 
4194 */ 4195 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { 4196 cmd_fusion = fusion->cmd_list[j]; 4197 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 4198 smid = le16_to_cpu(cmd_mfi->context.smid); 4199 result = REFIRE_CMD; 4200 4201 if (!smid) 4202 continue; 4203 4204 req_desc = megasas_get_request_descriptor(instance, smid - 1); 4205 4206 switch (cmd_mfi->frame->hdr.cmd) { 4207 case MFI_CMD_DCMD: 4208 opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode); 4209 /* Do not refire shutdown command */ 4210 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 4211 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; 4212 result = COMPLETE_CMD; 4213 break; 4214 } 4215 4216 refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) && 4217 (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 4218 !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); 4219 4220 if (!refire_cmd) 4221 result = RETURN_CMD; 4222 4223 break; 4224 case MFI_CMD_NVME: 4225 if (!instance->support_nvme_passthru) { 4226 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; 4227 result = COMPLETE_CMD; 4228 } 4229 4230 break; 4231 case MFI_CMD_TOOLBOX: 4232 if (!instance->support_pci_lane_margining) { 4233 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; 4234 result = COMPLETE_CMD; 4235 } 4236 4237 break; 4238 default: 4239 break; 4240 } 4241 4242 if (return_ioctl && cmd_mfi->sync_cmd && 4243 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { 4244 dev_err(&instance->pdev->dev, 4245 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", 4246 __func__, __LINE__, cmd_mfi->frame->hdr.cmd, 4247 le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); 4248 cmd_mfi->cmd_status_drv = DCMD_BUSY; 4249 result = COMPLETE_CMD; 4250 } 4251 4252 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) 4253 cmd_fusion->io_request; 4254 if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) 4255 result = RETURN_CMD; 4256 4257 switch (result) { 4258 case REFIRE_CMD: 4259 megasas_fire_cmd_fusion(instance, req_desc); 4260 break; 4261 case RETURN_CMD: 4262 megasas_return_cmd(instance, cmd_mfi); 4263 break; 4264 case COMPLETE_CMD: 4265 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 4266 break; 4267 } 4268 } 4269 } 4270 4271 /* 4272 * megasas_return_polled_cmds: Return polled mode commands back to the pool 4273 * before initiating an OCR. 
4274 * @instance: Controller's soft instance 4275 */ 4276 static void 4277 megasas_return_polled_cmds(struct megasas_instance *instance) 4278 { 4279 int i; 4280 struct megasas_cmd_fusion *cmd_fusion; 4281 struct fusion_context *fusion; 4282 struct megasas_cmd *cmd_mfi; 4283 4284 fusion = instance->ctrl_context; 4285 4286 for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) { 4287 cmd_fusion = fusion->cmd_list[i]; 4288 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 4289 4290 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { 4291 if (megasas_dbg_lvl & OCR_DEBUG) 4292 dev_info(&instance->pdev->dev, 4293 "%s %d return cmd 0x%x opcode 0x%x\n", 4294 __func__, __LINE__, cmd_mfi->frame->hdr.cmd, 4295 le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); 4296 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; 4297 megasas_return_cmd(instance, cmd_mfi); 4298 } 4299 } 4300 } 4301 4302 /* 4303 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device 4304 * @instance: per adapter struct 4305 * @id: the id assigned by the OS 4306 * @channel: the channel assigned by the OS 4307 * 4308 * Returns SUCCESS if no IOs pending to SCSI device, else returns FAILED 4309 */ 4310 4311 static int megasas_track_scsiio(struct megasas_instance *instance, 4312 int id, int channel) 4313 { 4314 int i, found = 0; 4315 struct megasas_cmd_fusion *cmd_fusion; 4316 struct fusion_context *fusion; 4317 fusion = instance->ctrl_context; 4318 4319 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4320 cmd_fusion = fusion->cmd_list[i]; 4321 if (cmd_fusion->scmd && 4322 (cmd_fusion->scmd->device->id == id && 4323 cmd_fusion->scmd->device->channel == channel)) { 4324 dev_info(&instance->pdev->dev, 4325 "SCSI commands pending to target " 4326 "channel %d id %d \tSMID: 0x%x\n", 4327 channel, id, cmd_fusion->index); 4328 scsi_print_command(cmd_fusion->scmd); 4329 found = 1; 4330 break; 4331 } 4332 } 4333 4334 return found ? FAILED : SUCCESS; 4335 } 4336 4337 /** 4338 * megasas_tm_response_code - translation of device response code 4339 * @instance: Controller's soft instance 4340 * @mpi_reply: MPI reply returned by firmware 4341 * 4342 * Return nothing. 
4343 */ 4344 static void 4345 megasas_tm_response_code(struct megasas_instance *instance, 4346 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply) 4347 { 4348 char *desc; 4349 4350 switch (mpi_reply->ResponseCode) { 4351 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 4352 desc = "task management request completed"; 4353 break; 4354 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 4355 desc = "invalid frame"; 4356 break; 4357 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 4358 desc = "task management request not supported"; 4359 break; 4360 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 4361 desc = "task management request failed"; 4362 break; 4363 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 4364 desc = "task management request succeeded"; 4365 break; 4366 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 4367 desc = "invalid lun"; 4368 break; 4369 case 0xA: 4370 desc = "overlapped tag attempted"; 4371 break; 4372 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 4373 desc = "task queued, however not sent to target"; 4374 break; 4375 default: 4376 desc = "unknown"; 4377 break; 4378 } 4379 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n", 4380 mpi_reply->ResponseCode, desc); 4381 dev_dbg(&instance->pdev->dev, 4382 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo" 4383 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n", 4384 mpi_reply->TerminationCount, mpi_reply->DevHandle, 4385 mpi_reply->Function, mpi_reply->TaskType, 4386 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo); 4387 } 4388 4389 /** 4390 * megasas_issue_tm - main routine for sending tm requests 4391 * @instance: per adapter struct 4392 * @device_handle: device handle 4393 * @channel: the channel assigned by the OS 4394 * @id: the id assigned by the OS 4395 * @smid_task: smid assigned to the task 4396 * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.c) 4397 * @mr_device_priv_data: private data 4398 * Context: user 4399 * 4400 * MegaRaid uses the MPT interface for Task Management requests. 4401 * A generic API for sending task management requests to firmware. 4402 * 4403 * Return SUCCESS or FAILED. 4404 */ 4405 static int 4406 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, 4407 uint channel, uint id, u16 smid_task, u8 type, 4408 struct MR_PRIV_DEVICE *mr_device_priv_data) 4409 { 4410 struct MR_TASK_MANAGE_REQUEST *mr_request; 4411 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; 4412 unsigned long timeleft; 4413 struct megasas_cmd_fusion *cmd_fusion; 4414 struct megasas_cmd *cmd_mfi; 4415 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 4416 struct fusion_context *fusion = NULL; 4417 struct megasas_cmd_fusion *scsi_lookup; 4418 int rc; 4419 int timeout = MEGASAS_DEFAULT_TM_TIMEOUT; 4420 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; 4421 4422 fusion = instance->ctrl_context; 4423 4424 cmd_mfi = megasas_get_cmd(instance); 4425 4426 if (!cmd_mfi) { 4427 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 4428 __func__, __LINE__); 4429 return -ENOMEM; 4430 } 4431 4432 cmd_fusion = megasas_get_cmd_fusion(instance, 4433 instance->max_scsi_cmds + cmd_mfi->index); 4434 4435 /* Save the smid. To be used for returning the cmd */ 4436 cmd_mfi->context.smid = cmd_fusion->index; 4437 4438 req_desc = megasas_get_request_descriptor(instance, 4439 (cmd_fusion->index - 1)); 4440 4441 cmd_fusion->request_desc = req_desc; 4442 req_desc->Words = 0; 4443 4444 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; 4445 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); 4446 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; 4447 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 4448 mpi_request->DevHandle = cpu_to_le16(device_handle); 4449 mpi_request->TaskType = type; 4450 mpi_request->TaskMID = cpu_to_le16(smid_task); 4451 mpi_request->LUN[1] = 0; 4452 4453 4454 req_desc = cmd_fusion->request_desc; 4455 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index); 4456 req_desc->HighPriority.RequestFlags = 4457 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 4458 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 4459 req_desc->HighPriority.MSIxIndex = 0; 4460 req_desc->HighPriority.LMID = 0; 4461 req_desc->HighPriority.Reserved1 = 0; 4462 4463 if (channel < MEGASAS_MAX_PD_CHANNELS) 4464 mr_request->tmReqFlags.isTMForPD = 1; 4465 else 4466 mr_request->tmReqFlags.isTMForLD = 1; 4467 4468 init_completion(&cmd_fusion->done); 4469 megasas_fire_cmd_fusion(instance, req_desc); 4470 4471 switch (type) { 4472 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 4473 timeout = mr_device_priv_data->task_abort_tmo; 4474 break; 4475 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4476 timeout = mr_device_priv_data->target_reset_tmo; 4477 break; 4478 } 4479 4480 timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ); 4481 4482 if (!timeleft) { 4483 dev_err(&instance->pdev->dev, 4484 "task mgmt type 0x%x timed out\n", type); 4485 mutex_unlock(&instance->reset_mutex); 4486 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); 4487 mutex_lock(&instance->reset_mutex); 4488 return rc; 4489 } 4490 4491 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply; 4492 megasas_tm_response_code(instance, mpi_reply); 4493 4494 megasas_return_cmd(instance, cmd_mfi); 4495 rc = SUCCESS; 4496 switch (type) { 4497 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 4498 scsi_lookup = fusion->cmd_list[smid_task - 1]; 4499 4500 if (scsi_lookup->scmd == NULL) 4501 break; 4502 else { 4503 instance->instancet->disable_intr(instance); 4504 megasas_sync_irqs((unsigned long)instance); 4505 instance->instancet->enable_intr(instance); 4506 megasas_enable_irq_poll(instance); 4507 if (scsi_lookup->scmd == NULL) 4508 break; 4509 } 4510 rc = FAILED; 4511 break; 4512 4513 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4514 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) 4515 break; 4516 instance->instancet->disable_intr(instance); 4517 megasas_sync_irqs((unsigned long)instance); 4518 rc = megasas_track_scsiio(instance, id, channel); 4519 instance->instancet->enable_intr(instance); 4520 megasas_enable_irq_poll(instance); 4521 4522 break; 4523 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 4524 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: 4525 break; 4526 default: 4527 rc = FAILED; 4528 break; 4529 } 4530 4531 return rc; 4532 4533 } 4534 4535 /* 4536 * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI command 4537 * @scmd: SCSI command pointer 4538 * 4539 * Returns a non-zero SMID if the command is found among outstanding commands, else 0 4540 */ 4541 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd) 4542 { 4543 int i, ret = 0; 4544 struct megasas_instance 
*instance; 4545 struct megasas_cmd_fusion *cmd_fusion; 4546 struct fusion_context *fusion; 4547 4548 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4549 4550 fusion = instance->ctrl_context; 4551 4552 for (i = 0; i < instance->max_scsi_cmds; i++) { 4553 cmd_fusion = fusion->cmd_list[i]; 4554 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) { 4555 scmd_printk(KERN_NOTICE, scmd, "Abort request is for" 4556 " SMID: %d\n", cmd_fusion->index); 4557 ret = cmd_fusion->index; 4558 break; 4559 } 4560 } 4561 4562 return ret; 4563 } 4564 4565 /* 4566 * megasas_get_tm_devhandle - Get devhandle for TM request 4567 * @sdev: OS provided scsi device 4568 * 4569 * Returns: devhandle/targetID of SCSI device 4570 */ 4571 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) 4572 { 4573 u16 pd_index = 0; 4574 u32 device_id; 4575 struct megasas_instance *instance; 4576 struct fusion_context *fusion; 4577 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 4578 u16 devhandle = (u16)ULONG_MAX; 4579 4580 instance = (struct megasas_instance *)sdev->host->hostdata; 4581 fusion = instance->ctrl_context; 4582 4583 if (!MEGASAS_IS_LOGICAL(sdev)) { 4584 if (instance->use_seqnum_jbod_fp) { 4585 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) 4586 + sdev->id; 4587 pd_sync = (void *)fusion->pd_seq_sync 4588 [(instance->pd_seq_map_id - 1) & 1]; 4589 devhandle = pd_sync->seq[pd_index].devHandle; 4590 } else 4591 sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable" 4592 " without JBOD MAP support from %s %d\n", __func__, __LINE__); 4593 } else { 4594 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 4595 + sdev->id; 4596 devhandle = device_id; 4597 } 4598 4599 return devhandle; 4600 } 4601 4602 /* 4603 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters 4604 * @scmd : pointer to scsi command object 4605 * 4606 * Returns SUCCESS if the command was aborted, else FAILED 4607 */ 4608 4609 int megasas_task_abort_fusion(struct scsi_cmnd *scmd) 4610 { 4611 struct megasas_instance *instance; 4612 u16 smid, devhandle; 4613 int ret; 4614 struct MR_PRIV_DEVICE *mr_device_priv_data; 4615 mr_device_priv_data = scmd->device->hostdata; 4616 4617 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4618 4619 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 4620 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, " 4621 "SCSI host:%d\n", instance->host->host_no); 4622 ret = FAILED; 4623 return ret; 4624 } 4625 4626 if (!mr_device_priv_data) { 4627 sdev_printk(KERN_INFO, scmd->device, "device has been deleted! " 4628 "scmd(%p)\n", scmd); 4629 scmd->result = DID_NO_CONNECT << 16; 4630 ret = SUCCESS; 4631 goto out; 4632 } 4633 4634 if (!mr_device_priv_data->is_tm_capable) { 4635 ret = FAILED; 4636 goto out; 4637 } 4638 4639 mutex_lock(&instance->reset_mutex); 4640 4641 smid = megasas_fusion_smid_lookup(scmd); 4642 4643 if (!smid) { 4644 ret = SUCCESS; 4645 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is" 4646 " issued was not found in outstanding commands\n"); 4647 mutex_unlock(&instance->reset_mutex); 4648 goto out; 4649 } 4650 4651 devhandle = megasas_get_tm_devhandle(scmd->device); 4652 4653 if (devhandle == (u16)ULONG_MAX) { 4654 ret = SUCCESS; 4655 sdev_printk(KERN_INFO, scmd->device, 4656 "task abort issued for invalid devhandle\n"); 4657 mutex_unlock(&instance->reset_mutex); 4658 goto out; 4659 } 4660 sdev_printk(KERN_INFO, scmd->device, 4661 "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n", 4662 scmd, devhandle); 4663 4664 mr_device_priv_data->tm_busy = true; 4665 ret = megasas_issue_tm(instance, devhandle, 4666 scmd->device->channel, scmd->device->id, smid, 4667 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4668 mr_device_priv_data); 4669 mr_device_priv_data->tm_busy = false; 4670 4671 mutex_unlock(&instance->reset_mutex); 4672 scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n", 4673 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4674 out: 4675 scsi_print_command(scmd); 4676 if (megasas_dbg_lvl & TM_DEBUG) 4677 megasas_dump_fusion_io(scmd); 4678 4679 return ret; 4680 } 4681 4682 /* 4683 * megasas_reset_target_fusion : target reset function for fusion adapters 4684 * scmd: SCSI command pointer 4685 * 4686 * Returns SUCCESS if all commands associated with the target were aborted, else FAILED 4687 */ 4688 4689 int megasas_reset_target_fusion(struct scsi_cmnd *scmd) 4690 { 4691 4692 struct megasas_instance *instance; 4693 int ret = FAILED; 4694 u16 devhandle; 4695 struct MR_PRIV_DEVICE *mr_device_priv_data; 4696 mr_device_priv_data = scmd->device->hostdata; 4697 4698 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4699 4700 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 4701 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, " 4702 "SCSI host:%d\n", instance->host->host_no); 4703 ret = FAILED; 4704 return ret; 4705 } 4706 4707 if (!mr_device_priv_data) { 4708 sdev_printk(KERN_INFO, scmd->device, 4709 "device has been deleted! scmd: (0x%p)\n", scmd); 4710 scmd->result = DID_NO_CONNECT << 16; 4711 ret = SUCCESS; 4712 goto out; 4713 } 4714 4715 if (!mr_device_priv_data->is_tm_capable) { 4716 ret = FAILED; 4717 goto out; 4718 } 4719 4720 mutex_lock(&instance->reset_mutex); 4721 devhandle = megasas_get_tm_devhandle(scmd->device); 4722 4723 if (devhandle == (u16)ULONG_MAX) { 4724 ret = SUCCESS; 4725 sdev_printk(KERN_INFO, scmd->device, 4726 "target reset issued for invalid devhandle\n"); 4727 mutex_unlock(&instance->reset_mutex); 4728 goto out; 4729 } 4730 4731 sdev_printk(KERN_INFO, scmd->device, 4732 "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n", 4733 scmd, devhandle); 4734 mr_device_priv_data->tm_busy = true; 4735 ret = megasas_issue_tm(instance, devhandle, 4736 scmd->device->channel, scmd->device->id, 0, 4737 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 4738 mr_device_priv_data); 4739 mr_device_priv_data->tm_busy = false; 4740 mutex_unlock(&instance->reset_mutex); 4741 scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n", 4742 (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); 4743 4744 out: 4745 return ret; 4746 } 4747 4748 /*SRIOV get other instance in cluster if any*/ 4749 static struct 4750 megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) 4751 { 4752 int i; 4753 4754 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { 4755 if (megasas_mgmt_info.instance[i] && 4756 (megasas_mgmt_info.instance[i] != instance) && 4757 megasas_mgmt_info.instance[i]->requestorId && 4758 megasas_mgmt_info.instance[i]->peerIsPresent && 4759 (memcmp((megasas_mgmt_info.instance[i]->clusterId), 4760 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) 4761 return megasas_mgmt_info.instance[i]; 4762 } 4763 return NULL; 4764 } 4765 4766 /* Check for a second path that is currently UP */ 4767 int megasas_check_mpio_paths(struct megasas_instance *instance, 4768 struct scsi_cmnd *scmd) 4769 { 4770 struct megasas_instance *peer_instance = NULL; 4771 int retval = (DID_REQUEUE << 16); 4772 4773 if (instance->peerIsPresent) { 4774 peer_instance = megasas_get_peer_instance(instance); 4775 if ((peer_instance) && 4776 (atomic_read(&peer_instance->adprecovery) == 4777 MEGASAS_HBA_OPERATIONAL)) 4778 retval = (DID_NO_CONNECT << 16); 4779 } 4780 return retval; 4781 } 4782 4783 /* Core fusion reset function */ 4784 int megasas_reset_fusion(struct Scsi_Host *shost, int reason) 4785 { 4786 int retval = SUCCESS, i, j, convert = 0; 4787 struct megasas_instance *instance; 4788 struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; 4789 struct fusion_context *fusion; 4790 u32 abs_state, status_reg, reset_adapter, fpio_count = 0; 4791 u32 io_timeout_in_crash_mode = 0; 4792 struct scsi_cmnd *scmd_local = NULL; 4793 struct scsi_device *sdev; 4794 int ret_target_prop = DCMD_FAILED; 4795 bool is_target_prop = false; 4796 bool do_adp_reset = true; 4797 int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES; 4798 4799 instance = (struct megasas_instance *)shost->hostdata; 4800 fusion = instance->ctrl_context; 4801 4802 mutex_lock(&instance->reset_mutex); 4803 4804 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 4805 dev_warn(&instance->pdev->dev, "Hardware critical error, " 4806 "returning FAILED for scsi%d.\n", 4807 instance->host->host_no); 4808 mutex_unlock(&instance->reset_mutex); 4809 return FAILED; 4810 } 4811 status_reg = instance->instancet->read_fw_status_reg(instance); 4812 abs_state = status_reg & MFI_STATE_MASK; 4813 4814 /* IO timeout detected, forcibly put FW in FAULT state */ 4815 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && 4816 instance->crash_dump_app_support && reason) { 4817 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " 4818 "forcibly FAULT Firmware\n"); 4819 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4820 status_reg = megasas_readl(instance, &instance->reg_set->doorbell); 4821 writel(status_reg | MFI_STATE_FORCE_OCR, 4822 &instance->reg_set->doorbell); 4823 readl(&instance->reg_set->doorbell); 4824 mutex_unlock(&instance->reset_mutex); 4825 do { 4826 ssleep(3); 4827 io_timeout_in_crash_mode++; 4828 dev_dbg(&instance->pdev->dev, "waiting for [%d] " 4829 "seconds for crash dump collection and OCR " 4830 "to be done\n", (io_timeout_in_crash_mode * 3)); 4831 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && 4832 (io_timeout_in_crash_mode < 80)); 4833 4834 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 4835 dev_info(&instance->pdev->dev, "OCR done for IO " 4836 "timeout case\n"); 4837 retval = SUCCESS; 4838 } else { 4839 
dev_info(&instance->pdev->dev, "Controller is not " 4840 "operational after 240 seconds wait for IO " 4841 "timeout case in FW crash dump mode, doing " 4842 "OCR/kill adapter\n"); 4843 retval = megasas_reset_fusion(shost, 0); 4844 } 4845 return retval; 4846 } 4847 4848 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 4849 del_timer_sync(&instance->sriov_heartbeat_timer); 4850 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4851 set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); 4852 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); 4853 instance->instancet->disable_intr(instance); 4854 megasas_sync_irqs((unsigned long)instance); 4855 4856 /* First try waiting for commands to complete */ 4857 if (megasas_wait_for_outstanding_fusion(instance, reason, 4858 &convert)) { 4859 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4860 dev_warn(&instance->pdev->dev, "resetting fusion " 4861 "adapter scsi%d.\n", instance->host->host_no); 4862 if (convert) 4863 reason = 0; 4864 4865 if (megasas_dbg_lvl & OCR_DEBUG) 4866 dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); 4867 4868 /* Now return commands back to the OS */ 4869 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4870 cmd_fusion = fusion->cmd_list[i]; 4871 /* check for extra commands issued by the driver */ 4872 if (instance->adapter_type >= VENTURA_SERIES) { 4873 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; 4874 megasas_return_cmd_fusion(instance, r1_cmd); 4875 } 4876 scmd_local = cmd_fusion->scmd; 4877 if (cmd_fusion->scmd) { 4878 if (megasas_dbg_lvl & OCR_DEBUG) { 4879 sdev_printk(KERN_INFO, 4880 cmd_fusion->scmd->device, "SMID: 0x%x\n", 4881 cmd_fusion->index); 4882 megasas_dump_fusion_io(cmd_fusion->scmd); 4883 } 4884 4885 if (cmd_fusion->io_request->Function == 4886 MPI2_FUNCTION_SCSI_IO_REQUEST) 4887 fpio_count++; 4888 4889 scmd_local->result = 4890 megasas_check_mpio_paths(instance, 4891 scmd_local); 4892 if (instance->ldio_threshold && 4893 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 4894 atomic_dec(&instance->ldio_outstanding); 4895 megasas_return_cmd_fusion(instance, cmd_fusion); 4896 scsi_dma_unmap(scmd_local); 4897 scmd_local->scsi_done(scmd_local); 4898 } 4899 } 4900 4901 dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n", 4902 fpio_count); 4903 4904 atomic_set(&instance->fw_outstanding, 0); 4905 4906 status_reg = instance->instancet->read_fw_status_reg(instance); 4907 abs_state = status_reg & MFI_STATE_MASK; 4908 reset_adapter = status_reg & MFI_RESET_ADAPTER; 4909 if (instance->disableOnlineCtrlReset || 4910 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 4911 /* Reset not supported, kill adapter */ 4912 dev_warn(&instance->pdev->dev, "Reset not supported" 4913 ", killing adapter scsi%d.\n", 4914 instance->host->host_no); 4915 goto kill_hba; 4916 } 4917 4918 /* Let SR-IOV VF & PF sync up if there was a HB failure */ 4919 if (instance->requestorId && !reason) { 4920 msleep(MEGASAS_OCR_SETTLE_TIME_VF); 4921 do_adp_reset = false; 4922 max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF; 4923 } 4924 4925 /* Now try to reset the chip */ 4926 for (i = 0; i < max_reset_tries; i++) { 4927 /* 4928 * Do adp reset and wait for 4929 * controller to transition to ready 4930 */ 4931 if (megasas_adp_reset_wait_for_ready(instance, 4932 do_adp_reset, 1) == FAILED) 4933 continue; 4934 4935 /* Wait for FW to become ready */ 4936 if (megasas_transition_to_ready(instance, 1)) { 4937 dev_warn(&instance->pdev->dev, 4938 "Failed to transition 
controller to ready for " 4939 "scsi%d.\n", instance->host->host_no); 4940 continue; 4941 } 4942 megasas_reset_reply_desc(instance); 4943 megasas_fusion_update_can_queue(instance, OCR_CONTEXT); 4944 4945 if (megasas_ioc_init_fusion(instance)) { 4946 continue; 4947 } 4948 4949 if (megasas_get_ctrl_info(instance)) { 4950 dev_info(&instance->pdev->dev, 4951 "Failed from %s %d\n", 4952 __func__, __LINE__); 4953 goto kill_hba; 4954 } 4955 4956 megasas_refire_mgmt_cmd(instance, 4957 (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) 4958 ? 1 : 0)); 4959 4960 /* Reset load balance info */ 4961 if (fusion->load_balance_info) 4962 memset(fusion->load_balance_info, 0, 4963 (sizeof(struct LD_LOAD_BALANCE_INFO) * 4964 MAX_LOGICAL_DRIVES_EXT)); 4965 4966 if (!megasas_get_map_info(instance)) { 4967 megasas_sync_map_info(instance); 4968 } else { 4969 /* 4970 * Return pending polled mode cmds before 4971 * retrying OCR 4972 */ 4973 megasas_return_polled_cmds(instance); 4974 continue; 4975 } 4976 4977 megasas_setup_jbod_map(instance); 4978 4979 /* reset stream detection array */ 4980 if (instance->adapter_type >= VENTURA_SERIES) { 4981 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 4982 memset(fusion->stream_detect_by_ld[j], 4983 0, sizeof(struct LD_STREAM_DETECT)); 4984 fusion->stream_detect_by_ld[j]->mru_bit_map 4985 = MR_STREAM_BITMAP; 4986 } 4987 } 4988 4989 clear_bit(MEGASAS_FUSION_IN_RESET, 4990 &instance->reset_flags); 4991 instance->instancet->enable_intr(instance); 4992 megasas_enable_irq_poll(instance); 4993 shost_for_each_device(sdev, shost) { 4994 if ((instance->tgt_prop) && 4995 (instance->nvme_page_size)) 4996 ret_target_prop = megasas_get_target_prop(instance, sdev); 4997 4998 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 4999 megasas_set_dynamic_target_properties(sdev, is_target_prop); 5000 } 5001 5002 status_reg = instance->instancet->read_fw_status_reg 5003 (instance); 5004 abs_state = status_reg & MFI_STATE_MASK; 5005 if (abs_state != MFI_STATE_OPERATIONAL) { 5006 dev_info(&instance->pdev->dev, 5007 "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", 5008 abs_state, instance->host->host_no); 5009 goto out; 5010 } 5011 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 5012 5013 dev_info(&instance->pdev->dev, 5014 "Adapter is OPERATIONAL for scsi:%d\n", 5015 instance->host->host_no); 5016 5017 /* Restart SR-IOV heartbeat */ 5018 if (instance->requestorId) { 5019 if (!megasas_sriov_start_heartbeat(instance, 0)) 5020 megasas_start_timer(instance); 5021 else 5022 instance->skip_heartbeat_timer_del = 1; 5023 } 5024 5025 if (instance->crash_dump_drv_support && 5026 instance->crash_dump_app_support) 5027 megasas_set_crash_dump_params(instance, 5028 MR_CRASH_BUF_TURN_ON); 5029 else 5030 megasas_set_crash_dump_params(instance, 5031 MR_CRASH_BUF_TURN_OFF); 5032 5033 if (instance->snapdump_wait_time) { 5034 megasas_get_snapdump_properties(instance); 5035 dev_info(&instance->pdev->dev, 5036 "Snap dump wait time\t: %d\n", 5037 instance->snapdump_wait_time); 5038 } 5039 5040 retval = SUCCESS; 5041 5042 /* Adapter reset completed successfully */ 5043 dev_warn(&instance->pdev->dev, 5044 "Reset successful for scsi%d.\n", 5045 instance->host->host_no); 5046 5047 goto out; 5048 } 5049 /* Reset failed, kill the adapter */ 5050 dev_warn(&instance->pdev->dev, "Reset failed, killing " 5051 "adapter scsi%d.\n", instance->host->host_no); 5052 goto kill_hba; 5053 } else { 5054 /* For VF: Restart HB timer if we didn't OCR */ 5055 if (instance->requestorId) { 5056 
megasas_start_timer(instance); 5057 } 5058 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 5059 instance->instancet->enable_intr(instance); 5060 megasas_enable_irq_poll(instance); 5061 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 5062 goto out; 5063 } 5064 kill_hba: 5065 megaraid_sas_kill_hba(instance); 5066 megasas_enable_irq_poll(instance); 5067 instance->skip_heartbeat_timer_del = 1; 5068 retval = FAILED; 5069 out: 5070 clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); 5071 mutex_unlock(&instance->reset_mutex); 5072 return retval; 5073 } 5074 5075 /* Fusion Crash dump collection */ 5076 static void megasas_fusion_crash_dump(struct megasas_instance *instance) 5077 { 5078 u32 status_reg; 5079 u8 partial_copy = 0; 5080 int wait = 0; 5081 5082 5083 status_reg = instance->instancet->read_fw_status_reg(instance); 5084 5085 /* 5086 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer 5087 * to host crash buffers 5088 */ 5089 if (instance->drv_buf_index == 0) { 5090 /* Buffer is already allocated for old Crash dump. 5091 * Do OCR and do not wait for crash dump collection 5092 */ 5093 if (instance->drv_buf_alloc) { 5094 dev_info(&instance->pdev->dev, "earlier crash dump is " 5095 "not yet copied by application, ignoring this " 5096 "crash dump and initiating OCR\n"); 5097 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 5098 writel(status_reg, 5099 &instance->reg_set->outbound_scratch_pad_0); 5100 readl(&instance->reg_set->outbound_scratch_pad_0); 5101 return; 5102 } 5103 megasas_alloc_host_crash_buffer(instance); 5104 dev_info(&instance->pdev->dev, "Number of host crash buffers " 5105 "allocated: %d\n", instance->drv_buf_alloc); 5106 } 5107 5108 while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) && 5109 (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) { 5110 if (!(status_reg & MFI_STATE_DMADONE)) { 5111 /* 5112 * Next crash dump buffer is not yet DMA'd by FW. 5113 * Check again after MEGASAS_WAIT_FOR_NEXT_DMA_MSECS and bail 5114 * out if FW does not post the next buffer within the watchdog wait count. 
5115 */ 5116 wait++; 5117 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); 5118 status_reg = instance->instancet->read_fw_status_reg( 5119 instance); 5120 continue; 5121 } 5122 5123 wait = 0; 5124 if (instance->drv_buf_index >= instance->drv_buf_alloc) { 5125 dev_info(&instance->pdev->dev, 5126 "Driver is done copying the buffers: %d\n", 5127 instance->drv_buf_alloc); 5128 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 5129 partial_copy = 1; 5130 break; 5131 } else { 5132 memcpy(instance->crash_buf[instance->drv_buf_index], 5133 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); 5134 instance->drv_buf_index++; 5135 status_reg &= ~MFI_STATE_DMADONE; 5136 } 5137 5138 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); 5139 readl(&instance->reg_set->outbound_scratch_pad_0); 5140 5141 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); 5142 status_reg = instance->instancet->read_fw_status_reg(instance); 5143 } 5144 5145 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { 5146 dev_info(&instance->pdev->dev, "Crash Dump is available, number " 5147 "of copied buffers: %d\n", instance->drv_buf_index); 5148 instance->fw_crash_buffer_size = instance->drv_buf_index; 5149 instance->fw_crash_state = AVAILABLE; 5150 instance->drv_buf_index = 0; 5151 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); 5152 readl(&instance->reg_set->outbound_scratch_pad_0); 5153 if (!partial_copy) 5154 megasas_reset_fusion(instance->host, 0); 5155 } 5156 } 5157 5158 5159 /* Fusion OCR work queue */ 5160 void megasas_fusion_ocr_wq(struct work_struct *work) 5161 { 5162 struct megasas_instance *instance = 5163 container_of(work, struct megasas_instance, work_init); 5164 5165 megasas_reset_fusion(instance->host, 0); 5166 } 5167 5168 /* Allocate fusion context */ 5169 int 5170 megasas_alloc_fusion_context(struct megasas_instance *instance) 5171 { 5172 struct fusion_context *fusion; 5173 5174 instance->ctrl_context = kzalloc(sizeof(struct fusion_context), 5175 GFP_KERNEL); 5176 if (!instance->ctrl_context) { 5177 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5178 __func__, __LINE__); 5179 return -ENOMEM; 5180 } 5181 5182 fusion = instance->ctrl_context; 5183 5184 fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT * 5185 sizeof(LD_SPAN_INFO)); 5186 fusion->log_to_span = 5187 (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 5188 fusion->log_to_span_pages); 5189 if (!fusion->log_to_span) { 5190 fusion->log_to_span = 5191 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, 5192 sizeof(LD_SPAN_INFO))); 5193 if (!fusion->log_to_span) { 5194 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5195 __func__, __LINE__); 5196 return -ENOMEM; 5197 } 5198 } 5199 5200 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * 5201 sizeof(struct LD_LOAD_BALANCE_INFO)); 5202 fusion->load_balance_info = 5203 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 5204 fusion->load_balance_info_pages); 5205 if (!fusion->load_balance_info) { 5206 fusion->load_balance_info = 5207 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, 5208 sizeof(struct LD_LOAD_BALANCE_INFO))); 5209 if (!fusion->load_balance_info) 5210 dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " 5211 "continuing without Load Balance support\n"); 5212 } 5213 5214 return 0; 5215 } 5216 5217 void 5218 megasas_free_fusion_context(struct megasas_instance *instance) 5219 { 5220 struct fusion_context *fusion = instance->ctrl_context; 5221 5222 if (fusion) { 5223 if (fusion->load_balance_info) { 5224 if 
(is_vmalloc_addr(fusion->load_balance_info)) 5225 vfree(fusion->load_balance_info); 5226 else 5227 free_pages((ulong)fusion->load_balance_info, 5228 fusion->load_balance_info_pages); 5229 } 5230 5231 if (fusion->log_to_span) { 5232 if (is_vmalloc_addr(fusion->log_to_span)) 5233 vfree(fusion->log_to_span); 5234 else 5235 free_pages((ulong)fusion->log_to_span, 5236 fusion->log_to_span_pages); 5237 } 5238 5239 kfree(fusion); 5240 } 5241 } 5242 5243 struct megasas_instance_template megasas_instance_template_fusion = { 5244 .enable_intr = megasas_enable_intr_fusion, 5245 .disable_intr = megasas_disable_intr_fusion, 5246 .clear_intr = megasas_clear_intr_fusion, 5247 .read_fw_status_reg = megasas_read_fw_status_reg_fusion, 5248 .adp_reset = megasas_adp_reset_fusion, 5249 .check_reset = megasas_check_reset_fusion, 5250 .service_isr = megasas_isr_fusion, 5251 .tasklet = megasas_complete_cmd_dpc_fusion, 5252 .init_adapter = megasas_init_adapter_fusion, 5253 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, 5254 .issue_dcmd = megasas_issue_dcmd_fusion, 5255 }; 5256