// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.c
 *
 * Authors: Broadcom Inc.
 *          Sumant Patro
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dbg.h>
#include <linux/dmi.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"


extern void megasas_free_cmds(struct megasas_instance *instance);
extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
					   *instance);
extern void
megasas_complete_cmd(struct megasas_instance *instance,
		     struct megasas_cmd *cmd, u8 alt_status);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);

void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
int megasas_alloc_cmds(struct megasas_instance *instance);
int
megasas_clear_intr_fusion(struct megasas_instance *instance);
int
megasas_issue_polled(struct megasas_instance *instance,
		     struct megasas_cmd *cmd);
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);

extern u32 megasas_dbg_lvl;
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
				  int initial);
void megasas_start_timer(struct megasas_instance *instance);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
static void megasas_fusion_crash_dump(struct megasas_instance *instance);
extern u32 megasas_readl(struct megasas_instance *instance,
			 const volatile void __iomem *addr);

/**
 * megasas_check_same_4gb_region - check if allocation
 *                                 crosses a 4GB boundary or not
 * @instance:   adapter's soft instance
 * @start_addr: start address of DMA allocation
 * @size:       size of allocation in bytes
 *
 * Return: true if the allocation stays within one 4GB region
 *         (start and end share the same upper 32 bits of address),
 *         false if the allocation crosses a 4GB boundary
 */
static inline bool megasas_check_same_4gb_region
	(struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
{
	dma_addr_t end_addr;

	end_addr = start_addr + size;

	if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
		dev_err(&instance->pdev->dev,
			"Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
			(unsigned long long)start_addr,
			(unsigned long long)end_addr);
		return false;
	}

	return true;
}
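/*
 * Worked example for the check above (illustrative values, not taken
 * from real hardware): start_addr = 0xFFFFF000 with size = 0x2000 gives
 * end_addr = 0x100001000. upper_32_bits() returns 0x0 for the start but
 * 0x1 for the end, so the buffer straddles a 4GB boundary and the
 * function returns false.
 */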
/**
 * megasas_enable_intr_fusion - Enables interrupts
 * @instance: Adapter soft state
 */
void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	instance->mask_interrupts = 0;
	/* For Thunderbolt/Invader also clear intr on enable */
	writel(~0, &regs->outbound_intr_status);
	readl(&regs->outbound_intr_status);

	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_fusion - Disables interrupts
 * @instance: Adapter soft state
 */
void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
	u32 mask = 0xFFFFFFFF;
	u32 status;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	instance->mask_interrupts = 1;

	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	status = readl(&regs->outbound_intr_mask);
}

int
megasas_clear_intr_fusion(struct megasas_instance *instance)
{
	u32 status;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	/*
	 * Check if it is our interrupt
	 */
	status = megasas_readl(instance,
			       &regs->outbound_intr_status);

	if (status & 1) {
		writel(status, &regs->outbound_intr_status);
		readl(&regs->outbound_intr_status);
		return 1;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	return 1;
}

/**
 * megasas_get_cmd_fusion - Get a command from the free pool
 * @instance: Adapter soft state
 * @blk_tag: block layer tag used as the index into cmd_list
 *
 * Returns a blk_tag indexed mpt frame
 */
inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
							 *instance, u32 blk_tag)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	return fusion->cmd_list[blk_tag];
}

/**
 * megasas_return_cmd_fusion - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
				      struct megasas_cmd_fusion *cmd)
{
	cmd->scmd = NULL;
	memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->cmd_completed = false;
}

/**
 * megasas_fire_cmd_fusion - Sends command to the FW
 * @instance: Adapter soft state
 * @req_desc: 64bit Request descriptor
 *
 * Perform PCI Write. The request descriptor must reach the controller as
 * one atomic 64-bit write where possible; otherwise the low and high
 * halves are written under the HBA lock so they cannot be interleaved.
 */

static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
		le32_to_cpu(req_desc->u.low));

	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
	unsigned long flags;
	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(le32_to_cpu(req_desc->u.low),
		&instance->reg_set->inbound_low_queue_port);
	writel(le32_to_cpu(req_desc->u.high),
		&instance->reg_set->inbound_high_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
/**
 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
 * @instance: Adapter soft state
 * @fw_boot_context: whether this function is called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * Update the host can_queue if the firmware has downgraded its maximum
 * supported commands. The firmware upgrade case is skipped because the
 * underlying firmware then has more resources than are exposed to the OS.
 */
static void
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
	u16 cur_max_fw_cmds = 0;
	u16 ldio_threshold = 0;
	struct megasas_register_set __iomem *reg_set;

	reg_set = instance->reg_set;

	/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
	if (instance->adapter_type < VENTURA_SERIES)
		cur_max_fw_cmds =
		megasas_readl(instance,
			      &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;

	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
	else
		ldio_threshold =
			(instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;

	dev_info(&instance->pdev->dev,
		 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
		 cur_max_fw_cmds, ldio_threshold);

	if (fw_boot_context == OCR_CONTEXT) {
		cur_max_fw_cmds = cur_max_fw_cmds - 1;
		if (cur_max_fw_cmds < instance->max_fw_cmds) {
			instance->cur_can_queue =
				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
						   MEGASAS_FUSION_IOCTL_CMDS);
			instance->host->can_queue = instance->cur_can_queue;
			instance->ldio_threshold = ldio_threshold;
		}
	} else {
		instance->max_fw_cmds = cur_max_fw_cmds;
		instance->ldio_threshold = ldio_threshold;

		if (reset_devices)
			instance->max_fw_cmds = min(instance->max_fw_cmds,
						    (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
		/*
		 * Reduce the max supported cmds by 1. This is to ensure that the
		 * reply_q_sz (1 more than the max cmd that the driver may send)
		 * does not exceed the max cmds that the FW can support.
		 */
		instance->max_fw_cmds = instance->max_fw_cmds-1;
	}
}
/**
 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
 * @instance: Adapter soft state
 */
void
megasas_free_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *cmd;

	if (fusion->sense)
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);

	/* SG */
	if (fusion->cmd_list) {
		for (i = 0; i < instance->max_mpt_cmds; i++) {
			cmd = fusion->cmd_list[i];
			if (cmd) {
				if (cmd->sg_frame)
					dma_pool_free(fusion->sg_dma_pool,
						      cmd->sg_frame,
						      cmd->sg_frame_phys_addr);
			}
			kfree(cmd);
		}
		kfree(fusion->cmd_list);
	}

	if (fusion->sg_dma_pool) {
		dma_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	}
	if (fusion->sense_dma_pool) {
		dma_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;
	}


	/* Reply Frame, Desc*/
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	else
		megasas_free_reply_fusion(instance);

	/* Request Frame, Desc*/
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
				  fusion->request_alloc_sz, fusion->req_frames_desc,
				  fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		dma_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;
	}
}

/**
 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
 * @instance: Adapter soft state
 *
 */
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
	int i;
	u16 max_cmd;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	int sense_sz;
	u32 offset;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;
	sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;

	fusion->sg_dma_pool =
			dma_pool_create("mr_sg", &instance->pdev->dev,
				instance->max_chain_frame_sz,
				MR_DEFAULT_NVME_PAGE_SIZE, 0);
	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
	fusion->sense_dma_pool =
			dma_pool_create("mr_sense", &instance->pdev->dev,
				sense_sz, 64, 0);

	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
				       GFP_KERNEL, &fusion->sense_phys_addr);
	if (!fusion->sense) {
		dev_err(&instance->pdev->dev,
			"failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/* The sense buffer, request frame and reply descriptor pools must
	 * each lie within a single 4GB region; megasas_check_same_4gb_region()
	 * verifies this. On failure, the old allocation and pool are
	 * destroyed and a new pool is created with an updated alignment,
	 * chosen so that the next allocation, if it succeeds, always meets
	 * the same-4GB-region requirement. The actual requirement is not
	 * alignment as such: the start and end of the DMA buffer must share
	 * the same upper 32 bits of address, and rounding the alignment up
	 * to a power of two of the size guarantees that.
	 */

	if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
					   sense_sz)) {
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);
		fusion->sense = NULL;
		dma_pool_destroy(fusion->sense_dma_pool);

		fusion->sense_dma_pool =
			dma_pool_create("mr_sense_align", &instance->pdev->dev,
					sense_sz, roundup_pow_of_two(sense_sz),
					0);
		if (!fusion->sense_dma_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
					       GFP_KERNEL,
					       &fusion->sense_phys_addr);
		if (!fusion->sense) {
			dev_err(&instance->pdev->dev,
				"failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}
	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = fusion->cmd_list[i];
		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
					GFP_KERNEL, &cmd->sg_frame_phys_addr);

		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;

		if (!cmd->sg_frame) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	/* create sense buffer for the raid 1/10 fp */
	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = SCSI_SENSE_BUFFERSIZE * i;
		cmd->sense = (u8 *)fusion->sense + offset;
		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
	}

	return 0;
}
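/*
 * The allocate-then-realign pattern used above for the sense pool is
 * reused for the request, reply and RDPQ buffers below. As a sketch
 * (hypothetical helper, not part of the driver), the common shape is:
 *
 *	pool = dma_pool_create(name, &instance->pdev->dev, sz, align, 0);
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &phys);
 *	if (buf && !megasas_check_same_4gb_region(instance, phys, sz)) {
 *		dma_pool_free(pool, buf, phys);
 *		dma_pool_destroy(pool);
 *		pool = dma_pool_create(name, &instance->pdev->dev, sz,
 *				       roundup_pow_of_two(sz), 0);
 *		buf = dma_pool_alloc(pool, GFP_KERNEL, &phys);
 *	}
 *
 * Aligning a buffer to a power of two not smaller than its size forces
 * address bits 32 and above to be constant across the buffer, which is
 * exactly the same-upper-32-bits requirement.
 */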
int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
	u32 max_mpt_cmd, i, j;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	max_mpt_cmd = instance->max_mpt_cmds;

	/*
	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	fusion->cmd_list =
		kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
			GFP_KERNEL);
	if (!fusion->cmd_list) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	for (i = 0; i < max_mpt_cmd; i++) {
		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
					      GFP_KERNEL);
		if (!fusion->cmd_list[i]) {
			for (j = 0; j < i; j++)
				kfree(fusion->cmd_list[j]);
			kfree(fusion->cmd_list);
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	return 0;
}

int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

retry_alloc:
	fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq", &instance->pdev->dev,
				fusion->io_frames_alloc_sz, 16, 0);

	if (!fusion->io_request_frames_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				GFP_KERNEL, &fusion->io_request_frames_phys);
	if (!fusion->io_request_frames) {
		if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
			instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
			dma_pool_destroy(fusion->io_request_frames_pool);
			megasas_configure_queue_sizes(instance);
			goto retry_alloc;
		} else {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->io_request_frames_phys,
					   fusion->io_frames_alloc_sz)) {
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
		fusion->io_request_frames = NULL;
		dma_pool_destroy(fusion->io_request_frames_pool);

		fusion->io_request_frames_pool =
			dma_pool_create("mr_ioreq_align",
					&instance->pdev->dev,
					fusion->io_frames_alloc_sz,
					roundup_pow_of_two(fusion->io_frames_alloc_sz),
					0);

		if (!fusion->io_request_frames_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->io_request_frames =
			dma_pool_alloc(fusion->io_request_frames_pool,
				       GFP_KERNEL,
				       &fusion->io_request_frames_phys);

		if (!fusion->io_request_frames) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	fusion->req_frames_desc =
		dma_alloc_coherent(&instance->pdev->dev,
				   fusion->request_alloc_sz,
				   &fusion->req_frames_desc_phys, GFP_KERNEL);
	if (!fusion->req_frames_desc) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	return 0;
}
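/*
 * Illustration of the retry_alloc loop above (assuming
 * MEGASAS_REDUCE_QD_COUNT is 64 and an illustrative starting depth): if
 * the io_request_frames allocation fails with max_fw_cmds = 1024, the
 * driver retries with 960, 896, ... recomputing io_frames_alloc_sz via
 * megasas_configure_queue_sizes() on each pass, and only gives up once
 * max_fw_cmds would drop below 2 * MEGASAS_REDUCE_QD_COUNT.
 */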
int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	fusion = instance->ctrl_context;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply", &instance->pdev->dev,
				fusion->reply_alloc_sz * count, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->reply_frames_desc[0] =
		dma_pool_alloc(fusion->reply_frames_desc_pool,
			       GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
	if (!fusion->reply_frames_desc[0]) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	if (!megasas_check_same_4gb_region(instance,
					   fusion->reply_frames_desc_phys[0],
					   (fusion->reply_alloc_sz * count))) {
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);
		fusion->reply_frames_desc[0] = NULL;
		dma_pool_destroy(fusion->reply_frames_desc_pool);

		fusion->reply_frames_desc_pool =
			dma_pool_create("mr_reply_align",
					&instance->pdev->dev,
					fusion->reply_alloc_sz * count,
					roundup_pow_of_two(fusion->reply_alloc_sz * count),
					0);

		if (!fusion->reply_frames_desc_pool) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->reply_frames_desc[0] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL,
				       &fusion->reply_frames_desc_phys[0]);

		if (!fusion->reply_frames_desc[0]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
	}

	reply_desc = fusion->reply_frames_desc[0];
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = cpu_to_le64(ULLONG_MAX);

	/* This is not RDPQ mode, but the driver still populates the
	 * reply_frames_desc array so that the ISR path can use the same
	 * MSI-x index on every adapter generation.
	 */
	for (i = 0; i < (count - 1); i++)
		fusion->reply_frames_desc[i + 1] =
			fusion->reply_frames_desc[i] +
			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);

	return 0;
}
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
	int i, j, k, msix_count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
	dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
	u8 dma_alloc_count, abs_index;
	u32 chunk_size, array_size, offset;

	fusion = instance->ctrl_context;
	chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
		     MAX_MSIX_QUEUES_FUSION;

	fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
					       array_size, &fusion->rdpq_phys,
					       GFP_KERNEL);
	if (!fusion->rdpq_virt) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
							 &instance->pdev->dev,
							 chunk_size, 16, 0);
	fusion->reply_frames_desc_pool_align =
				dma_pool_create("mr_rdpq_align",
						&instance->pdev->dev,
						chunk_size,
						roundup_pow_of_two(chunk_size),
						0);

	if (!fusion->reply_frames_desc_pool ||
	    !fusion->reply_frames_desc_pool_align) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n",  __func__, __LINE__);
		return -ENOMEM;
	}

	/*
	 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
	 * for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..)
	 * must lie within a 4GB boundary, and all reply queues in a set must
	 * have the same upper 32 bits in their memory address. So the driver
	 * allocates the DMA'able memory for the reply queues accordingly,
	 * using the stricter VENTURA_SERIES limitation to cover
	 * INVADER_SERIES as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);

	for (i = 0; i < dma_alloc_count; i++) {
		rdpq_chunk_virt[i] =
			dma_pool_alloc(fusion->reply_frames_desc_pool,
				       GFP_KERNEL, &rdpq_chunk_phys[i]);
		if (!rdpq_chunk_virt[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n",  __func__, __LINE__);
			return -ENOMEM;
		}
		/* Each reply descriptor chunk must stay within one 4GB
		 * region; megasas_check_same_4gb_region() verifies this. On
		 * failure the chunk is reallocated from the second,
		 * power-of-two aligned pool, whose alignment guarantees that
		 * any successful allocation meets the same-4GB-region
		 * requirement. For RDPQ buffers the driver therefore always
		 * creates two separate pools up front. rdpq_tracker records
		 * each buffer's physical address, virtual address and owning
		 * pool, which is used when freeing the resources.
		 */
		if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
						   chunk_size)) {
			dma_pool_free(fusion->reply_frames_desc_pool,
				      rdpq_chunk_virt[i],
				      rdpq_chunk_phys[i]);

			rdpq_chunk_virt[i] =
				dma_pool_alloc(fusion->reply_frames_desc_pool_align,
					       GFP_KERNEL, &rdpq_chunk_phys[i]);
			if (!rdpq_chunk_virt[i]) {
				dev_err(&instance->pdev->dev,
					"Failed from %s %d\n",
					__func__, __LINE__);
				return -ENOMEM;
			}
			fusion->rdpq_tracker[i].dma_pool_ptr =
					fusion->reply_frames_desc_pool_align;
		} else {
			fusion->rdpq_tracker[i].dma_pool_ptr =
					fusion->reply_frames_desc_pool;
		}

		fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
		fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
	}
	for (k = 0; k < dma_alloc_count; k++) {
		for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
			abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;

			if (abs_index == msix_count)
				break;
			offset = fusion->reply_alloc_sz * i;
			fusion->rdpq_virt[abs_index].RDPQBaseAddress =
					cpu_to_le64(rdpq_chunk_phys[k] + offset);
			fusion->reply_frames_desc_phys[abs_index] =
					rdpq_chunk_phys[k] + offset;
			fusion->reply_frames_desc[abs_index] =
				(union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);

			reply_desc = fusion->reply_frames_desc[abs_index];
			for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
				reply_desc->Words = cpu_to_le64(ULLONG_MAX);
		}
	}

	return 0;
}
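/*
 * Worked example for the abs_index mapping above (illustrative vector
 * count, assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16): msix_count = 24
 * gives dma_alloc_count = 2. Chunk 0 backs reply queues 0-15 and chunk 1
 * backs queues 16-23, so queue n is placed at
 * rdpq_chunk_phys[n / 16] + reply_alloc_sz * (n % 16), which is the
 * address programmed into RDPQBaseAddress.
 */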
static void
megasas_free_rdpq_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
		if (fusion->rdpq_tracker[i].pool_entry_virt)
			dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
				      fusion->rdpq_tracker[i].pool_entry_virt,
				      fusion->rdpq_tracker[i].pool_entry_phys);
	}

	dma_pool_destroy(fusion->reply_frames_desc_pool);
	dma_pool_destroy(fusion->reply_frames_desc_pool_align);

	if (fusion->rdpq_virt)
		dma_free_coherent(&instance->pdev->dev,
				  sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
				  fusion->rdpq_virt, fusion->rdpq_phys);
}

static void
megasas_free_reply_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->reply_frames_desc[0])
		dma_pool_free(fusion->reply_frames_desc_pool,
			      fusion->reply_frames_desc[0],
			      fusion->reply_frames_desc_phys[0]);

	dma_pool_destroy(fusion->reply_frames_desc_pool);
}


/**
 * megasas_alloc_cmds_fusion - Allocates the command packets
 * @instance: Adapter soft state
 *
 * Each frame has a 32-bit field called context. This context is used to
 * get back the megasas_cmd_fusion from the frame when a frame gets
 * completed. In this driver, the 32 bit values are the indices into an
 * array cmd_list. This array is used only to look up the
 * megasas_cmd_fusion given the context. The free commands themselves are
 * maintained in a linked list called cmd_pool.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request
 * descriptor and is used as the SMID of the cmd.
 * SMID value range is from 1 to max_fw_cmds.
 */
int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	u32 offset;
	dma_addr_t io_req_base_phys;
	u8 *io_req_base;


	fusion = instance->ctrl_context;

	if (megasas_alloc_request_fusion(instance))
		goto fail_exit;

	if (instance->is_rdpq) {
		if (megasas_alloc_rdpq_fusion(instance))
			goto fail_exit;
	} else
		if (megasas_alloc_reply_fusion(instance))
			goto fail_exit;

	if (megasas_alloc_cmdlist_fusion(instance))
		goto fail_exit;

	dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
		 instance->max_fw_cmds);

	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

	/*
	 * Add all the commands to command pool (fusion->cmd_pool)
	 */

	/* SMID 0 is reserved. Set SMID/index from 1 */
	for (i = 0; i < instance->max_mpt_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
		cmd->index = i + 1;
		cmd->scmd = NULL;
		cmd->sync_cmd_idx =
			(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
				(i - instance->max_scsi_cmds) :
				(u32)ULONG_MAX; /* Set to Invalid */
		cmd->instance = instance;
		cmd->io_request =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			  (io_req_base + offset);
		memset(cmd->io_request, 0,
		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	}

	if (megasas_create_sg_sense_fusion(instance))
		goto fail_exit;

	return 0;

fail_exit:
	megasas_free_cmds_fusion(instance);
	return -ENOMEM;
}
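/*
 * For reference, the completion path performs the inverse lookup: a
 * reply descriptor carries the SMID and, because cmd->index = i + 1
 * above, the owning command is recovered (sketch, mirroring the ISR) as:
 *
 *	cmd = megasas_get_cmd_fusion(instance, smid - 1);
 */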
/**
 * wait_and_poll - Issues a polling command
 * @instance: Adapter soft state
 * @cmd: Command packet to be issued
 * @seconds: Maximum poll time in seconds
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds)
{
	int i;
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	u32 msecs = seconds * 1000;

	/*
	 * Wait for cmd_status to change
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
		rmb();
		msleep(20);
	}

	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
		return DCMD_TIMEOUT;
	else if (frame_hdr->cmd_status == MFI_STAT_OK)
		return DCMD_SUCCESS;
	else
		return DCMD_FAILED;
}

/**
 * megasas_ioc_init_fusion - Initializes the FW
 * @instance: Adapter soft state
 *
 * Issues the IOC Init cmd
 */
int
megasas_ioc_init_fusion(struct megasas_instance *instance)
{
	struct megasas_init_frame *init_frame;
	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
	dma_addr_t ioc_init_handle;
	struct megasas_cmd *cmd;
	u8 ret, cur_rdpq_mode;
	struct fusion_context *fusion;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
	int i;
	struct megasas_header *frame_hdr;
	const char *sys_info;
	MFI_CAPABILITIES *drv_ops;
	u32 scratch_pad_1;
	ktime_t time;
	bool cur_fw_64bit_dma_capable;

	fusion = instance->ctrl_context;

	ioc_init_handle = fusion->ioc_init_request_phys;
	IOCInitMessage = fusion->ioc_init_request;

	cmd = fusion->ioc_init_cmd;

	scratch_pad_1 = megasas_readl
		(instance, &instance->reg_set->outbound_scratch_pad_1);

	cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;

	if (instance->adapter_type == INVADER_SERIES) {
		cur_fw_64bit_dma_capable =
			(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;

		if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
			dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
				"DMA mask, but upcoming FW does not support 64bit DMA mask\n");
			megaraid_sas_kill_hba(instance);
			ret = 1;
			goto fail_fw_init;
		}
	}

	if (instance->is_rdpq && !cur_rdpq_mode) {
		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
			" from RDPQ mode to non RDPQ mode\n");
		ret = 1;
		goto fail_fw_init;
	}

	instance->fw_sync_cache_support = (scratch_pad_1 &
		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
		 instance->fw_sync_cache_support ? "Yes" : "No");

	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));

	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);

	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
		cpu_to_le64(fusion->rdpq_phys) :
		cpu_to_le64(fusion->reply_frames_desc_phys[0]);
	IOCInitMessage->MsgFlags = instance->is_rdpq ?
		MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
	IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
	IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	time = ktime_get_real();
	/* Convert to milliseconds as per FW requirement */
	IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
	init_frame = (struct megasas_init_frame *)cmd->frame;
	memset(init_frame, 0, IOC_INIT_FRAME_SIZE);

	frame_hdr = &cmd->frame->hdr;
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;

	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);

	/* driver supports extended MSI-x */
	if (instance->adapter_type >= INVADER_SERIES)
		drv_ops->mfi_capabilities.support_additional_msix = 1;
	/* driver supports HA / Remote LUN over Fast Path interface */
	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;

	drv_ops->mfi_capabilities.support_max_255lds = 1;
	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;

	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		drv_ops->mfi_capabilities.support_ext_io_size = 1;

	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
	if (!dual_qdepth_disable)
		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;

	drv_ops->mfi_capabilities.support_qd_throttling = 1;
	drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
	drv_ops->mfi_capabilities.support_nvme_passthru = 1;
	drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;

	if (instance->consistent_mask_64bit)
		drv_ops->mfi_capabilities.support_64bit_mode = 1;

	/* Convert capability to LE32 */
	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);

	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (instance->system_info_buf && sys_info) {
		memcpy(instance->system_info_buf->systemId, sys_info,
		       strlen(sys_info) > 64 ? 64 : strlen(sys_info));
		instance->system_info_buf->systemIdLength =
			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
		init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
		init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
	}

	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(ioc_init_handle));
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(ioc_init_handle));
	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));

	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
	req_desc.MFAIo.RequestFlags =
		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	/*
	 * disable the intr before firing the init frame
	 */
	instance->instancet->disable_intr(instance);

	for (i = 0; i < (10 * 1000); i += 20) {
		if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
			msleep(20);
		else
			break;
	}

	megasas_fire_cmd_fusion(instance, &req_desc);

	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);

	frame_hdr = &cmd->frame->hdr;
	if (frame_hdr->cmd_status != 0) {
		ret = 1;
		goto fail_fw_init;
	}

	return 0;

fail_fw_init:
	dev_err(&instance->pdev->dev,
		"Init cmd return status FAILED for SCSI host %d\n",
		instance->host->host_no);

	return ret;
}
/**
 * megasas_sync_pd_seq_num - JBOD SEQ MAP
 * @instance: Adapter soft state
 * @pend: set to 1 for a pended JBOD map
 *
 * Issue the JBOD map to the firmware. For a pended command, issue the
 * command and return. For the first instance of the JBOD map, issue the
 * command and wait for its completion.
 */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend)
{
	int ret = 0;
	u32 pd_seq_map_sz;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	dma_addr_t pd_seq_h;

	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			(MAX_PHYSICAL_DEVICES - 1));

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		dev_err(&instance->pdev->dev,
			"Could not get mfi cmd. Fail from %s %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	if (pend) {
		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = MFI_FRAME_DIR_WRITE;
		instance->jbod_seq_cmd = cmd;
	} else {
		dcmd->flags = MFI_FRAME_DIR_READ;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);

	if (pend) {
		instance->instancet->issue_dcmd(instance, cmd);
		return 0;
	}

	/* Below code is only for non pended DCMD */
	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		dev_warn(&instance->pdev->dev,
			 "driver supports max %d JBOD, but FW reports %d\n",
			 MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
		ret = -EINVAL;
	}

	if (ret == DCMD_TIMEOUT)
		megaraid_sas_kill_hba(instance);

	if (ret == DCMD_SUCCESS)
		instance->pd_seq_map_id++;

	megasas_return_cmd(instance, cmd);
	return ret;
}
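/*
 * Note on the (pd_seq_map_id & 1) indexing above: the driver keeps two
 * copies of the JBOD sequence map and ping-pongs between them. For
 * example, with pd_seq_map_id = 3 this sync fills buffer 1, and on
 * DCMD_SUCCESS the id increments so the next sync targets buffer 0,
 * leaving the map currently in use untouched while the new one is
 * fetched.
 */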
/*
 * megasas_get_ld_map_info - Returns FW's ld_map structure
 * @instance: Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD map
 * structure. This information is mainly used to validate the RAID map
 * and decide whether fast path IO can be enabled.
 *
 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
 * dcmd.mbox.b[0] - number of LDs being sync'd
 * dcmd.mbox.b[1] - 0 - complete command immediately.
 *                - 1 - pend till config change
 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
 *                - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
 *                      uses extended struct MR_FW_RAID_MAP_EXT
 */
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	void *ci;
	dma_addr_t ci_h = 0;
	u32 size_map_info;
	struct fusion_context *fusion;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return -ENXIO;
	}

	dcmd = &cmd->frame->dcmd;

	size_map_info = fusion->current_map_sz;

	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, fusion->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	if (!instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
						MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT)
		megaraid_sas_kill_hba(instance);

	megasas_return_cmd(instance, cmd);

	return ret;
}

u8
megasas_get_map_info(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;

	fusion->fast_path_io = 0;
	if (!megasas_get_ld_map_info(instance)) {
		if (MR_ValidateMapInfo(instance, instance->map_id)) {
			fusion->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * megasas_sync_map_info - Sends the LD target sync info to the FW
 * @instance: Adapter soft state
 *
 * Issues an internal command (DCMD) that hands the per-LD target id and
 * sequence number list to the firmware and pends until the next config
 * change.
 */
int
megasas_sync_map_info(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u16 num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_DRV_RAID_MAP_ALL *map;
	struct MR_LD_RAID  *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;
	u32 size_map_info;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return 1;
	}

	map = fusion->ld_drv_map[instance->map_id & 1];

	num_lds = le16_to_cpu(map->raidMap.ldCount);

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	ci = (struct MR_LD_TARGET_SYNC *)
	  fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, fusion->max_map_sz);

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = fusion->current_map_sz;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);

	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);

	return 0;
}

/*
 * megasas_display_intel_branding - Display branding string
 * @instance: per adapter object
 *
 * Return nothing.
 */
static void
megasas_display_intel_branding(struct megasas_instance *instance)
{
	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_INVADER:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3DC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3DC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3DC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3DC040_BRANDING);
			break;
		case MEGARAID_INTEL_RS3SC008_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3SC008_BRANDING);
			break;
		case MEGARAID_INTEL_RS3MC044_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3MC044_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_FURY:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3WC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3WC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3WC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RS3WC040_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RMS3BC160_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				 instance->host->host_no,
				 MEGARAID_INTEL_RMS3BC160_BRANDING);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * megasas_allocate_raid_maps - Allocate memory for RAID maps
 * @instance: Adapter soft state
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	int i = 0;

	fusion = instance->ctrl_context;

	fusion->drv_map_pages = get_order(fusion->drv_map_sz);

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = NULL;

		fusion->ld_drv_map[i] = (void *)
			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
					 fusion->drv_map_pages);

		if (!fusion->ld_drv_map[i]) {
			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);

			if (!fusion->ld_drv_map[i]) {
				dev_err(&instance->pdev->dev,
					"Could not allocate memory for local map"
					" size requested: %d\n",
					fusion->drv_map_sz);
				goto ld_drv_map_alloc_fail;
			}
		}
	}

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
						       fusion->max_map_sz,
						       &fusion->ld_map_phys[i],
						       GFP_KERNEL);
		if (!fusion->ld_map[i]) {
			dev_err(&instance->pdev->dev,
				"Could not allocate memory for map info %s:%d\n",
				__func__, __LINE__);
			goto ld_map_alloc_fail;
		}
	}

	return 0;

ld_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_map[i])
			dma_free_coherent(&instance->pdev->dev,
					  fusion->max_map_sz,
					  fusion->ld_map[i],
					  fusion->ld_map_phys[i]);
	}
ld_drv_map_alloc_fail:
	for (i = 0; i < 2; i++) {
		if (fusion->ld_drv_map[i]) {
			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
				vfree(fusion->ld_drv_map[i]);
			else
				free_pages((ulong)fusion->ld_drv_map[i],
					   fusion->drv_map_pages);
		}
	}

	return -ENOMEM;
}

/**
 * megasas_configure_queue_sizes - Calculate size of request desc queue,
 *                                 reply desc queue and IO request frame
 *                                 queue, and set can_queue.
 * @instance: Adapter soft state
 */
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u16 max_cmd;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;

	if (instance->adapter_type >= VENTURA_SERIES)
		instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
	else
		instance->max_mpt_cmds = instance->max_fw_cmds;

	instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
	instance->cur_can_queue = instance->max_scsi_cmds;
	instance->host->can_queue = instance->cur_can_queue;

	fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;

	fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
					  instance->max_mpt_cmds;
	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
					(fusion->reply_q_depth);
	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
		 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
}
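/*
 * Worked example for the sizing above (illustrative firmware value):
 * with max_fw_cmds = 1008,
 *	reply_q_depth = 2 * ((1008 + 1 + 15) / 16) * 16 = 2048,
 * i.e. twice the command count rounded up to a multiple of 16, where
 * the +1 accounts for the reserved SMID 0.
 */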
static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	struct megasas_cmd *cmd;

	fusion = instance->ctrl_context;

	cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
					IOC_INIT_FRAME_SIZE,
					&cmd->frame_phys_addr, GFP_KERNEL);

	if (!cmd->frame) {
		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
			__func__, __LINE__);
		kfree(cmd);
		return -ENOMEM;
	}

	fusion->ioc_init_cmd = cmd;
	return 0;
}

/**
 * megasas_free_ioc_init_cmd - Free IOC INIT command frame
 * @instance: Adapter soft state
 */
static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
		dma_free_coherent(&instance->pdev->dev,
				  IOC_INIT_FRAME_SIZE,
				  fusion->ioc_init_cmd->frame,
				  fusion->ioc_init_cmd->frame_phys_addr);

	kfree(fusion->ioc_init_cmd);
}

/**
 * megasas_init_adapter_fusion - Initializes the FW
 * @instance: Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u32 scratch_pad_1;
	int i = 0, count;

	fusion = instance->ctrl_context;

	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);

	/*
	 * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
	 */
	instance->max_mfi_cmds =
		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;

	megasas_configure_queue_sizes(instance);

	scratch_pad_1 = megasas_readl(instance,
				      &instance->reg_set->outbound_scratch_pad_1);
	/* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * the firmware supports an extended IO chain frame four times
	 * larger than legacy firmware:
	 * Legacy firmware - frame size is (8 * 128)     = 1K
	 * 1M IO firmware  - frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
	else
		instance->max_chain_frame_sz =
			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;

	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
			instance->max_chain_frame_sz,
			MEGASAS_CHAIN_FRAME_SZ_MIN);
		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
	}

	fusion->max_sge_in_main_msg =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
		 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;

	fusion->max_sge_in_chain =
		instance->max_chain_frame_sz
			/ sizeof(union MPI2_SGE_IO_UNION);

	instance->max_num_sge =
		rounddown_pow_of_two(fusion->max_sge_in_main_msg
				     + fusion->max_sge_in_chain - 2);

	/* Used for pass thru MFI frame (DCMD) */
	fusion->chain_offset_mfi_pthru =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;

	fusion->chain_offset_io_request =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		 sizeof(union MPI2_SGE_IO_UNION))/16;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0 ; i < count; i++)
		fusion->last_reply_idx[i] = 0;

	/*
	 * For fusion adapters, 3 commands for IOCTL and 8 commands
	 * for driver's internal DCMDs.
	 */
	instance->max_scsi_cmds = instance->max_fw_cmds -
				(MEGASAS_FUSION_INTERNAL_CMDS +
				MEGASAS_FUSION_IOCTL_CMDS);
	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);

	if (megasas_alloc_ioc_init_frame(instance))
		return 1;

	/*
	 * Allocate memory for descriptors
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_mfi_cmds;
	if (megasas_alloc_cmds_fusion(instance))
		goto fail_alloc_cmds;

	if (megasas_ioc_init_fusion(instance))
		goto fail_ioc_init;

	megasas_display_intel_branding(instance);
	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev,
			"Could not get controller info. Fail from %s %d\n",
			__func__, __LINE__);
		goto fail_ioc_init;
	}
	instance->flag_ieee = 1;
	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
	fusion->fast_path_io = 0;

	if (megasas_allocate_raid_maps(instance))
		goto fail_ioc_init;

	if (!megasas_get_map_info(instance))
		megasas_sync_map_info(instance);

	return 0;

fail_ioc_init:
	megasas_free_cmds_fusion(instance);
fail_alloc_cmds:
	megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
	megasas_free_ioc_init_cmd(instance);
	return 1;
}

/**
 * megasas_fault_detect_work - Worker function of
 *                             FW fault handling workqueue.
 * @work: FW fault work struct
 */
static void
megasas_fault_detect_work(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance,
			     fw_fault_work.work);
	u32 fw_state, dma_state, status;

	/* Check the fw state */
	fw_state = instance->instancet->read_fw_status_reg(instance) &
			MFI_STATE_MASK;

	if (fw_state == MFI_STATE_FAULT) {
		dma_state = instance->instancet->read_fw_status_reg(instance) &
				MFI_STATE_DMADONE;
		/* Start collecting crash, if DMA bit is done */
		if (instance->crash_dump_drv_support &&
		    instance->crash_dump_app_support && dma_state) {
			megasas_fusion_crash_dump(instance);
		} else {
			if (instance->unload == 0) {
				status = megasas_reset_fusion(instance->host, 0);
				if (status != SUCCESS) {
					dev_err(&instance->pdev->dev,
						"Failed from %s %d, do not re-arm timer\n",
						__func__, __LINE__);
					return;
				}
			}
		}
	}

	if (instance->fw_fault_work_q)
		queue_delayed_work(instance->fw_fault_work_q,
				   &instance->fw_fault_work,
				   msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
}

int
megasas_fusion_start_watchdog(struct megasas_instance *instance)
{
	/* Check if the Fault WQ is already started */
	if (instance->fw_fault_work_q)
		return SUCCESS;

	INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);

	snprintf(instance->fault_handler_work_q_name,
		 sizeof(instance->fault_handler_work_q_name),
		 "poll_megasas%d_status", instance->host->host_no);

	instance->fw_fault_work_q =
		create_singlethread_workqueue(instance->fault_handler_work_q_name);
	if (!instance->fw_fault_work_q) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return FAILED;
	}

	queue_delayed_work(instance->fw_fault_work_q,
			   &instance->fw_fault_work,
			   msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));

	return SUCCESS;
}

void
megasas_fusion_stop_watchdog(struct megasas_instance *instance)
{
	struct workqueue_struct *wq;

	if (instance->fw_fault_work_q) {
		wq = instance->fw_fault_work_q;
		instance->fw_fault_work_q = NULL;
		if (!cancel_delayed_work_sync(&instance->fw_fault_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}
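/*
 * Shutdown ordering note for megasas_fusion_stop_watchdog() above:
 * fw_fault_work_q is cleared before cancelling, so a concurrently
 * running megasas_fault_detect_work() sees a NULL workqueue and cannot
 * re-arm itself. If the cancel reports that no work was pending, the
 * flush drains any instance that was already executing before the
 * workqueue is destroyed.
 */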
cmd_type = megasas_cmd_type(scmd); 1842 switch (status) { 1843 1844 case MFI_STAT_OK: 1845 scmd->result = DID_OK << 16; 1846 break; 1847 1848 case MFI_STAT_SCSI_IO_FAILED: 1849 case MFI_STAT_LD_INIT_IN_PROGRESS: 1850 scmd->result = (DID_ERROR << 16) | ext_status; 1851 break; 1852 1853 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1854 1855 scmd->result = (DID_OK << 16) | ext_status; 1856 if (ext_status == SAM_STAT_CHECK_CONDITION) { 1857 memset(scmd->sense_buffer, 0, 1858 SCSI_SENSE_BUFFERSIZE); 1859 memcpy(scmd->sense_buffer, sense, 1860 SCSI_SENSE_BUFFERSIZE); 1861 scmd->result |= DRIVER_SENSE << 24; 1862 } 1863 1864 /* 1865 * If the IO request is partially completed, then MR FW will 1866 * update the "io_request->DataLength" field with the actual number of 1867 * bytes transferred. The driver will then set the residual byte count 1868 * in the SCSI command structure. 1869 */ 1870 resid = (scsi_bufflen(scmd) - data_length); 1871 scsi_set_resid(scmd, resid); 1872 1873 if (resid && 1874 ((cmd_type == READ_WRITE_LDIO) || 1875 (cmd_type == READ_WRITE_SYSPDIO))) 1876 scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len" 1877 " requested/completed 0x%x/0x%x\n", 1878 status, scsi_bufflen(scmd), data_length); 1879 break; 1880 1881 case MFI_STAT_LD_OFFLINE: 1882 case MFI_STAT_DEVICE_NOT_FOUND: 1883 scmd->result = DID_BAD_TARGET << 16; 1884 break; 1885 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1886 scmd->result = DID_IMM_RETRY << 16; 1887 break; 1888 default: 1889 scmd->result = DID_ERROR << 16; 1890 break; 1891 } 1892 } 1893 1894 /** 1895 * megasas_is_prp_possible - 1896 * Checks if native NVMe PRPs can be built for the IO 1897 * 1898 * @instance: Adapter soft state 1899 * @scmd: SCSI command from the mid-layer 1900 * @sge_count: scatter gather element count. 1901 * 1902 * Returns: true: PRPs can be built 1903 * false: IEEE SGLs need to be built 1904 */ 1905 static bool 1906 megasas_is_prp_possible(struct megasas_instance *instance, 1907 struct scsi_cmnd *scmd, int sge_count) 1908 { 1909 int i; 1910 u32 data_length = 0; 1911 struct scatterlist *sg_scmd; 1912 bool build_prp = false; 1913 u32 mr_nvme_pg_size; 1914 1915 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1916 MR_DEFAULT_NVME_PAGE_SIZE); 1917 data_length = scsi_bufflen(scmd); 1918 sg_scmd = scsi_sglist(scmd); 1919 1920 /* 1921 * NVMe uses one PRP for each page (or part of a page). 1922 * Look at the data length: if 4 pages or less, IEEE SGLs are OK; 1923 * if more than 5 pages, a native (PRP) SGL must be built; 1924 * if more than 4 but at most 5 pages, check the first SG entry: 1925 * if its length is >= the residual beyond 4 pages, 1926 * use IEEE, otherwise build the native SGL. 1927 */ 1928 1929 if (data_length > (mr_nvme_pg_size * 5)) { 1930 build_prp = true; 1931 } else if ((data_length > (mr_nvme_pg_size * 4)) && 1932 (data_length <= (mr_nvme_pg_size * 5))) { 1933 /* check if 1st SG entry size is < residual beyond 4 pages */ 1934 if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4))) 1935 build_prp = true; 1936 } 1937 1938 /* 1939 * The code below detects gaps/holes in IO data buffers. 1940 * What do holes/gaps mean? 1941 * Any SGE except the first one in an SGL starts at a non-NVMe-page-size 1942 * aligned address, OR any SGE except the last one in an SGL ends at a 1943 * non-NVMe-page-size boundary. 1944 * 1945 * The driver has already informed the block layer of the boundary rules 1946 * for bio merging at the NVMe page size boundary by calling the kernel 1947 * API blk_queue_virt_boundary() from slave_configure.
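 *
 * As a minimal sketch of that setup (hypothetical helper; the driver
 * does the equivalent from its slave_configure path, and mr_nvme_pg_size
 * is assumed to be the NVMe page size in bytes):
 *
 *	static int example_set_nvme_boundary(struct scsi_device *sdev,
 *					     u32 mr_nvme_pg_size)
 *	{
 *		blk_queue_virt_boundary(sdev->request_queue,
 *					mr_nvme_pg_size - 1);
 *		return 0;
 *	}
 *
 * After this, the block layer will not merge bios across a gap that is
 * not aligned to the NVMe page size.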
1948 * Even so, IOs with holes can still reach the driver because of the 1949 * IO merging done by the IO scheduler. 1950 * 1951 * With SCSI blk-mq enabled, there will be no IOs with holes, as there is 1952 * no IO scheduling and hence no IO merging. 1953 * 1954 * With SCSI blk-mq disabled, the IO scheduler may merge IOs and 1955 * then send the driver IOs with holes. 1956 * 1957 * Though the driver can ask the block layer to disable IO merging by calling 1958 * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue), the 1959 * user may tune the sysfs parameter 'nomerges' back to 0 or 1. 1960 * 1961 * If IO scheduling is ever enabled with SCSI blk-mq, 1962 * this hole-detection algorithm will be required in the driver 1963 * for the SCSI blk-mq enabled case as well. 1964 * 1965 * 1966 */ 1967 scsi_for_each_sg(scmd, sg_scmd, sge_count, i) { 1968 if ((i != 0) && (i != (sge_count - 1))) { 1969 if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) || 1970 mega_mod64(sg_dma_address(sg_scmd), 1971 mr_nvme_pg_size)) { 1972 build_prp = false; 1973 atomic_inc(&instance->sge_holes_type1); 1974 break; 1975 } 1976 } 1977 1978 if ((sge_count > 1) && (i == 0)) { 1979 if ((mega_mod64((sg_dma_address(sg_scmd) + 1980 sg_dma_len(sg_scmd)), 1981 mr_nvme_pg_size))) { 1982 build_prp = false; 1983 atomic_inc(&instance->sge_holes_type2); 1984 break; 1985 } 1986 } 1987 1988 if ((sge_count > 1) && (i == (sge_count - 1))) { 1989 if (mega_mod64(sg_dma_address(sg_scmd), 1990 mr_nvme_pg_size)) { 1991 build_prp = false; 1992 atomic_inc(&instance->sge_holes_type3); 1993 break; 1994 } 1995 } 1996 } 1997 1998 return build_prp; 1999 } 2000 2001 /** 2002 * megasas_make_prp_nvme - 2003 * Prepare PRPs (Physical Region Pages) - SGLs specific to NVMe drives only 2004 * 2005 * @instance: Adapter soft state 2006 * @scmd: SCSI command from the mid-layer 2007 * @sgl_ptr: SGL to be filled in 2008 * @cmd: Fusion command frame 2009 * @sge_count: scatter gather element count. 2010 * 2011 * Returns: true: PRPs are built 2012 * false: IEEE SGLs need to be built 2013 */ 2014 static bool 2015 megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, 2016 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 2017 struct megasas_cmd_fusion *cmd, int sge_count) 2018 { 2019 int sge_len, offset, num_prp_in_chain = 0; 2020 struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; 2021 u64 *ptr_sgl; 2022 dma_addr_t ptr_sgl_phys; 2023 u64 sge_addr; 2024 u32 page_mask, page_mask_result; 2025 struct scatterlist *sg_scmd; 2026 u32 first_prp_len; 2027 bool build_prp = false; 2028 int data_len = scsi_bufflen(scmd); 2029 u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 2030 MR_DEFAULT_NVME_PAGE_SIZE); 2031 2032 build_prp = megasas_is_prp_possible(instance, scmd, sge_count); 2033 2034 if (!build_prp) 2035 return false; 2036 2037 /* 2038 * NVMe has a very convoluted PRP format. One PRP is required 2039 * for each page or partial page. The driver needs to split up OS sg_list 2040 * entries if they are longer than one page or cross a page 2041 * boundary. The driver also has to insert a PRP list pointer entry as 2042 * the last entry in each physical page of the PRP list. 2043 * 2044 * NOTE: The first PRP "entry" is actually placed in the first 2045 * SGL entry in the main message in IEEE 64 format. The 2nd 2046 * entry in the main message is the chain element, and the rest 2047 * of the PRP entries are built in the contiguous PCIe buffer.
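 *
 * Worked example, assuming a 4 KiB NVMe page size: a 20 KiB IO whose
 * buffer starts at page offset 0x200 gets a first PRP of
 * 4096 - 0x200 = 3584 bytes in the main message; the remaining
 * 16896 bytes are covered by five page-sized PRP entries written to
 * the chain buffer (cmd->sg_frame) that the chain element points at.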
2048 */ 2049 page_mask = mr_nvme_pg_size - 1; 2050 ptr_sgl = (u64 *)cmd->sg_frame; 2051 ptr_sgl_phys = cmd->sg_frame_phys_addr; 2052 memset(ptr_sgl, 0, instance->max_chain_frame_sz); 2053 2054 /* Build chain frame element which holds all prps except first*/ 2055 main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) 2056 ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); 2057 2058 main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); 2059 main_chain_element->NextChainOffset = 0; 2060 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2061 IEEE_SGE_FLAGS_SYSTEM_ADDR | 2062 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 2063 2064 /* Build first prp, sge need not to be page aligned*/ 2065 ptr_first_sgl = sgl_ptr; 2066 sg_scmd = scsi_sglist(scmd); 2067 sge_addr = sg_dma_address(sg_scmd); 2068 sge_len = sg_dma_len(sg_scmd); 2069 2070 offset = (u32)(sge_addr & page_mask); 2071 first_prp_len = mr_nvme_pg_size - offset; 2072 2073 ptr_first_sgl->Address = cpu_to_le64(sge_addr); 2074 ptr_first_sgl->Length = cpu_to_le32(first_prp_len); 2075 2076 data_len -= first_prp_len; 2077 2078 if (sge_len > first_prp_len) { 2079 sge_addr += first_prp_len; 2080 sge_len -= first_prp_len; 2081 } else if (sge_len == first_prp_len) { 2082 sg_scmd = sg_next(sg_scmd); 2083 sge_addr = sg_dma_address(sg_scmd); 2084 sge_len = sg_dma_len(sg_scmd); 2085 } 2086 2087 for (;;) { 2088 offset = (u32)(sge_addr & page_mask); 2089 2090 /* Put PRP pointer due to page boundary*/ 2091 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; 2092 if (unlikely(!page_mask_result)) { 2093 scmd_printk(KERN_NOTICE, 2094 scmd, "page boundary ptr_sgl: 0x%p\n", 2095 ptr_sgl); 2096 ptr_sgl_phys += 8; 2097 *ptr_sgl = cpu_to_le64(ptr_sgl_phys); 2098 ptr_sgl++; 2099 num_prp_in_chain++; 2100 } 2101 2102 *ptr_sgl = cpu_to_le64(sge_addr); 2103 ptr_sgl++; 2104 ptr_sgl_phys += 8; 2105 num_prp_in_chain++; 2106 2107 sge_addr += mr_nvme_pg_size; 2108 sge_len -= mr_nvme_pg_size; 2109 data_len -= mr_nvme_pg_size; 2110 2111 if (data_len <= 0) 2112 break; 2113 2114 if (sge_len > 0) 2115 continue; 2116 2117 sg_scmd = sg_next(sg_scmd); 2118 sge_addr = sg_dma_address(sg_scmd); 2119 sge_len = sg_dma_len(sg_scmd); 2120 } 2121 2122 main_chain_element->Length = 2123 cpu_to_le32(num_prp_in_chain * sizeof(u64)); 2124 2125 atomic_inc(&instance->prp_sgl); 2126 return build_prp; 2127 } 2128 2129 /** 2130 * megasas_make_sgl_fusion - Prepares 32-bit SGL 2131 * @instance: Adapter soft state 2132 * @scp: SCSI command from the mid-layer 2133 * @sgl_ptr: SGL to be filled in 2134 * @cmd: cmd we are working on 2135 * @sge_count sge count 2136 * 2137 */ 2138 static void 2139 megasas_make_sgl_fusion(struct megasas_instance *instance, 2140 struct scsi_cmnd *scp, 2141 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 2142 struct megasas_cmd_fusion *cmd, int sge_count) 2143 { 2144 int i, sg_processed; 2145 struct scatterlist *os_sgl; 2146 struct fusion_context *fusion; 2147 2148 fusion = instance->ctrl_context; 2149 2150 if (instance->adapter_type >= INVADER_SERIES) { 2151 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 2152 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 2153 sgl_ptr_end->Flags = 0; 2154 } 2155 2156 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 2157 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 2158 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 2159 sgl_ptr->Flags = 0; 2160 if (instance->adapter_type >= INVADER_SERIES) 2161 if (i == sge_count - 1) 2162 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 2163 sgl_ptr++; 2164 sg_processed = i + 1; 2165 2166 
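/*
 * A short worked example of the chain spill handled below: if, say,
 * fusion->max_sge_in_main_msg were 8, SGEs 0..6 would occupy the main
 * message; once sg_processed reaches 7 with SGEs still left, slot 7 is
 * rewritten as a chain element pointing at cmd->sg_frame, and the
 * remaining SGEs continue there. (Illustrative numbers only; the real
 * value is derived from the IO frame size at init time.)
 */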
if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && 2167 (sge_count > fusion->max_sge_in_main_msg)) { 2168 2169 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 2170 if (instance->adapter_type >= INVADER_SERIES) { 2171 if ((le16_to_cpu(cmd->io_request->IoFlags) & 2172 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 2173 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 2174 cmd->io_request->ChainOffset = 2175 fusion-> 2176 chain_offset_io_request; 2177 else 2178 cmd->io_request->ChainOffset = 0; 2179 } else 2180 cmd->io_request->ChainOffset = 2181 fusion->chain_offset_io_request; 2182 2183 sg_chain = sgl_ptr; 2184 /* Prepare chain element */ 2185 sg_chain->NextChainOffset = 0; 2186 if (instance->adapter_type >= INVADER_SERIES) 2187 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 2188 else 2189 sg_chain->Flags = 2190 (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2191 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 2192 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); 2193 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); 2194 2195 sgl_ptr = 2196 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; 2197 memset(sgl_ptr, 0, instance->max_chain_frame_sz); 2198 } 2199 } 2200 atomic_inc(&instance->ieee_sgl); 2201 } 2202 2203 /** 2204 * megasas_make_sgl - Build Scatter Gather Lists (SGLs) 2205 * @scp: SCSI command pointer 2206 * @instance: Soft instance of controller 2207 * @cmd: Fusion command pointer 2208 * 2209 * This function builds SGLs based on the device type. 2210 * For NVMe drives, there is a different way of building SGLs, in the NVMe 2211 * native format: PRPs (Physical Region Pages). 2212 * 2213 * Returns the number of SG elements actually mapped, zero if the SG list 2214 * is NULL, or -ENOMEM if the mapping failed 2215 */ 2216 static 2217 int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, 2218 struct megasas_cmd_fusion *cmd) 2219 { 2220 int sge_count; 2221 bool build_prp = false; 2222 struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64; 2223 2224 sge_count = scsi_dma_map(scp); 2225 2226 if ((sge_count > instance->max_num_sge) || (sge_count <= 0)) 2227 return sge_count; 2228 2229 sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL; 2230 if ((le16_to_cpu(cmd->io_request->IoFlags) & 2231 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) && 2232 (cmd->pd_interface == NVME_PD)) 2233 build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64, 2234 cmd, sge_count); 2235 2236 if (!build_prp) 2237 megasas_make_sgl_fusion(instance, scp, sgl_chain64, 2238 cmd, sge_count); 2239 2240 return sge_count; 2241 } 2242 2243 /** 2244 * megasas_set_pd_lba - Sets PD LBA 2245 * @io_request: IO request frame whose CDB is rewritten 2246 * @cdb_len: CDB length 2247 * @io_info: IO request info carrying the start block and block count 2248 * @scp/@local_map_ptr/@ref_tag: SCSI cmd, driver RAID map, DIF ref tag 2249 * Used to set the PD LBA in CDB for FP IOs 2250 */ 2251 void 2252 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 2253 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 2254 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 2255 { 2256 struct MR_LD_RAID *raid; 2257 u16 ld; 2258 u64 start_blk = io_info->pdBlock; 2259 u8 *cdb = io_request->CDB.CDB32; 2260 u32 num_blocks = io_info->numBlocks; 2261 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; 2262 2263 /* Check if T10 PI (DIF) is enabled for this LD */ 2264 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 2265 raid = MR_LdRaidGet(ld, local_map_ptr); 2266 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 2267 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 2268 cdb[0] =
MEGASAS_SCSI_VARIABLE_LENGTH_CMD; 2269 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; 2270 2271 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2272 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 2273 else 2274 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 2275 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; 2276 2277 /* LBA */ 2278 cdb[12] = (u8)((start_blk >> 56) & 0xff); 2279 cdb[13] = (u8)((start_blk >> 48) & 0xff); 2280 cdb[14] = (u8)((start_blk >> 40) & 0xff); 2281 cdb[15] = (u8)((start_blk >> 32) & 0xff); 2282 cdb[16] = (u8)((start_blk >> 24) & 0xff); 2283 cdb[17] = (u8)((start_blk >> 16) & 0xff); 2284 cdb[18] = (u8)((start_blk >> 8) & 0xff); 2285 cdb[19] = (u8)(start_blk & 0xff); 2286 2287 /* Logical block reference tag */ 2288 io_request->CDB.EEDP32.PrimaryReferenceTag = 2289 cpu_to_be32(ref_tag); 2290 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); 2291 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 2292 2293 /* Transfer length */ 2294 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 2295 cdb[29] = (u8)((num_blocks >> 16) & 0xff); 2296 cdb[30] = (u8)((num_blocks >> 8) & 0xff); 2297 cdb[31] = (u8)(num_blocks & 0xff); 2298 2299 /* set SCSI IO EEDPFlags */ 2300 if (scp->sc_data_direction == DMA_FROM_DEVICE) { 2301 io_request->EEDPFlags = cpu_to_le16( 2302 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2303 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2304 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 2305 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 2306 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE | 2307 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 2308 } else { 2309 io_request->EEDPFlags = cpu_to_le16( 2310 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2311 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); 2312 } 2313 io_request->Control |= cpu_to_le32((0x4 << 26)); 2314 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); 2315 } else { 2316 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 2317 if (((cdb_len == 12) || (cdb_len == 16)) && 2318 (start_blk <= 0xffffffff)) { 2319 if (cdb_len == 16) { 2320 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 2321 flagvals = cdb[1]; 2322 groupnum = cdb[14]; 2323 control = cdb[15]; 2324 } else { 2325 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; 2326 flagvals = cdb[1]; 2327 groupnum = cdb[10]; 2328 control = cdb[11]; 2329 } 2330 2331 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 2332 2333 cdb[0] = opcode; 2334 cdb[1] = flagvals; 2335 cdb[6] = groupnum; 2336 cdb[9] = control; 2337 2338 /* Transfer length */ 2339 cdb[8] = (u8)(num_blocks & 0xff); 2340 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 2341 2342 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ 2343 cdb_len = 10; 2344 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 2345 /* Convert to 16 byte CDB for large LBA's */ 2346 switch (cdb_len) { 2347 case 6: 2348 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; 2349 control = cdb[5]; 2350 break; 2351 case 10: 2352 opcode = 2353 cdb[0] == READ_10 ? READ_16 : WRITE_16; 2354 flagvals = cdb[1]; 2355 groupnum = cdb[6]; 2356 control = cdb[9]; 2357 break; 2358 case 12: 2359 opcode = 2360 cdb[0] == READ_12 ? 
READ_16 : WRITE_16; 2361 flagvals = cdb[1]; 2362 groupnum = cdb[10]; 2363 control = cdb[11]; 2364 break; 2365 } 2366 2367 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 2368 2369 cdb[0] = opcode; 2370 cdb[1] = flagvals; 2371 cdb[14] = groupnum; 2372 cdb[15] = control; 2373 2374 /* Transfer length */ 2375 cdb[13] = (u8)(num_blocks & 0xff); 2376 cdb[12] = (u8)((num_blocks >> 8) & 0xff); 2377 cdb[11] = (u8)((num_blocks >> 16) & 0xff); 2378 cdb[10] = (u8)((num_blocks >> 24) & 0xff); 2379 2380 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ 2381 cdb_len = 16; 2382 } 2383 2384 /* Normal case, just load LBA here */ 2385 switch (cdb_len) { 2386 case 6: 2387 { 2388 u8 val = cdb[1] & 0xE0; 2389 cdb[3] = (u8)(start_blk & 0xff); 2390 cdb[2] = (u8)((start_blk >> 8) & 0xff); 2391 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f); 2392 break; 2393 } 2394 case 10: 2395 cdb[5] = (u8)(start_blk & 0xff); 2396 cdb[4] = (u8)((start_blk >> 8) & 0xff); 2397 cdb[3] = (u8)((start_blk >> 16) & 0xff); 2398 cdb[2] = (u8)((start_blk >> 24) & 0xff); 2399 break; 2400 case 12: 2401 cdb[5] = (u8)(start_blk & 0xff); 2402 cdb[4] = (u8)((start_blk >> 8) & 0xff); 2403 cdb[3] = (u8)((start_blk >> 16) & 0xff); 2404 cdb[2] = (u8)((start_blk >> 24) & 0xff); 2405 break; 2406 case 16: 2407 cdb[9] = (u8)(start_blk & 0xff); 2408 cdb[8] = (u8)((start_blk >> 8) & 0xff); 2409 cdb[7] = (u8)((start_blk >> 16) & 0xff); 2410 cdb[6] = (u8)((start_blk >> 24) & 0xff); 2411 cdb[5] = (u8)((start_blk >> 32) & 0xff); 2412 cdb[4] = (u8)((start_blk >> 40) & 0xff); 2413 cdb[3] = (u8)((start_blk >> 48) & 0xff); 2414 cdb[2] = (u8)((start_blk >> 56) & 0xff); 2415 break; 2416 } 2417 } 2418 } 2419 2420 /** 2421 * megasas_stream_detect - stream detection on read and write IOs 2422 * @instance: Adapter soft state 2423 * @cmd: Command to be prepared 2424 * @io_info: IO Request info 2425 * 2426 */ 2427 2428 2429 static void megasas_stream_detect(struct megasas_instance *instance, 2430 struct megasas_cmd_fusion *cmd, 2431 struct IO_REQUEST_INFO *io_info) 2432 { 2433 struct fusion_context *fusion = instance->ctrl_context; 2434 u32 device_id = io_info->ldTgtId; 2435 struct LD_STREAM_DETECT *current_ld_sd 2436 = fusion->stream_detect_by_ld[device_id]; 2437 u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num; 2438 u32 shifted_values, unshifted_values; 2439 u32 index_value_mask, shifted_values_mask; 2440 int i; 2441 bool is_read_ahead = false; 2442 struct STREAM_DETECT *current_sd; 2443 /* find possible stream */ 2444 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) { 2445 stream_num = (*track_stream >> 2446 (i * BITS_PER_INDEX_STREAM)) & 2447 STREAM_MASK; 2448 current_sd = &current_ld_sd->stream_track[stream_num]; 2449 /* if we found a stream, update the raid 2450 * context and also update the mruBitMap 2451 */ 2452 /* boundary condition */ 2453 if ((current_sd->next_seq_lba) && 2454 (io_info->ldStartBlock >= current_sd->next_seq_lba) && 2455 (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) && 2456 (current_sd->is_read == io_info->isRead)) { 2457 2458 if ((io_info->ldStartBlock != current_sd->next_seq_lba) && 2459 ((!io_info->isRead) || (!is_read_ahead))) 2460 /* 2461 * Once the API is available we need to change this.
* At this point we are not allowing any gap 2463 */ 2464 continue; 2465 2466 SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35); 2467 current_sd->next_seq_lba = 2468 io_info->ldStartBlock + io_info->numBlocks; 2469 /* 2470 * update the mruBitMap LRU 2471 */ 2472 shifted_values_mask = 2473 (1 << i * BITS_PER_INDEX_STREAM) - 1; 2474 shifted_values = ((*track_stream & shifted_values_mask) 2475 << BITS_PER_INDEX_STREAM); 2476 index_value_mask = 2477 STREAM_MASK << i * BITS_PER_INDEX_STREAM; 2478 unshifted_values = 2479 *track_stream & ~(shifted_values_mask | 2480 index_value_mask); 2481 *track_stream = 2482 unshifted_values | shifted_values | stream_num; 2483 return; 2484 } 2485 } 2486 /* 2487 * if we did not find any stream, create a new one 2488 * from the least recently used 2489 */ 2490 stream_num = (*track_stream >> 2491 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & 2492 STREAM_MASK; 2493 current_sd = &current_ld_sd->stream_track[stream_num]; 2494 current_sd->is_read = io_info->isRead; 2495 current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks; 2496 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num); 2497 return; 2498 } 2499 2500 /** 2501 * megasas_set_raidflag_cpu_affinity - This function sets the cpu 2502 * affinity (cpu of the controller) and raid_flags in the raid context 2503 * based on IO type. 2504 * 2505 * @praid_context: IO RAID context 2506 * @raid: LD raid map 2507 * @fp_possible: Is fast path possible? 2508 * @is_read: Is read IO? 2509 * @scsi_buff_len: SCSI payload length in bytes 2510 */ 2511 static void 2512 megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, 2513 struct MR_LD_RAID *raid, bool fp_possible, 2514 u8 is_read, u32 scsi_buff_len) 2515 { 2516 u8 cpu_sel = MR_RAID_CTX_CPUSEL_0; 2517 struct RAID_CONTEXT_G35 *rctx_g35; 2518 2519 rctx_g35 = &praid_context->raid_context_g35; 2520 if (fp_possible) { 2521 if (is_read) { 2522 if ((raid->cpuAffinity.pdRead.cpu0) && 2523 (raid->cpuAffinity.pdRead.cpu1)) 2524 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2525 else if (raid->cpuAffinity.pdRead.cpu1) 2526 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2527 } else { 2528 if ((raid->cpuAffinity.pdWrite.cpu0) && 2529 (raid->cpuAffinity.pdWrite.cpu1)) 2530 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2531 else if (raid->cpuAffinity.pdWrite.cpu1) 2532 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2533 /* Fast path cache bypass capable R0/R1 VD */ 2534 if ((raid->level <= 1) && 2535 (raid->capability.fp_cache_bypass_capable)) { 2536 rctx_g35->routing_flags |= 2537 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT); 2538 rctx_g35->raid_flags = 2539 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS 2540 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 2541 } 2542 } 2543 } else { 2544 if (is_read) { 2545 if ((raid->cpuAffinity.ldRead.cpu0) && 2546 (raid->cpuAffinity.ldRead.cpu1)) 2547 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2548 else if (raid->cpuAffinity.ldRead.cpu1) 2549 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2550 } else { 2551 if ((raid->cpuAffinity.ldWrite.cpu0) && 2552 (raid->cpuAffinity.ldWrite.cpu1)) 2553 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS; 2554 else if (raid->cpuAffinity.ldWrite.cpu1) 2555 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2556 2557 if (is_stream_detected(rctx_g35) && 2558 ((raid->level == 5) || (raid->level == 6)) && 2559 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && 2560 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) 2561 cpu_sel = MR_RAID_CTX_CPUSEL_0; 2562 } 2563 } 2564 2565 rctx_g35->routing_flags |= 2566 (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2567 2568 /* Always give priority to
MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2569 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS. 2570 * IO Subtype is not bitmap. 2571 */ 2572 if ((raid->level == 1) && (!is_read)) { 2573 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) 2574 praid_context->raid_context_g35.raid_flags = 2575 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT 2576 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 2577 } 2578 } 2579 2580 /** 2581 * megasas_build_ldio_fusion - Prepares IOs to devices 2582 * @instance: Adapter soft state 2583 * @scp: SCSI command 2584 * @cmd: Command to be prepared 2585 * 2586 * Prepares the io_request and chain elements (sg_frame) for IO 2587 * The IO can be for PD (Fast Path) or LD 2588 */ 2589 void 2590 megasas_build_ldio_fusion(struct megasas_instance *instance, 2591 struct scsi_cmnd *scp, 2592 struct megasas_cmd_fusion *cmd) 2593 { 2594 bool fp_possible; 2595 u16 ld; 2596 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 2597 u32 scsi_buff_len; 2598 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2599 struct IO_REQUEST_INFO io_info; 2600 struct fusion_context *fusion; 2601 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2602 u8 *raidLUN; 2603 unsigned long spinlock_flags; 2604 struct MR_LD_RAID *raid = NULL; 2605 struct MR_PRIV_DEVICE *mrdev_priv; 2606 struct RAID_CONTEXT *rctx; 2607 struct RAID_CONTEXT_G35 *rctx_g35; 2608 2609 device_id = MEGASAS_DEV_INDEX(scp); 2610 2611 fusion = instance->ctrl_context; 2612 2613 io_request = cmd->io_request; 2614 rctx = &io_request->RaidContext.raid_context; 2615 rctx_g35 = &io_request->RaidContext.raid_context_g35; 2616 2617 rctx->virtual_disk_tgt_id = cpu_to_le16(device_id); 2618 rctx->status = 0; 2619 rctx->ex_status = 0; 2620 2621 start_lba_lo = 0; 2622 start_lba_hi = 0; 2623 fp_possible = false; 2624 2625 /* 2626 * 6-byte READ(0x08) or WRITE(0x0A) cdb 2627 */ 2628 if (scp->cmd_len == 6) { 2629 datalength = (u32) scp->cmnd[4]; 2630 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 2631 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 2632 2633 start_lba_lo &= 0x1FFFFF; 2634 } 2635 2636 /* 2637 * 10-byte READ(0x28) or WRITE(0x2A) cdb 2638 */ 2639 else if (scp->cmd_len == 10) { 2640 datalength = (u32) scp->cmnd[8] | 2641 ((u32) scp->cmnd[7] << 8); 2642 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2643 ((u32) scp->cmnd[3] << 16) | 2644 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2645 } 2646 2647 /* 2648 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 2649 */ 2650 else if (scp->cmd_len == 12) { 2651 datalength = ((u32) scp->cmnd[6] << 24) | 2652 ((u32) scp->cmnd[7] << 16) | 2653 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2654 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 2655 ((u32) scp->cmnd[3] << 16) | 2656 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2657 } 2658 2659 /* 2660 * 16-byte READ(0x88) or WRITE(0x8A) cdb 2661 */ 2662 else if (scp->cmd_len == 16) { 2663 datalength = ((u32) scp->cmnd[10] << 24) | 2664 ((u32) scp->cmnd[11] << 16) | 2665 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 2666 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 2667 ((u32) scp->cmnd[7] << 16) | 2668 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 2669 2670 start_lba_hi = ((u32) scp->cmnd[2] << 24) | 2671 ((u32) scp->cmnd[3] << 16) | 2672 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 2673 } 2674 2675 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 2676 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 2677 io_info.numBlocks = datalength; 2678 io_info.ldTgtId = device_id; 2679 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2680 scsi_buff_len = scsi_bufflen(scp); 
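/*
 * The CDB decoding above is plain big-endian byte assembly. For a
 * READ(16), cmnd[2..9] hold the LBA and cmnd[10..13] the transfer
 * length, so an equivalent sketch using the unaligned helpers from
 * <asm/unaligned.h> would be:
 *
 *	io_info.ldStartBlock = get_unaligned_be64(&scp->cmnd[2]);
 *	io_info.numBlocks = get_unaligned_be32(&scp->cmnd[10]);
 */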
2681 io_request->DataLength = cpu_to_le32(scsi_buff_len); 2682 2683 if (scp->sc_data_direction == DMA_FROM_DEVICE) 2684 io_info.isRead = 1; 2685 2686 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2687 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2688 2689 if (ld < instance->fw_supported_vd_count) 2690 raid = MR_LdRaidGet(ld, local_map_ptr); 2691 2692 if (!raid || (!fusion->fast_path_io)) { 2693 rctx->reg_lock_flags = 0; 2694 fp_possible = false; 2695 } else { 2696 if (MR_BuildRaidContext(instance, &io_info, rctx, 2697 local_map_ptr, &raidLUN)) 2698 fp_possible = (io_info.fpOkForIo > 0) ? true : false; 2699 } 2700 2701 cmd->request_desc->SCSIIO.MSIxIndex = 2702 instance->reply_map[raw_smp_processor_id()]; 2703 2704 if (instance->adapter_type >= VENTURA_SERIES) { 2705 /* FP for Optimal raid level 1. 2706 * All large RAID-1 writes (> 32 KiB, both WT and WB modes) 2707 * are built by the driver as LD I/Os. 2708 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os 2709 * (there is never a reason to process these as buffered writes) 2710 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os 2711 * with the SLD bit asserted. 2712 */ 2713 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 2714 mrdev_priv = scp->device->hostdata; 2715 2716 if (atomic_inc_return(&instance->fw_outstanding) > 2717 (instance->host->can_queue)) { 2718 fp_possible = false; 2719 atomic_dec(&instance->fw_outstanding); 2720 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2721 (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { 2722 fp_possible = false; 2723 atomic_dec(&instance->fw_outstanding); 2724 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) 2725 atomic_set(&mrdev_priv->r1_ldio_hint, 2726 instance->r1_ldio_hint_default); 2727 } 2728 } 2729 2730 if (!fp_possible || 2731 (io_info.isRead && io_info.ra_capable)) { 2732 spin_lock_irqsave(&instance->stream_lock, 2733 spinlock_flags); 2734 megasas_stream_detect(instance, cmd, &io_info); 2735 spin_unlock_irqrestore(&instance->stream_lock, 2736 spinlock_flags); 2737 /* In ventura if stream detected for a read and it is 2738 * read ahead capable make this IO as LDIO 2739 */ 2740 if (is_stream_detected(rctx_g35)) 2741 fp_possible = false; 2742 } 2743 2744 /* If raid is NULL, set CPU affinity to default CPU0 */ 2745 if (raid) 2746 megasas_set_raidflag_cpu_affinity(&io_request->RaidContext, 2747 raid, fp_possible, io_info.isRead, 2748 scsi_buff_len); 2749 else 2750 rctx_g35->routing_flags |= 2751 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2752 } 2753 2754 if (fp_possible) { 2755 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 2756 local_map_ptr, start_lba_lo); 2757 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2758 cmd->request_desc->SCSIIO.RequestFlags = 2759 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2760 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2761 if (instance->adapter_type == INVADER_SERIES) { 2762 if (rctx->reg_lock_flags == REGION_TYPE_UNUSED) 2763 cmd->request_desc->SCSIIO.RequestFlags = 2764 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2765 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2766 rctx->type = MPI2_TYPE_CUDA; 2767 rctx->nseg = 0x1; 2768 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2769 rctx->reg_lock_flags |= 2770 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2771 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2772 } else if (instance->adapter_type >= VENTURA_SERIES) { 2773 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2774 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << 
RAID_CONTEXT_TYPE_SHIFT); 2775 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2776 io_request->IoFlags |= 2777 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2778 } 2779 if (fusion->load_balance_info && 2780 (fusion->load_balance_info[device_id].loadBalanceFlag) && 2781 (io_info.isRead)) { 2782 io_info.devHandle = 2783 get_updated_dev_handle(instance, 2784 &fusion->load_balance_info[device_id], 2785 &io_info, local_map_ptr); 2786 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2787 cmd->pd_r1_lb = io_info.pd_after_lb; 2788 if (instance->adapter_type >= VENTURA_SERIES) 2789 rctx_g35->span_arm = io_info.span_arm; 2790 else 2791 rctx->span_arm = io_info.span_arm; 2792 2793 } else 2794 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2795 2796 if (instance->adapter_type >= VENTURA_SERIES) 2797 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 2798 else 2799 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2800 2801 if ((raidLUN[0] == 1) && 2802 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { 2803 instance->dev_handle = !(instance->dev_handle); 2804 io_info.devHandle = 2805 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; 2806 } 2807 2808 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 2809 io_request->DevHandle = io_info.devHandle; 2810 cmd->pd_interface = io_info.pd_interface; 2811 /* populate the LUN field */ 2812 memcpy(io_request->LUN, raidLUN, 8); 2813 } else { 2814 rctx->timeout_value = 2815 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 2816 cmd->request_desc->SCSIIO.RequestFlags = 2817 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2818 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2819 if (instance->adapter_type == INVADER_SERIES) { 2820 if (io_info.do_fp_rlbypass || 2821 (rctx->reg_lock_flags == REGION_TYPE_UNUSED)) 2822 cmd->request_desc->SCSIIO.RequestFlags = 2823 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2824 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2825 rctx->type = MPI2_TYPE_CUDA; 2826 rctx->reg_lock_flags |= 2827 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2828 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2829 rctx->nseg = 0x1; 2830 } else if (instance->adapter_type >= VENTURA_SERIES) { 2831 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2832 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); 2833 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2834 } 2835 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2836 io_request->DevHandle = cpu_to_le16(device_id); 2837 2838 } /* Not FP */ 2839 } 2840 2841 /** 2842 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk 2843 * @instance: Adapter soft state 2844 * @scp: SCSI command 2845 * @cmd: Command to be prepared 2846 * 2847 * Prepares the io_request frame for non-rw io cmds for vd. 
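 *
 * For example, an INQUIRY or TEST UNIT READY to a single-drive RAID0
 * LD that the RAID map flags as fpNonRWCapable can be sent straight
 * to the PD device handle (fast path); anything else falls back to
 * the firmware LD IO path.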
2848 */ 2849 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, 2850 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) 2851 { 2852 u32 device_id; 2853 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2854 u16 ld; 2855 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2856 struct fusion_context *fusion = instance->ctrl_context; 2857 u8 span, physArm; 2858 __le16 devHandle; 2859 u32 arRef, pd; 2860 struct MR_LD_RAID *raid; 2861 struct RAID_CONTEXT *pRAID_Context; 2862 u8 fp_possible = 1; 2863 2864 io_request = cmd->io_request; 2865 device_id = MEGASAS_DEV_INDEX(scmd); 2866 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2867 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 2868 /* get RAID_Context pointer */ 2869 pRAID_Context = &io_request->RaidContext.raid_context; 2870 /* Check with FW team */ 2871 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 2872 pRAID_Context->reg_lock_row_lba = 0; 2873 pRAID_Context->reg_lock_length = 0; 2874 2875 if (fusion->fast_path_io && ( 2876 device_id < instance->fw_supported_vd_count)) { 2877 2878 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2879 if (ld >= instance->fw_supported_vd_count - 1) 2880 fp_possible = 0; 2881 else { 2882 raid = MR_LdRaidGet(ld, local_map_ptr); 2883 if (!(raid->capability.fpNonRWCapable)) 2884 fp_possible = 0; 2885 } 2886 } else 2887 fp_possible = 0; 2888 2889 if (!fp_possible) { 2890 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2891 io_request->DevHandle = cpu_to_le16(device_id); 2892 io_request->LUN[1] = scmd->device->lun; 2893 pRAID_Context->timeout_value = 2894 cpu_to_le16 (scmd->request->timeout / HZ); 2895 cmd->request_desc->SCSIIO.RequestFlags = 2896 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2897 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2898 } else { 2899 2900 /* set RAID context values */ 2901 pRAID_Context->config_seq_num = raid->seqNum; 2902 if (instance->adapter_type < VENTURA_SERIES) 2903 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; 2904 pRAID_Context->timeout_value = 2905 cpu_to_le16(raid->fpIoTimeoutForLd); 2906 2907 /* get the DevHandle for the PD (since this is 2908 fpNonRWCapable, this is a single disk RAID0) */ 2909 span = physArm = 0; 2910 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 2911 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 2912 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 2913 2914 /* build request descriptor */ 2915 cmd->request_desc->SCSIIO.RequestFlags = 2916 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 2917 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2918 cmd->request_desc->SCSIIO.DevHandle = devHandle; 2919 2920 /* populate the LUN field */ 2921 memcpy(io_request->LUN, raid->LUN, 8); 2922 2923 /* build the raidScsiIO structure */ 2924 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2925 io_request->DevHandle = devHandle; 2926 } 2927 } 2928 2929 /** 2930 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd 2931 * @instance: Adapter soft state 2932 * @scp: SCSI command 2933 * @cmd: Command to be prepared 2934 * @fp_possible: parameter to detect fast path or firmware path io. 
2935 * 2936 * Prepares the io_request frame for rw/non-rw io cmds for syspds 2937 */ 2938 static void 2939 megasas_build_syspd_fusion(struct megasas_instance *instance, 2940 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, 2941 bool fp_possible) 2942 { 2943 u32 device_id; 2944 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2945 u16 pd_index = 0; 2946 u16 os_timeout_value; 2947 u16 timeout_limit; 2948 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2949 struct RAID_CONTEXT *pRAID_Context; 2950 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 2951 struct MR_PRIV_DEVICE *mr_device_priv_data; 2952 struct fusion_context *fusion = instance->ctrl_context; 2953 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; 2954 2955 device_id = MEGASAS_DEV_INDEX(scmd); 2956 pd_index = MEGASAS_PD_INDEX(scmd); 2957 os_timeout_value = scmd->request->timeout / HZ; 2958 mr_device_priv_data = scmd->device->hostdata; 2959 cmd->pd_interface = mr_device_priv_data->interface_type; 2960 2961 io_request = cmd->io_request; 2962 /* get RAID_Context pointer */ 2963 pRAID_Context = &io_request->RaidContext.raid_context; 2964 pRAID_Context->reg_lock_flags = 0; 2965 pRAID_Context->reg_lock_row_lba = 0; 2966 pRAID_Context->reg_lock_length = 0; 2967 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 2968 io_request->LUN[1] = scmd->device->lun; 2969 pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 2970 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 2971 2972 /* If FW supports PD sequence number */ 2973 if (instance->use_seqnum_jbod_fp && 2974 instance->pd_list[pd_index].driveType == TYPE_DISK) { 2975 /* TgtId must be incremented by 255 as jbod seq number is index 2976 * below raid map 2977 */ 2978 /* More than 256 PD/JBOD support for Ventura */ 2979 if (instance->support_morethan256jbod) 2980 pRAID_Context->virtual_disk_tgt_id = 2981 pd_sync->seq[pd_index].pd_target_id; 2982 else 2983 pRAID_Context->virtual_disk_tgt_id = 2984 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); 2985 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; 2986 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 2987 if (instance->adapter_type >= VENTURA_SERIES) { 2988 io_request->RaidContext.raid_context_g35.routing_flags |= 2989 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2990 io_request->RaidContext.raid_context_g35.nseg_type |= 2991 (1 << RAID_CONTEXT_NSEG_SHIFT); 2992 io_request->RaidContext.raid_context_g35.nseg_type |= 2993 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); 2994 } else { 2995 pRAID_Context->type = MPI2_TYPE_CUDA; 2996 pRAID_Context->nseg = 0x1; 2997 pRAID_Context->reg_lock_flags |= 2998 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 2999 } 3000 } else if (fusion->fast_path_io) { 3001 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3002 pRAID_Context->config_seq_num = 0; 3003 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 3004 io_request->DevHandle = 3005 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 3006 } else { 3007 /* Want to send all IO via FW path */ 3008 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3009 pRAID_Context->config_seq_num = 0; 3010 io_request->DevHandle = cpu_to_le16(0xFFFF); 3011 } 3012 3013 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 3014 3015 cmd->request_desc->SCSIIO.MSIxIndex = 3016 instance->reply_map[raw_smp_processor_id()]; 3017 3018 if (!fp_possible) { 3019 /* system pd firmware path */ 3020 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 3021 
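/*
 * Note on the timeout handling in the two branches below: on the
 * firmware path the OS timeout (in seconds) is used as-is, while on
 * the fast path it is bumped by one and clamped to what the firmware
 * field accepts, e.g. a 60 s request becomes 61, and a very large
 * value is capped at 255 for disks (0xFFFF otherwise).
 */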
cmd->request_desc->SCSIIO.RequestFlags = 3022 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3023 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3024 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); 3025 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 3026 } else { 3027 if (os_timeout_value) 3028 os_timeout_value++; 3029 3030 /* system pd Fast Path */ 3031 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 3032 timeout_limit = (scmd->device->type == TYPE_DISK) ? 3033 255 : 0xFFFF; 3034 pRAID_Context->timeout_value = 3035 cpu_to_le16((os_timeout_value > timeout_limit) ? 3036 timeout_limit : os_timeout_value); 3037 if (instance->adapter_type >= INVADER_SERIES) 3038 io_request->IoFlags |= 3039 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 3040 3041 cmd->request_desc->SCSIIO.RequestFlags = 3042 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 3043 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3044 } 3045 } 3046 3047 /** 3048 * megasas_build_io_fusion - Prepares IOs to devices 3049 * @instance: Adapter soft state 3050 * @scp: SCSI command 3051 * @cmd: Command to be prepared 3052 * 3053 * Invokes helper functions to prepare request frames 3054 * and sets flags appropriate for IO/Non-IO cmd 3055 */ 3056 int 3057 megasas_build_io_fusion(struct megasas_instance *instance, 3058 struct scsi_cmnd *scp, 3059 struct megasas_cmd_fusion *cmd) 3060 { 3061 int sge_count; 3062 u8 cmd_type; 3063 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 3064 struct MR_PRIV_DEVICE *mr_device_priv_data; 3065 mr_device_priv_data = scp->device->hostdata; 3066 3067 /* Zero out some fields so they don't get reused */ 3068 memset(io_request->LUN, 0x0, 8); 3069 io_request->CDB.EEDP32.PrimaryReferenceTag = 0; 3070 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; 3071 io_request->EEDPFlags = 0; 3072 io_request->Control = 0; 3073 io_request->EEDPBlockSize = 0; 3074 io_request->ChainOffset = 0; 3075 io_request->RaidContext.raid_context.raid_flags = 0; 3076 io_request->RaidContext.raid_context.type = 0; 3077 io_request->RaidContext.raid_context.nseg = 0; 3078 3079 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 3080 /* 3081 * Just the CDB length,rest of the Flags are zero 3082 * This will be modified for FP in build_ldio_fusion 3083 */ 3084 io_request->IoFlags = cpu_to_le16(scp->cmd_len); 3085 3086 switch (cmd_type = megasas_cmd_type(scp)) { 3087 case READ_WRITE_LDIO: 3088 megasas_build_ldio_fusion(instance, scp, cmd); 3089 break; 3090 case NON_READ_WRITE_LDIO: 3091 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 3092 break; 3093 case READ_WRITE_SYSPDIO: 3094 megasas_build_syspd_fusion(instance, scp, cmd, true); 3095 break; 3096 case NON_READ_WRITE_SYSPDIO: 3097 if (instance->secure_jbod_support || 3098 mr_device_priv_data->is_tm_capable) 3099 megasas_build_syspd_fusion(instance, scp, cmd, false); 3100 else 3101 megasas_build_syspd_fusion(instance, scp, cmd, true); 3102 break; 3103 default: 3104 break; 3105 } 3106 3107 /* 3108 * Construct SGL 3109 */ 3110 3111 sge_count = megasas_make_sgl(instance, scp, cmd); 3112 3113 if (sge_count > instance->max_num_sge || (sge_count < 0)) { 3114 dev_err(&instance->pdev->dev, 3115 "%s %d sge_count (%d) is out of range. 
Range is: 0-%d\n", 3116 __func__, __LINE__, sge_count, instance->max_num_sge); 3117 return 1; 3118 } 3119 3120 if (instance->adapter_type >= VENTURA_SERIES) { 3121 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); 3122 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); 3123 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); 3124 } else { 3125 /* numSGE store lower 8 bit of sge_count. 3126 * numSGEExt store higher 8 bit of sge_count 3127 */ 3128 io_request->RaidContext.raid_context.num_sge = sge_count; 3129 io_request->RaidContext.raid_context.num_sge_ext = 3130 (u8)(sge_count >> 8); 3131 } 3132 3133 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 3134 3135 if (scp->sc_data_direction == DMA_TO_DEVICE) 3136 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); 3137 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 3138 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); 3139 3140 io_request->SGLOffset0 = 3141 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 3142 3143 io_request->SenseBufferLowAddress = 3144 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 3145 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 3146 3147 cmd->scmd = scp; 3148 scp->SCp.ptr = (char *)cmd; 3149 3150 return 0; 3151 } 3152 3153 static union MEGASAS_REQUEST_DESCRIPTOR_UNION * 3154 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) 3155 { 3156 u8 *p; 3157 struct fusion_context *fusion; 3158 3159 fusion = instance->ctrl_context; 3160 p = fusion->req_frames_desc + 3161 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index; 3162 3163 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; 3164 } 3165 3166 3167 /* megasas_prepate_secondRaid1_IO 3168 * It prepares the raid 1 second IO 3169 */ 3170 void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, 3171 struct megasas_cmd_fusion *cmd, 3172 struct megasas_cmd_fusion *r1_cmd) 3173 { 3174 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; 3175 struct fusion_context *fusion; 3176 fusion = instance->ctrl_context; 3177 req_desc = cmd->request_desc; 3178 /* copy the io request frame as well as 8 SGEs data for r1 command*/ 3179 memcpy(r1_cmd->io_request, cmd->io_request, 3180 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST))); 3181 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, 3182 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); 3183 /*sense buffer is different for r1 command*/ 3184 r1_cmd->io_request->SenseBufferLowAddress = 3185 cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr)); 3186 r1_cmd->scmd = cmd->scmd; 3187 req_desc2 = megasas_get_request_descriptor(instance, 3188 (r1_cmd->index - 1)); 3189 req_desc2->Words = 0; 3190 r1_cmd->request_desc = req_desc2; 3191 req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index); 3192 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags; 3193 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle; 3194 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; 3195 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle; 3196 cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = 3197 cpu_to_le16(r1_cmd->index); 3198 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = 3199 cpu_to_le16(cmd->index); 3200 /*MSIxIndex of both commands request descriptors should be same*/ 3201 r1_cmd->request_desc->SCSIIO.MSIxIndex = 3202 cmd->request_desc->SCSIIO.MSIxIndex; 3203 /*span arm is different for r1 cmd*/ 3204 
r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = 3205 cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; 3206 } 3207 3208 /** 3209 * megasas_build_and_issue_cmd_fusion -Main routine for building and 3210 * issuing non IOCTL cmd 3211 * @instance: Adapter soft state 3212 * @scmd: pointer to scsi cmd from OS 3213 */ 3214 static u32 3215 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, 3216 struct scsi_cmnd *scmd) 3217 { 3218 struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; 3219 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3220 u32 index; 3221 3222 if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && 3223 instance->ldio_threshold && 3224 (atomic_inc_return(&instance->ldio_outstanding) > 3225 instance->ldio_threshold)) { 3226 atomic_dec(&instance->ldio_outstanding); 3227 return SCSI_MLQUEUE_DEVICE_BUSY; 3228 } 3229 3230 if (atomic_inc_return(&instance->fw_outstanding) > 3231 instance->host->can_queue) { 3232 atomic_dec(&instance->fw_outstanding); 3233 return SCSI_MLQUEUE_HOST_BUSY; 3234 } 3235 3236 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); 3237 3238 if (!cmd) { 3239 atomic_dec(&instance->fw_outstanding); 3240 return SCSI_MLQUEUE_HOST_BUSY; 3241 } 3242 3243 index = cmd->index; 3244 3245 req_desc = megasas_get_request_descriptor(instance, index-1); 3246 3247 req_desc->Words = 0; 3248 cmd->request_desc = req_desc; 3249 3250 if (megasas_build_io_fusion(instance, scmd, cmd)) { 3251 megasas_return_cmd_fusion(instance, cmd); 3252 dev_err(&instance->pdev->dev, "Error building command\n"); 3253 cmd->request_desc = NULL; 3254 atomic_dec(&instance->fw_outstanding); 3255 return SCSI_MLQUEUE_HOST_BUSY; 3256 } 3257 3258 req_desc = cmd->request_desc; 3259 req_desc->SCSIIO.SMID = cpu_to_le16(index); 3260 3261 if (cmd->io_request->ChainOffset != 0 && 3262 cmd->io_request->ChainOffset != 0xF) 3263 dev_err(&instance->pdev->dev, "The chain offset value is not " 3264 "correct : %x\n", cmd->io_request->ChainOffset); 3265 /* 3266 * if it is raid 1/10 fp write capable. 3267 * try to get second command from pool and construct it. 
3268 * FW has confirmed that the LBA values of the two PDs 3269 * corresponding to a single R1/10 LD are always the same 3270 * 3271 */ 3272 /* the driver-side count should always be less than max_fw_cmds 3273 * for a new command to be available 3274 */ 3275 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 3276 r1_cmd = megasas_get_cmd_fusion(instance, 3277 (scmd->request->tag + instance->max_fw_cmds)); 3278 megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); 3279 } 3280 3281 3282 /* 3283 * Issue the command to the FW 3284 */ 3285 3286 megasas_fire_cmd_fusion(instance, req_desc); 3287 3288 if (r1_cmd) 3289 megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); 3290 3291 3292 return 0; 3293 } 3294 3295 /** 3296 * megasas_complete_r1_command - 3297 * completes R1 FP write commands which have a valid peer SMID 3298 * @instance: Adapter soft state 3299 * @cmd: MPT command frame 3300 * 3301 */ 3302 static inline void 3303 megasas_complete_r1_command(struct megasas_instance *instance, 3304 struct megasas_cmd_fusion *cmd) 3305 { 3306 u8 *sense, status, ex_status; 3307 u32 data_length; 3308 u16 peer_smid; 3309 struct fusion_context *fusion; 3310 struct megasas_cmd_fusion *r1_cmd = NULL; 3311 struct scsi_cmnd *scmd_local = NULL; 3312 struct RAID_CONTEXT_G35 *rctx_g35; 3313 3314 rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; 3315 fusion = instance->ctrl_context; 3316 peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid); 3317 3318 r1_cmd = fusion->cmd_list[peer_smid - 1]; 3319 scmd_local = cmd->scmd; 3320 status = rctx_g35->status; 3321 ex_status = rctx_g35->ex_status; 3322 data_length = cmd->io_request->DataLength; 3323 sense = cmd->sense; 3324 3325 cmd->cmd_completed = true; 3326 3327 /* Check if the peer command has completed or not */ 3328 if (r1_cmd->cmd_completed) { 3329 rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; 3330 if (rctx_g35->status != MFI_STAT_OK) { 3331 status = rctx_g35->status; 3332 ex_status = rctx_g35->ex_status; 3333 data_length = r1_cmd->io_request->DataLength; 3334 sense = r1_cmd->sense; 3335 } 3336 3337 megasas_return_cmd_fusion(instance, r1_cmd); 3338 map_cmd_status(fusion, scmd_local, status, ex_status, 3339 le32_to_cpu(data_length), sense); 3340 if (instance->ldio_threshold && 3341 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 3342 atomic_dec(&instance->ldio_outstanding); 3343 scmd_local->SCp.ptr = NULL; 3344 megasas_return_cmd_fusion(instance, cmd); 3345 scsi_dma_unmap(scmd_local); 3346 scmd_local->scsi_done(scmd_local); 3347 } 3348 } 3349 3350 /** 3351 * complete_cmd_fusion - Completes commands 3352 * @instance: Adapter soft state; @MSIxIndex: MSI-X reply queue index 3353 * Completes all commands that are in the reply descriptor queue 3354 */ 3355 int 3356 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) 3357 { 3358 union MPI2_REPLY_DESCRIPTORS_UNION *desc; 3359 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 3360 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; 3361 struct fusion_context *fusion; 3362 struct megasas_cmd *cmd_mfi; 3363 struct megasas_cmd_fusion *cmd_fusion; 3364 u16 smid, num_completed; 3365 u8 reply_descript_type, *sense, status, extStatus; 3366 u32 device_id, data_length; 3367 union desc_value d_val; 3368 struct LD_LOAD_BALANCE_INFO *lbinfo; 3369 int threshold_reply_count = 0; 3370 struct scsi_cmnd *scmd_local = NULL; 3371 struct MR_TASK_MANAGE_REQUEST *mr_tm_req; 3372 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 3373 3374 fusion = instance->ctrl_context; 3375 3376 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 3377 return
IRQ_HANDLED; 3378 3379 desc = fusion->reply_frames_desc[MSIxIndex] + 3380 fusion->last_reply_idx[MSIxIndex]; 3381 3382 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 3383 3384 d_val.word = desc->Words; 3385 3386 reply_descript_type = reply_desc->ReplyFlags & 3387 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 3388 3389 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 3390 return IRQ_NONE; 3391 3392 num_completed = 0; 3393 3394 while (d_val.u.low != cpu_to_le32(UINT_MAX) && 3395 d_val.u.high != cpu_to_le32(UINT_MAX)) { 3396 3397 smid = le16_to_cpu(reply_desc->SMID); 3398 cmd_fusion = fusion->cmd_list[smid - 1]; 3399 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) 3400 cmd_fusion->io_request; 3401 3402 scmd_local = cmd_fusion->scmd; 3403 status = scsi_io_req->RaidContext.raid_context.status; 3404 extStatus = scsi_io_req->RaidContext.raid_context.ex_status; 3405 sense = cmd_fusion->sense; 3406 data_length = scsi_io_req->DataLength; 3407 3408 switch (scsi_io_req->Function) { 3409 case MPI2_FUNCTION_SCSI_TASK_MGMT: 3410 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *) 3411 cmd_fusion->io_request; 3412 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) 3413 &mr_tm_req->TmRequest; 3414 dev_dbg(&instance->pdev->dev, "TM completion:" 3415 "type: 0x%x TaskMID: 0x%x\n", 3416 mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 3417 complete(&cmd_fusion->done); 3418 break; 3419 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ 3420 /* Update load balancing info */ 3421 if (fusion->load_balance_info && 3422 (cmd_fusion->scmd->SCp.Status & 3423 MEGASAS_LOAD_BALANCE_FLAG)) { 3424 device_id = MEGASAS_DEV_INDEX(scmd_local); 3425 lbinfo = &fusion->load_balance_info[device_id]; 3426 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 3427 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 3428 } 3429 /* Fall through - and complete IO */ 3430 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 3431 atomic_dec(&instance->fw_outstanding); 3432 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 3433 map_cmd_status(fusion, scmd_local, status, 3434 extStatus, le32_to_cpu(data_length), 3435 sense); 3436 if (instance->ldio_threshold && 3437 (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) 3438 atomic_dec(&instance->ldio_outstanding); 3439 scmd_local->SCp.ptr = NULL; 3440 megasas_return_cmd_fusion(instance, cmd_fusion); 3441 scsi_dma_unmap(scmd_local); 3442 scmd_local->scsi_done(scmd_local); 3443 } else /* Optimal VD - R1 FP command completion. */ 3444 megasas_complete_r1_command(instance, cmd_fusion); 3445 break; 3446 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 3447 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 3448 /* Poll mode. Dummy free. 3449 * In case of Interrupt mode, caller has reverse check. 
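 * That is, for DCMDs issued with MFI_FRAME_DONT_POST_IN_REPLY_QUEUE,
 * wait_and_poll() watches the MFI frame status directly, so this
 * completion path only clears the polled-mode flag and returns the
 * command to the pool rather than running megasas_complete_cmd().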
3450 */ 3451 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { 3452 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; 3453 megasas_return_cmd(instance, cmd_mfi); 3454 } else 3455 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 3456 break; 3457 } 3458 3459 fusion->last_reply_idx[MSIxIndex]++; 3460 if (fusion->last_reply_idx[MSIxIndex] >= 3461 fusion->reply_q_depth) 3462 fusion->last_reply_idx[MSIxIndex] = 0; 3463 3464 desc->Words = cpu_to_le64(ULLONG_MAX); 3465 num_completed++; 3466 threshold_reply_count++; 3467 3468 /* Get the next reply descriptor */ 3469 if (!fusion->last_reply_idx[MSIxIndex]) 3470 desc = fusion->reply_frames_desc[MSIxIndex]; 3471 else 3472 desc++; 3473 3474 reply_desc = 3475 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 3476 3477 d_val.word = desc->Words; 3478 3479 reply_descript_type = reply_desc->ReplyFlags & 3480 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 3481 3482 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 3483 break; 3484 /* 3485 * Write to reply post host index register after completing threshold 3486 * number of reply counts and still there are more replies in reply queue 3487 * pending to be completed 3488 */ 3489 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 3490 if (instance->msix_combined) 3491 writel(((MSIxIndex & 0x7) << 24) | 3492 fusion->last_reply_idx[MSIxIndex], 3493 instance->reply_post_host_index_addr[MSIxIndex/8]); 3494 else 3495 writel((MSIxIndex << 24) | 3496 fusion->last_reply_idx[MSIxIndex], 3497 instance->reply_post_host_index_addr[0]); 3498 threshold_reply_count = 0; 3499 } 3500 } 3501 3502 if (!num_completed) 3503 return IRQ_NONE; 3504 3505 wmb(); 3506 if (instance->msix_combined) 3507 writel(((MSIxIndex & 0x7) << 24) | 3508 fusion->last_reply_idx[MSIxIndex], 3509 instance->reply_post_host_index_addr[MSIxIndex/8]); 3510 else 3511 writel((MSIxIndex << 24) | 3512 fusion->last_reply_idx[MSIxIndex], 3513 instance->reply_post_host_index_addr[0]); 3514 megasas_check_and_restore_queue_depth(instance); 3515 return IRQ_HANDLED; 3516 } 3517 3518 /** 3519 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter 3520 * @instance: Adapter soft state 3521 */ 3522 void megasas_sync_irqs(unsigned long instance_addr) 3523 { 3524 u32 count, i; 3525 struct megasas_instance *instance = 3526 (struct megasas_instance *)instance_addr; 3527 3528 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 3529 3530 for (i = 0; i < count; i++) 3531 synchronize_irq(pci_irq_vector(instance->pdev, i)); 3532 } 3533 3534 /** 3535 * megasas_complete_cmd_dpc_fusion - Completes command 3536 * @instance: Adapter soft state 3537 * 3538 * Tasklet to complete cmds 3539 */ 3540 void 3541 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) 3542 { 3543 struct megasas_instance *instance = 3544 (struct megasas_instance *)instance_addr; 3545 u32 count, MSIxIndex; 3546 3547 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; 3548 3549 /* If we have already declared adapter dead, do not complete cmds */ 3550 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 3551 return; 3552 3553 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) 3554 complete_cmd_fusion(instance, MSIxIndex); 3555 } 3556 3557 /** 3558 * megasas_isr_fusion - isr entry point * @irq: IRQ number * @devp: IRQ context (struct megasas_irq_context) 3559 */ 3560 irqreturn_t megasas_isr_fusion(int irq, void *devp) 3561 { 3562 struct megasas_irq_context *irq_context = devp; 3563 struct megasas_instance *instance = irq_context->instance; 3564 u32 mfiStatus; 3565 3566 if (instance->mask_interrupts) 3567 return IRQ_NONE; 3568 3569 if (!instance->msix_vectors) { 3570 mfiStatus = instance->instancet->clear_intr(instance); 3571 if (!mfiStatus) 3572 return IRQ_NONE; 3573 } 3574 3575 /* If we are resetting, bail */ 3576 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { 3577 instance->instancet->clear_intr(instance); 3578 return IRQ_HANDLED; 3579 } 3580 3581 return complete_cmd_fusion(instance, irq_context->MSIxIndex); 3582 } 3583 3584 /** 3585 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru 3586 * @instance: Adapter soft state 3587 * @mfi_cmd: megasas_cmd pointer 3588 * 3589 */ 3590 void 3591 build_mpt_mfi_pass_thru(struct megasas_instance *instance, 3592 struct megasas_cmd *mfi_cmd) 3593 { 3594 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; 3595 struct MPI2_RAID_SCSI_IO_REQUEST *io_req; 3596 struct megasas_cmd_fusion *cmd; 3597 struct fusion_context *fusion; 3598 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; 3599 3600 fusion = instance->ctrl_context; 3601 3602 cmd = megasas_get_cmd_fusion(instance, 3603 instance->max_scsi_cmds + mfi_cmd->index); 3604 3605 /* Save the smid. To be used for returning the cmd */ 3606 mfi_cmd->context.smid = cmd->index; 3607 3608 /* 3609 * For cmds where the flag is set, store the flag and check 3610 * on completion.
For cmds with this flag, don't call 3611 * megasas_complete_cmd 3612 */ 3613 3614 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 3615 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; 3616 3617 io_req = cmd->io_request; 3618 3619 if (instance->adapter_type >= INVADER_SERIES) { 3620 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 3621 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 3622 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 3623 sgl_ptr_end->Flags = 0; 3624 } 3625 3626 mpi25_ieee_chain = 3627 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; 3628 3629 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 3630 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, 3631 SGL) / 4; 3632 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 3633 3634 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); 3635 3636 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3637 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3638 3639 mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size); 3640 } 3641 3642 /** 3643 * build_mpt_cmd - Calls helper function to build a MFI Pass thru cmd 3644 * @instance: Adapter soft state 3645 * @cmd: mfi cmd to build 3646 * 3647 */ 3648 union MEGASAS_REQUEST_DESCRIPTOR_UNION * 3649 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 3650 { 3651 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; 3652 u16 index; 3653 3654 build_mpt_mfi_pass_thru(instance, cmd); 3655 index = cmd->context.smid; 3656 3657 req_desc = megasas_get_request_descriptor(instance, index - 1); 3658 3659 req_desc->Words = 0; 3660 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3661 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3662 3663 req_desc->SCSIIO.SMID = cpu_to_le16(index); 3664 3665 return req_desc; 3666 } 3667 3668 /** 3669 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd 3670 * @instance: Adapter soft state 3671 * @cmd: mfi cmd pointer 3672 * 3673 */ 3674 void 3675 megasas_issue_dcmd_fusion(struct megasas_instance *instance, 3676 struct megasas_cmd *cmd) 3677 { 3678 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3679 3680 req_desc = build_mpt_cmd(instance, cmd); 3681 3682 megasas_fire_cmd_fusion(instance, req_desc); 3683 return; 3684 } 3685 3686 /** 3687 * megasas_release_fusion - Reverses the FW initialization 3688 * @instance: Adapter soft state 3689 */ 3690 void 3691 megasas_release_fusion(struct megasas_instance *instance) 3692 { 3693 megasas_free_ioc_init_cmd(instance); 3694 megasas_free_cmds(instance); 3695 megasas_free_cmds_fusion(instance); 3696 3697 iounmap(instance->reg_set); 3698 3699 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 3700 } 3701 3702 /** 3703 * megasas_read_fw_status_reg_fusion - returns the current FW status value 3704 * @instance: Adapter soft state 3705 */ 3706 static u32 3707 megasas_read_fw_status_reg_fusion(struct megasas_instance *instance) 3708 { 3709 return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0); 3710 } 3711 3712 /** 3713 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware 3714 * @instance: Controller's soft instance 3715 * return: none; the number of allocated buffers is recorded in instance->drv_buf_alloc 3716 */ 3717 static void 3718 megasas_alloc_host_crash_buffer(struct megasas_instance *instance) 3719 { 3720 unsigned int i; 3721 3722 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { 3723 instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE); 3724 if (!instance->crash_buf[i]) { 3725
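/*
 * Allocation is best-effort: a failure here only caps the crash dump
 * at the buffers obtained so far; drv_buf_alloc below records how many
 * host buffers are actually usable.
 */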
dev_info(&instance->pdev->dev, "Firmware crash dump " 3726 "memory allocation failed at index %d\n", i); 3727 break; 3728 } 3729 } 3730 instance->drv_buf_alloc = i; 3731 } 3732 3733 /** 3734 * megasas_free_host_crash_buffer - Frees host buffers allocated for Crash dump collection from Firmware 3735 * @instance: Controller's soft instance 3736 */ 3737 void 3738 megasas_free_host_crash_buffer(struct megasas_instance *instance) 3739 { 3740 unsigned int i; 3741 for (i = 0; i < instance->drv_buf_alloc; i++) { 3742 if (instance->crash_buf[i]) 3743 vfree(instance->crash_buf[i]); 3744 } 3745 instance->drv_buf_index = 0; 3746 instance->drv_buf_alloc = 0; 3747 instance->fw_crash_state = UNAVAILABLE; 3748 instance->fw_crash_buffer_size = 0; 3749 } 3750 3751 /** 3752 * megasas_adp_reset_fusion - For controller reset * @instance: Adapter soft state 3753 * @regs: MFI register set 3754 */ 3755 static int 3756 megasas_adp_reset_fusion(struct megasas_instance *instance, 3757 struct megasas_register_set __iomem *regs) 3758 { 3759 u32 host_diag, abs_state, retry; 3760 3761 /* Now try to reset the chip */ 3762 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3763 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3764 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3765 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3766 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3767 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3768 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3769 3770 /* Check that the diag write enable (DRWE) bit is on */ 3771 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); 3772 retry = 0; 3773 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3774 msleep(100); 3775 host_diag = megasas_readl(instance, 3776 &instance->reg_set->fusion_host_diag); 3777 if (retry++ == 100) { 3778 dev_warn(&instance->pdev->dev, 3779 "Host diag unlock failed from %s %d\n", 3780 __func__, __LINE__); 3781 break; 3782 } 3783 } 3784 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 3785 return -1; 3786 3787 /* Send chip reset command */ 3788 writel(host_diag | HOST_DIAG_RESET_ADAPTER, 3789 &instance->reg_set->fusion_host_diag); 3790 msleep(3000); 3791 3792 /* Make sure reset adapter bit is cleared */ 3793 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag); 3794 retry = 0; 3795 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3796 msleep(100); 3797 host_diag = megasas_readl(instance, 3798 &instance->reg_set->fusion_host_diag); 3799 if (retry++ == 1000) { 3800 dev_warn(&instance->pdev->dev, 3801 "Diag reset adapter never cleared %s %d\n", 3802 __func__, __LINE__); 3803 break; 3804 } 3805 } 3806 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3807 return -1; 3808 3809 abs_state = instance->instancet->read_fw_status_reg(instance) 3810 & MFI_STATE_MASK; 3811 retry = 0; 3812 3813 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3814 msleep(100); 3815 abs_state = instance->instancet-> 3816 read_fw_status_reg(instance) & MFI_STATE_MASK; 3817 } 3818 if (abs_state <= MFI_STATE_FW_INIT) { 3819 dev_warn(&instance->pdev->dev, 3820 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n", 3821 abs_state, __func__, __LINE__); 3822 return -1; 3823 } 3824 3825 return 0; 3826 } 3827 3828 /** 3829 * megasas_check_reset_fusion - For controller reset check * @instance: Adapter soft state 3830 * @regs: MFI register set 3831 */ 3832 static int 3833 megasas_check_reset_fusion(struct megasas_instance *instance,
3834 struct megasas_register_set __iomem *regs) 3835 { 3836 return 0; 3837 } 3838 3839 /** 3840 * megasas_trigger_snap_dump - Trigger snap dump in FW 3841 * @instance: Soft instance of adapter 3842 */ 3843 static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) 3844 { 3845 int j; 3846 u32 fw_state; 3847 3848 if (!instance->disableOnlineCtrlReset) { 3849 dev_info(&instance->pdev->dev, "Trigger snap dump\n"); 3850 writel(MFI_ADP_TRIGGER_SNAP_DUMP, 3851 &instance->reg_set->doorbell); 3852 readl(&instance->reg_set->doorbell); 3853 } 3854 3855 for (j = 0; j < instance->snapdump_wait_time; j++) { 3856 fw_state = instance->instancet->read_fw_status_reg(instance) & 3857 MFI_STATE_MASK; 3858 if (fw_state == MFI_STATE_FAULT) { 3859 dev_err(&instance->pdev->dev, 3860 "Found FW in FAULT state, after snap dump trigger\n"); 3861 return; 3862 } 3863 msleep(1000); 3864 } 3865 } 3866 3867 /* This function waits for outstanding commands on fusion to complete */ 3868 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 3869 int reason, int *convert) 3870 { 3871 int i, outstanding, retval = 0, hb_seconds_missed = 0; 3872 u32 fw_state; 3873 u32 waittime_for_io_completion; 3874 3875 waittime_for_io_completion = 3876 min_t(u32, resetwaittime, 3877 (resetwaittime - instance->snapdump_wait_time)); 3878 3879 if (reason == MFI_IO_TIMEOUT_OCR) { 3880 dev_info(&instance->pdev->dev, 3881 "MFI command timed out\n"); 3882 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 3883 if (instance->snapdump_wait_time) 3884 megasas_trigger_snap_dump(instance); 3885 retval = 1; 3886 goto out; 3887 } 3888 3889 for (i = 0; i < waittime_for_io_completion; i++) { 3890 /* Check if firmware is in fault state */ 3891 fw_state = instance->instancet->read_fw_status_reg(instance) & 3892 MFI_STATE_MASK; 3893 if (fw_state == MFI_STATE_FAULT) { 3894 dev_warn(&instance->pdev->dev, "Found FW in FAULT state," 3895 " will reset adapter scsi%d.\n", 3896 instance->host->host_no); 3897 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 3898 if (instance->requestorId && reason) { 3899 dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" 3900 " state while polling during" 3901 " I/O timeout handling for %d\n", 3902 instance->host->host_no); 3903 *convert = 1; 3904 } 3905 3906 retval = 1; 3907 goto out; 3908 } 3909 3910 3911 /* If SR-IOV VF mode & heartbeat timeout, don't wait */ 3912 if (instance->requestorId && !reason) { 3913 retval = 1; 3914 goto out; 3915 } 3916 3917 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ 3918 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { 3919 if (instance->hb_host_mem->HB.fwCounter != 3920 instance->hb_host_mem->HB.driverCounter) { 3921 instance->hb_host_mem->HB.driverCounter = 3922 instance->hb_host_mem->HB.fwCounter; 3923 hb_seconds_missed = 0; 3924 } else { 3925 hb_seconds_missed++; 3926 if (hb_seconds_missed == 3927 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { 3928 dev_warn(&instance->pdev->dev, "SR-IOV:" 3929 " Heartbeat never completed" 3930 " while polling during I/O " 3931 "timeout handling for " 3932 "scsi%d.\n", 3933 instance->host->host_no); 3934 *convert = 1; 3935 retval = 1; 3936 goto out; 3937 } 3938 } 3939 } 3940 3941 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 3942 outstanding = atomic_read(&instance->fw_outstanding); 3943 if (!outstanding) 3944 goto out; 3945 3946 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 3947 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 3948 "commands to complete
for scsi%d\n", i, 3949 outstanding, instance->host->host_no); 3950 } 3951 msleep(1000); 3952 } 3953 3954 if (instance->snapdump_wait_time) { 3955 megasas_trigger_snap_dump(instance); 3956 retval = 1; 3957 goto out; 3958 } 3959 3960 if (atomic_read(&instance->fw_outstanding)) { 3961 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 3962 "will reset adapter scsi%d.\n", 3963 instance->host->host_no); 3964 *convert = 1; 3965 retval = 1; 3966 } 3967 3968 out: 3969 return retval; 3970 } 3971 3972 void megasas_reset_reply_desc(struct megasas_instance *instance) 3973 { 3974 int i, j, count; 3975 struct fusion_context *fusion; 3976 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 3977 3978 fusion = instance->ctrl_context; 3979 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 3980 for (i = 0 ; i < count ; i++) { 3981 fusion->last_reply_idx[i] = 0; 3982 reply_desc = fusion->reply_frames_desc[i]; 3983 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++) 3984 reply_desc->Words = cpu_to_le64(ULLONG_MAX); 3985 } 3986 } 3987 3988 /* 3989 * megasas_refire_mgmt_cmd : Re-fire management commands 3990 * @instance: Controller's soft instance 3991 */ 3992 void megasas_refire_mgmt_cmd(struct megasas_instance *instance) 3993 { 3994 int j; 3995 struct megasas_cmd_fusion *cmd_fusion; 3996 struct fusion_context *fusion; 3997 struct megasas_cmd *cmd_mfi; 3998 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3999 u16 smid; 4000 bool refire_cmd = 0; 4001 u8 result; 4002 u32 opcode = 0; 4003 4004 fusion = instance->ctrl_context; 4005 4006 /* Re-fire management commands. 4007 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds. 4008 */ 4009 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { 4010 cmd_fusion = fusion->cmd_list[j]; 4011 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 4012 smid = le16_to_cpu(cmd_mfi->context.smid); 4013 result = REFIRE_CMD; 4014 4015 if (!smid) 4016 continue; 4017 4018 req_desc = megasas_get_request_descriptor(instance, smid - 1); 4019 4020 switch (cmd_mfi->frame->hdr.cmd) { 4021 case MFI_CMD_DCMD: 4022 opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode); 4023 /* Do not refire shutdown command */ 4024 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 4025 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; 4026 result = COMPLETE_CMD; 4027 break; 4028 } 4029 4030 refire_cmd = (opcode != MR_DCMD_LD_MAP_GET_INFO) && 4031 (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 4032 !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE); 4033 4034 if (!refire_cmd) 4035 result = RETURN_CMD; 4036 4037 break; 4038 case MFI_CMD_NVME: 4039 if (!instance->support_nvme_passthru) { 4040 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD; 4041 result = COMPLETE_CMD; 4042 } 4043 4044 break; 4045 default: 4046 break; 4047 } 4048 4049 switch (result) { 4050 case REFIRE_CMD: 4051 megasas_fire_cmd_fusion(instance, req_desc); 4052 break; 4053 case RETURN_CMD: 4054 megasas_return_cmd(instance, cmd_mfi); 4055 break; 4056 case COMPLETE_CMD: 4057 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 4058 break; 4059 } 4060 } 4061 } 4062 4063 /* 4064 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device 4065 * @instance: per adapter struct 4066 * @id: the id assigned by the OS 4067 * @channel: the channel assigned by the OS 4068 * 4069 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED 4070 */ 4071 4072 static int megasas_track_scsiio(struct megasas_instance *instance, 4073 int id, int channel) 4074 { 4075 int i, found = 0; 4076 struct
megasas_cmd_fusion *cmd_fusion; 4077 struct fusion_context *fusion; 4078 fusion = instance->ctrl_context; 4079 4080 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4081 cmd_fusion = fusion->cmd_list[i]; 4082 if (cmd_fusion->scmd && 4083 (cmd_fusion->scmd->device->id == id && 4084 cmd_fusion->scmd->device->channel == channel)) { 4085 dev_info(&instance->pdev->dev, 4086 "SCSI commands pending to target " 4087 "channel %d id %d \tSMID: 0x%x\n", 4088 channel, id, cmd_fusion->index); 4089 scsi_print_command(cmd_fusion->scmd); 4090 found = 1; 4091 break; 4092 } 4093 } 4094 4095 return found ? FAILED : SUCCESS; 4096 } 4097 4098 /** 4099 * megasas_tm_response_code - translation of device response code 4100 * @instance: per adapter object 4101 * @mpi_reply: MPI reply returned by firmware 4102 * 4103 * Return nothing. 4104 */ 4105 static void 4106 megasas_tm_response_code(struct megasas_instance *instance, 4107 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply) 4108 { 4109 char *desc; 4110 4111 switch (mpi_reply->ResponseCode) { 4112 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 4113 desc = "task management request completed"; 4114 break; 4115 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 4116 desc = "invalid frame"; 4117 break; 4118 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 4119 desc = "task management request not supported"; 4120 break; 4121 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 4122 desc = "task management request failed"; 4123 break; 4124 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 4125 desc = "task management request succeeded"; 4126 break; 4127 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 4128 desc = "invalid lun"; 4129 break; 4130 case 0xA: 4131 desc = "overlapped tag attempted"; 4132 break; 4133 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 4134 desc = "task queued, however not sent to target"; 4135 break; 4136 default: 4137 desc = "unknown"; 4138 break; 4139 } 4140 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n", 4141 mpi_reply->ResponseCode, desc); 4142 dev_dbg(&instance->pdev->dev, 4143 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo" 4144 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n", 4145 mpi_reply->TerminationCount, mpi_reply->DevHandle, 4146 mpi_reply->Function, mpi_reply->TaskType, 4147 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo); 4148 } 4149 4150 /** 4151 * megasas_issue_tm - main routine for sending tm requests 4152 * @instance: per adapter struct 4153 * @device_handle: device handle 4154 * @channel: the channel assigned by the OS 4155 * @id: the id assigned by the OS 4156 * @smid_task: smid assigned to the task 4157 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c) 4158 * @mr_device_priv_data: private data of the SCSI device 4159 * Context: user 4160 * 4161 * MegaRaid uses the MPT interface for Task Management requests. 4162 * A generic API for sending task management requests to firmware. 4163 * 4164 * Return SUCCESS or FAILED.
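 *
 * A minimal sketch of a typical call, mirroring the task-abort path
 * below (megasas_task_abort_fusion); the locking shown here is
 * illustrative of that caller, not a requirement of this API:
 *
 *	mutex_lock(&instance->reset_mutex);
 *	mr_device_priv_data->tm_busy = 1;
 *	ret = megasas_issue_tm(instance, devhandle,
 *			scmd->device->channel, scmd->device->id, smid,
 *			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *			mr_device_priv_data);
 *	mr_device_priv_data->tm_busy = 0;
 *	mutex_unlock(&instance->reset_mutex);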
4165 */ 4166 static int 4167 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, 4168 uint channel, uint id, u16 smid_task, u8 type, 4169 struct MR_PRIV_DEVICE *mr_device_priv_data) 4170 { 4171 struct MR_TASK_MANAGE_REQUEST *mr_request; 4172 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; 4173 unsigned long timeleft; 4174 struct megasas_cmd_fusion *cmd_fusion; 4175 struct megasas_cmd *cmd_mfi; 4176 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 4177 struct fusion_context *fusion = NULL; 4178 struct megasas_cmd_fusion *scsi_lookup; 4179 int rc; 4180 int timeout = MEGASAS_DEFAULT_TM_TIMEOUT; 4181 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; 4182 4183 fusion = instance->ctrl_context; 4184 4185 cmd_mfi = megasas_get_cmd(instance); 4186 4187 if (!cmd_mfi) { 4188 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 4189 __func__, __LINE__); 4190 return -ENOMEM; 4191 } 4192 4193 cmd_fusion = megasas_get_cmd_fusion(instance, 4194 instance->max_scsi_cmds + cmd_mfi->index); 4195 4196 /* Save the smid. To be used for returning the cmd */ 4197 cmd_mfi->context.smid = cmd_fusion->index; 4198 4199 req_desc = megasas_get_request_descriptor(instance, 4200 (cmd_fusion->index - 1)); 4201 4202 cmd_fusion->request_desc = req_desc; 4203 req_desc->Words = 0; 4204 4205 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; 4206 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); 4207 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; 4208 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 4209 mpi_request->DevHandle = cpu_to_le16(device_handle); 4210 mpi_request->TaskType = type; 4211 mpi_request->TaskMID = cpu_to_le16(smid_task); 4212 mpi_request->LUN[1] = 0; 4213 4214 4215 req_desc = cmd_fusion->request_desc; 4216 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index); 4217 req_desc->HighPriority.RequestFlags = 4218 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 4219 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 4220 req_desc->HighPriority.MSIxIndex = 0; 4221 req_desc->HighPriority.LMID = 0; 4222 req_desc->HighPriority.Reserved1 = 0; 4223 4224 if (channel < MEGASAS_MAX_PD_CHANNELS) 4225 mr_request->tmReqFlags.isTMForPD = 1; 4226 else 4227 mr_request->tmReqFlags.isTMForLD = 1; 4228 4229 init_completion(&cmd_fusion->done); 4230 megasas_fire_cmd_fusion(instance, req_desc); 4231 4232 switch (type) { 4233 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 4234 timeout = mr_device_priv_data->task_abort_tmo; 4235 break; 4236 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4237 timeout = mr_device_priv_data->target_reset_tmo; 4238 break; 4239 } 4240 4241 timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ); 4242 4243 if (!timeleft) { 4244 dev_err(&instance->pdev->dev, 4245 "task mgmt type 0x%x timed out\n", type); 4246 cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE; 4247 mutex_unlock(&instance->reset_mutex); 4248 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); 4249 mutex_lock(&instance->reset_mutex); 4250 return rc; 4251 } 4252 4253 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply; 4254 megasas_tm_response_code(instance, mpi_reply); 4255 4256 megasas_return_cmd(instance, cmd_mfi); 4257 rc = SUCCESS; 4258 switch (type) { 4259 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 4260 scsi_lookup = fusion->cmd_list[smid_task - 1]; 4261 4262 if (scsi_lookup->scmd == NULL) 4263 break; 4264 else { 4265 instance->instancet->disable_intr(instance); 4266 megasas_sync_irqs((unsigned long)instance); 4267 
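/*
 * The disable_intr/megasas_sync_irqs pair above quiesces the IRQ
 * handlers so that any completion already being processed for the
 * aborted command finishes before scmd is re-checked below.
 */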
instance->instancet->enable_intr(instance); 4268 if (scsi_lookup->scmd == NULL) 4269 break; 4270 } 4271 rc = FAILED; 4272 break; 4273 4274 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4275 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) 4276 break; 4277 instance->instancet->disable_intr(instance); 4278 megasas_sync_irqs((unsigned long)instance); 4279 rc = megasas_track_scsiio(instance, id, channel); 4280 instance->instancet->enable_intr(instance); 4281 4282 break; 4283 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 4284 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: 4285 break; 4286 default: 4287 rc = FAILED; 4288 break; 4289 } 4290 4291 return rc; 4292 4293 } 4294 4295 /* 4296 * megasas_fusion_smid_lookup : Look for fusion command corresponding to a SCSI cmd 4297 * @scmd: SCSI command to look up 4298 * 4299 * Return non-zero index, if SMID found in outstanding commands 4300 */ 4301 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd) 4302 { 4303 int i, ret = 0; 4304 struct megasas_instance *instance; 4305 struct megasas_cmd_fusion *cmd_fusion; 4306 struct fusion_context *fusion; 4307 4308 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4309 4310 fusion = instance->ctrl_context; 4311 4312 for (i = 0; i < instance->max_scsi_cmds; i++) { 4313 cmd_fusion = fusion->cmd_list[i]; 4314 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) { 4315 scmd_printk(KERN_NOTICE, scmd, "Abort request is for" 4316 " SMID: %d\n", cmd_fusion->index); 4317 ret = cmd_fusion->index; 4318 break; 4319 } 4320 } 4321 4322 return ret; 4323 } 4324 4325 /* 4326 * megasas_get_tm_devhandle - Get devhandle for TM request 4327 * @sdev: OS provided scsi device 4328 * 4329 * Returns: devhandle/targetID of SCSI device 4330 */ 4331 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) 4332 { 4333 u16 pd_index = 0; 4334 u32 device_id; 4335 struct megasas_instance *instance; 4336 struct fusion_context *fusion; 4337 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 4338 u16 devhandle = (u16)ULONG_MAX; 4339 4340 instance = (struct megasas_instance *)sdev->host->hostdata; 4341 fusion = instance->ctrl_context; 4342 4343 if (!MEGASAS_IS_LOGICAL(sdev)) { 4344 if (instance->use_seqnum_jbod_fp) { 4345 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) 4346 + sdev->id; 4347 pd_sync = (void *)fusion->pd_seq_sync 4348 [(instance->pd_seq_map_id - 1) & 1]; 4349 devhandle = pd_sync->seq[pd_index].devHandle; 4350 } else 4351 sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable" 4352 " without JBOD MAP support from %s %d\n", __func__, __LINE__); 4353 } else { 4354 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 4355 + sdev->id; 4356 devhandle = device_id; 4357 } 4358 4359 return devhandle; 4360 } 4361 4362 /* 4363 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters 4364 * @scmd : pointer to scsi command object 4365 * 4366 * Return SUCCESS, if command aborted else FAILED 4367 */ 4368 4369 int megasas_task_abort_fusion(struct scsi_cmnd *scmd) 4370 { 4371 struct megasas_instance *instance; 4372 u16 smid, devhandle; 4373 int ret; 4374 struct MR_PRIV_DEVICE *mr_device_priv_data; 4375 mr_device_priv_data = scmd->device->hostdata; 4376 4377 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4378 4379 scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd); 4380 scsi_print_command(scmd); 4381 4382 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 4383 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, " 4384 "SCSI host:%d\n",
instance->host->host_no); 4385 ret = FAILED; 4386 return ret; 4387 } 4388 4389 if (!mr_device_priv_data) { 4390 sdev_printk(KERN_INFO, scmd->device, "device has been deleted! " 4391 "scmd(%p)\n", scmd); 4392 scmd->result = DID_NO_CONNECT << 16; 4393 ret = SUCCESS; 4394 goto out; 4395 } 4396 4397 if (!mr_device_priv_data->is_tm_capable) { 4398 ret = FAILED; 4399 goto out; 4400 } 4401 4402 mutex_lock(&instance->reset_mutex); 4403 4404 smid = megasas_fusion_smid_lookup(scmd); 4405 4406 if (!smid) { 4407 ret = SUCCESS; 4408 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is" 4409 " issued is not found in outstanding commands\n"); 4410 mutex_unlock(&instance->reset_mutex); 4411 goto out; 4412 } 4413 4414 devhandle = megasas_get_tm_devhandle(scmd->device); 4415 4416 if (devhandle == (u16)ULONG_MAX) { 4417 ret = SUCCESS; 4418 sdev_printk(KERN_INFO, scmd->device, 4419 "task abort issued for invalid devhandle\n"); 4420 mutex_unlock(&instance->reset_mutex); 4421 goto out; 4422 } 4423 sdev_printk(KERN_INFO, scmd->device, 4424 "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n", 4425 scmd, devhandle); 4426 4427 mr_device_priv_data->tm_busy = 1; 4428 ret = megasas_issue_tm(instance, devhandle, 4429 scmd->device->channel, scmd->device->id, smid, 4430 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4431 mr_device_priv_data); 4432 mr_device_priv_data->tm_busy = 0; 4433 4434 mutex_unlock(&instance->reset_mutex); 4435 out: 4436 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 4437 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4438 4439 return ret; 4440 } 4441 4442 /* 4443 * megasas_reset_target_fusion : target reset function for fusion adapters 4444 * @scmd: SCSI command pointer 4445 * 4446 * Returns SUCCESS if all commands associated with target aborted else FAILED 4447 */ 4448 4449 int megasas_reset_target_fusion(struct scsi_cmnd *scmd) 4450 { 4451 4452 struct megasas_instance *instance; 4453 int ret = FAILED; 4454 u16 devhandle; 4455 struct MR_PRIV_DEVICE *mr_device_priv_data; 4456 mr_device_priv_data = scmd->device->hostdata; 4457 4458 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4459 4460 sdev_printk(KERN_INFO, scmd->device, 4461 "target reset called for scmd(%p)\n", scmd); 4462 4463 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 4464 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, " 4465 "SCSI host:%d\n", instance->host->host_no); 4466 ret = FAILED; 4467 return ret; 4468 } 4469 4470 if (!mr_device_priv_data) { 4471 sdev_printk(KERN_INFO, scmd->device, "device has been deleted! " 4472 "scmd(%p)\n", scmd); 4473 scmd->result = DID_NO_CONNECT << 16; 4474 ret = SUCCESS; 4475 goto out; 4476 } 4477 4478 if (!mr_device_priv_data->is_tm_capable) { 4479 ret = FAILED; 4480 goto out; 4481 } 4482 4483 mutex_lock(&instance->reset_mutex); 4484 devhandle = megasas_get_tm_devhandle(scmd->device); 4485 4486 if (devhandle == (u16)ULONG_MAX) { 4487 ret = SUCCESS; 4488 sdev_printk(KERN_INFO, scmd->device, 4489 "target reset issued for invalid devhandle\n"); 4490 mutex_unlock(&instance->reset_mutex); 4491 goto out; 4492 } 4493 4494 sdev_printk(KERN_INFO, scmd->device, 4495 "attempting target reset!
scmd(%p) tm_dev_handle 0x%x\n", 4496 scmd, devhandle); 4497 mr_device_priv_data->tm_busy = 1; 4498 ret = megasas_issue_tm(instance, devhandle, 4499 scmd->device->channel, scmd->device->id, 0, 4500 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 4501 mr_device_priv_data); 4502 mr_device_priv_data->tm_busy = 0; 4503 mutex_unlock(&instance->reset_mutex); 4504 out: 4505 scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n", 4506 (ret == SUCCESS) ? "SUCCESS" : "FAILED"); 4507 4508 return ret; 4509 } 4510 4511 /* SR-IOV: get other instance in cluster if any */ 4512 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) 4513 { 4514 int i; 4515 4516 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { 4517 if (megasas_mgmt_info.instance[i] && 4518 (megasas_mgmt_info.instance[i] != instance) && 4519 megasas_mgmt_info.instance[i]->requestorId && 4520 megasas_mgmt_info.instance[i]->peerIsPresent && 4521 (memcmp((megasas_mgmt_info.instance[i]->clusterId), 4522 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) 4523 return megasas_mgmt_info.instance[i]; 4524 } 4525 return NULL; 4526 } 4527 4528 /* Check for a second path that is currently UP */ 4529 int megasas_check_mpio_paths(struct megasas_instance *instance, 4530 struct scsi_cmnd *scmd) 4531 { 4532 struct megasas_instance *peer_instance = NULL; 4533 int retval = (DID_REQUEUE << 16); 4534 4535 if (instance->peerIsPresent) { 4536 peer_instance = megasas_get_peer_instance(instance); 4537 if ((peer_instance) && 4538 (atomic_read(&peer_instance->adprecovery) == 4539 MEGASAS_HBA_OPERATIONAL)) 4540 retval = (DID_NO_CONNECT << 16); 4541 } 4542 return retval; 4543 } 4544 4545 /* Core fusion reset function */ 4546 int megasas_reset_fusion(struct Scsi_Host *shost, int reason) 4547 { 4548 int retval = SUCCESS, i, j, convert = 0; 4549 struct megasas_instance *instance; 4550 struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; 4551 struct fusion_context *fusion; 4552 u32 abs_state, status_reg, reset_adapter; 4553 u32 io_timeout_in_crash_mode = 0; 4554 struct scsi_cmnd *scmd_local = NULL; 4555 struct scsi_device *sdev; 4556 int ret_target_prop = DCMD_FAILED; 4557 bool is_target_prop = false; 4558 4559 instance = (struct megasas_instance *)shost->hostdata; 4560 fusion = instance->ctrl_context; 4561 4562 mutex_lock(&instance->reset_mutex); 4563 4564 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 4565 dev_warn(&instance->pdev->dev, "Hardware critical error, " 4566 "returning FAILED for scsi%d.\n", 4567 instance->host->host_no); 4568 mutex_unlock(&instance->reset_mutex); 4569 return FAILED; 4570 } 4571 status_reg = instance->instancet->read_fw_status_reg(instance); 4572 abs_state = status_reg & MFI_STATE_MASK; 4573 4574 /* IO timeout detected, forcibly put FW in FAULT state */ 4575 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && 4576 instance->crash_dump_app_support && reason) { 4577 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " 4578 "forcing Firmware into FAULT state\n"); 4579 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4580 status_reg = megasas_readl(instance, &instance->reg_set->doorbell); 4581 writel(status_reg | MFI_STATE_FORCE_OCR, 4582 &instance->reg_set->doorbell); 4583 readl(&instance->reg_set->doorbell); 4584 mutex_unlock(&instance->reset_mutex); 4585 do { 4586 ssleep(3); 4587 io_timeout_in_crash_mode++; 4588 dev_dbg(&instance->pdev->dev, "waiting for [%d] " 4589 "seconds for crash dump collection and OCR " 4590 "to be done\n", (io_timeout_in_crash_mode * 3)); 4591 } while
((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && 4592 (io_timeout_in_crash_mode < 80)); 4593 4594 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 4595 dev_info(&instance->pdev->dev, "OCR done for IO " 4596 "timeout case\n"); 4597 retval = SUCCESS; 4598 } else { 4599 dev_info(&instance->pdev->dev, "Controller is not " 4600 "operational after 240 seconds wait for IO " 4601 "timeout case in FW crash dump mode, do " 4602 "OCR/kill adapter\n"); 4603 retval = megasas_reset_fusion(shost, 0); 4604 } 4605 return retval; 4606 } 4607 4608 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 4609 del_timer_sync(&instance->sriov_heartbeat_timer); 4610 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4611 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); 4612 instance->instancet->disable_intr(instance); 4613 megasas_sync_irqs((unsigned long)instance); 4614 4615 /* First try waiting for commands to complete */ 4616 if (megasas_wait_for_outstanding_fusion(instance, reason, 4617 &convert)) { 4618 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4619 dev_warn(&instance->pdev->dev, "resetting fusion " 4620 "adapter scsi%d.\n", instance->host->host_no); 4621 if (convert) 4622 reason = 0; 4623 4624 if (megasas_dbg_lvl & OCR_LOGS) 4625 dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); 4626 4627 /* Now return commands back to the OS */ 4628 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4629 cmd_fusion = fusion->cmd_list[i]; 4630 /* check for extra commands issued by driver */ 4631 if (instance->adapter_type >= VENTURA_SERIES) { 4632 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; 4633 megasas_return_cmd_fusion(instance, r1_cmd); 4634 } 4635 scmd_local = cmd_fusion->scmd; 4636 if (cmd_fusion->scmd) { 4637 if (megasas_dbg_lvl & OCR_LOGS) { 4638 sdev_printk(KERN_INFO, 4639 cmd_fusion->scmd->device, "SMID: 0x%x\n", 4640 cmd_fusion->index); 4641 scsi_print_command(cmd_fusion->scmd); 4642 } 4643 4644 scmd_local->result = 4645 megasas_check_mpio_paths(instance, 4646 scmd_local); 4647 if (instance->ldio_threshold && 4648 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 4649 atomic_dec(&instance->ldio_outstanding); 4650 megasas_return_cmd_fusion(instance, cmd_fusion); 4651 scsi_dma_unmap(scmd_local); 4652 scmd_local->scsi_done(scmd_local); 4653 } 4654 } 4655 4656 atomic_set(&instance->fw_outstanding, 0); 4657 4658 status_reg = instance->instancet->read_fw_status_reg(instance); 4659 abs_state = status_reg & MFI_STATE_MASK; 4660 reset_adapter = status_reg & MFI_RESET_ADAPTER; 4661 if (instance->disableOnlineCtrlReset || 4662 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 4663 /* Reset not supported, kill adapter */ 4664 dev_warn(&instance->pdev->dev, "Reset not supported" 4665 ", killing adapter scsi%d.\n", 4666 instance->host->host_no); 4667 megaraid_sas_kill_hba(instance); 4668 instance->skip_heartbeat_timer_del = 1; 4669 retval = FAILED; 4670 goto out; 4671 } 4672 4673 /* Let SR-IOV VF & PF sync up if there was a HB failure */ 4674 if (instance->requestorId && !reason) { 4675 msleep(MEGASAS_OCR_SETTLE_TIME_VF); 4676 goto transition_to_ready; 4677 } 4678 4679 /* Now try to reset the chip */ 4680 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { 4681 4682 if (instance->instancet->adp_reset 4683 (instance, instance->reg_set)) 4684 continue; 4685 transition_to_ready: 4686 /* Wait for FW to become ready */ 4687 if (megasas_transition_to_ready(instance, 1)) { 4688 dev_warn(&instance->pdev->dev,
"Failed to transition controller to ready for " 4690 "scsi%d.\n", instance->host->host_no); 4691 if (instance->requestorId && !reason) 4692 goto fail_kill_adapter; 4693 else 4694 continue; 4695 } 4696 megasas_reset_reply_desc(instance); 4697 megasas_fusion_update_can_queue(instance, OCR_CONTEXT); 4698 4699 if (megasas_ioc_init_fusion(instance)) { 4700 if (instance->requestorId && !reason) 4701 goto fail_kill_adapter; 4702 else 4703 continue; 4704 } 4705 4706 if (megasas_get_ctrl_info(instance)) { 4707 dev_info(&instance->pdev->dev, 4708 "Failed from %s %d\n", 4709 __func__, __LINE__); 4710 megaraid_sas_kill_hba(instance); 4711 retval = FAILED; 4712 goto out; 4713 } 4714 4715 megasas_refire_mgmt_cmd(instance); 4716 4717 /* Reset load balance info */ 4718 if (fusion->load_balance_info) 4719 memset(fusion->load_balance_info, 0, 4720 (sizeof(struct LD_LOAD_BALANCE_INFO) * 4721 MAX_LOGICAL_DRIVES_EXT)); 4722 4723 if (!megasas_get_map_info(instance)) 4724 megasas_sync_map_info(instance); 4725 4726 megasas_setup_jbod_map(instance); 4727 4728 /* reset stream detection array */ 4729 if (instance->adapter_type >= VENTURA_SERIES) { 4730 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 4731 memset(fusion->stream_detect_by_ld[j], 4732 0, sizeof(struct LD_STREAM_DETECT)); 4733 fusion->stream_detect_by_ld[j]->mru_bit_map 4734 = MR_STREAM_BITMAP; 4735 } 4736 } 4737 4738 clear_bit(MEGASAS_FUSION_IN_RESET, 4739 &instance->reset_flags); 4740 instance->instancet->enable_intr(instance); 4741 4742 shost_for_each_device(sdev, shost) { 4743 if ((instance->tgt_prop) && 4744 (instance->nvme_page_size)) 4745 ret_target_prop = megasas_get_target_prop(instance, sdev); 4746 4747 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 4748 megasas_set_dynamic_target_properties(sdev, is_target_prop); 4749 } 4750 4751 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 4752 4753 dev_info(&instance->pdev->dev, "Interrupts are enabled and" 4754 " controller is OPERATIONAL for scsi:%d\n", 4755 instance->host->host_no); 4756 4757 /* Restart SR-IOV heartbeat */ 4758 if (instance->requestorId) { 4759 if (!megasas_sriov_start_heartbeat(instance, 0)) 4760 megasas_start_timer(instance); 4761 else 4762 instance->skip_heartbeat_timer_del = 1; 4763 } 4764 4765 if (instance->crash_dump_drv_support && 4766 instance->crash_dump_app_support) 4767 megasas_set_crash_dump_params(instance, 4768 MR_CRASH_BUF_TURN_ON); 4769 else 4770 megasas_set_crash_dump_params(instance, 4771 MR_CRASH_BUF_TURN_OFF); 4772 4773 if (instance->snapdump_wait_time) { 4774 megasas_get_snapdump_properties(instance); 4775 dev_info(&instance->pdev->dev, 4776 "Snap dump wait time\t: %d\n", 4777 instance->snapdump_wait_time); 4778 } 4779 4780 retval = SUCCESS; 4781 4782 /* Adapter reset completed successfully */ 4783 dev_warn(&instance->pdev->dev, 4784 "Reset successful for scsi%d.\n", 4785 instance->host->host_no); 4786 4787 goto out; 4788 } 4789 fail_kill_adapter: 4790 /* Reset failed, kill the adapter */ 4791 dev_warn(&instance->pdev->dev, "Reset failed, killing " 4792 "adapter scsi%d.\n", instance->host->host_no); 4793 megaraid_sas_kill_hba(instance); 4794 instance->skip_heartbeat_timer_del = 1; 4795 retval = FAILED; 4796 } else { 4797 /* For VF: Restart HB timer if we didn't OCR */ 4798 if (instance->requestorId) { 4799 megasas_start_timer(instance); 4800 } 4801 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4802 instance->instancet->enable_intr(instance); 4803 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 4804 } 4805 
out: 4806 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4807 mutex_unlock(&instance->reset_mutex); 4808 return retval; 4809 } 4810 4811 /* Fusion Crash dump collection */ 4812 static void megasas_fusion_crash_dump(struct megasas_instance *instance) 4813 { 4814 u32 status_reg; 4815 u8 partial_copy = 0; 4816 int wait = 0; 4817 4818 4819 status_reg = instance->instancet->read_fw_status_reg(instance); 4820 4821 /* 4822 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer 4823 * to host crash buffers 4824 */ 4825 if (instance->drv_buf_index == 0) { 4826 /* Buffer is already allocated for old Crash dump. 4827 * Do OCR and do not wait for crash dump collection 4828 */ 4829 if (instance->drv_buf_alloc) { 4830 dev_info(&instance->pdev->dev, "earlier crash dump is " 4831 "not yet copied by application, ignoring this " 4832 "crash dump and initiating OCR\n"); 4833 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 4834 writel(status_reg, 4835 &instance->reg_set->outbound_scratch_pad_0); 4836 readl(&instance->reg_set->outbound_scratch_pad_0); 4837 return; 4838 } 4839 megasas_alloc_host_crash_buffer(instance); 4840 dev_info(&instance->pdev->dev, "Number of host crash buffers " 4841 "allocated: %d\n", instance->drv_buf_alloc); 4842 } 4843 4844 while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) && 4845 (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) { 4846 if (!(status_reg & MFI_STATE_DMADONE)) { 4847 /* 4848 * Next crash dump buffer is not yet DMA'd by FW 4849 * Check after 10ms. Wait for 1 second for FW to 4850 * post the next buffer. If not bail out. 4851 */ 4852 wait++; 4853 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); 4854 status_reg = instance->instancet->read_fw_status_reg( 4855 instance); 4856 continue; 4857 } 4858 4859 wait = 0; 4860 if (instance->drv_buf_index >= instance->drv_buf_alloc) { 4861 dev_info(&instance->pdev->dev, 4862 "Driver is done copying the buffer: %d\n", 4863 instance->drv_buf_alloc); 4864 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 4865 partial_copy = 1; 4866 break; 4867 } else { 4868 memcpy(instance->crash_buf[instance->drv_buf_index], 4869 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); 4870 instance->drv_buf_index++; 4871 status_reg &= ~MFI_STATE_DMADONE; 4872 } 4873 4874 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); 4875 readl(&instance->reg_set->outbound_scratch_pad_0); 4876 4877 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS); 4878 status_reg = instance->instancet->read_fw_status_reg(instance); 4879 } 4880 4881 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { 4882 dev_info(&instance->pdev->dev, "Crash Dump is available, number " 4883 "of copied buffers: %d\n", instance->drv_buf_index); 4884 instance->fw_crash_buffer_size = instance->drv_buf_index; 4885 instance->fw_crash_state = AVAILABLE; 4886 instance->drv_buf_index = 0; 4887 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0); 4888 readl(&instance->reg_set->outbound_scratch_pad_0); 4889 if (!partial_copy) 4890 megasas_reset_fusion(instance->host, 0); 4891 } 4892 } 4893 4894 4895 /* Fusion OCR work queue */ 4896 void megasas_fusion_ocr_wq(struct work_struct *work) 4897 { 4898 struct megasas_instance *instance = 4899 container_of(work, struct megasas_instance, work_init); 4900 4901 megasas_reset_fusion(instance->host, 0); 4902 } 4903 4904 /* Allocate fusion context */ 4905 int 4906 megasas_alloc_fusion_context(struct megasas_instance *instance) 4907 { 4908 struct fusion_context *fusion; 4909 4910 instance->ctrl_context = kzalloc(sizeof(struct fusion_context), 4911 GFP_KERNEL); 4912 if
(!instance->ctrl_context) { 4913 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 4914 __func__, __LINE__); 4915 return -ENOMEM; 4916 } 4917 4918 fusion = instance->ctrl_context; 4919 4920 fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT * 4921 sizeof(LD_SPAN_INFO)); 4922 fusion->log_to_span = 4923 (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 4924 fusion->log_to_span_pages); 4925 if (!fusion->log_to_span) { 4926 fusion->log_to_span = 4927 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, 4928 sizeof(LD_SPAN_INFO))); 4929 if (!fusion->log_to_span) { 4930 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 4931 __func__, __LINE__); 4932 return -ENOMEM; 4933 } 4934 } 4935 4936 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * 4937 sizeof(struct LD_LOAD_BALANCE_INFO)); 4938 fusion->load_balance_info = 4939 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 4940 fusion->load_balance_info_pages); 4941 if (!fusion->load_balance_info) { 4942 fusion->load_balance_info = 4943 vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT, 4944 sizeof(struct LD_LOAD_BALANCE_INFO))); 4945 if (!fusion->load_balance_info) 4946 dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " 4947 "continuing without Load Balance support\n"); 4948 } 4949 4950 return 0; 4951 } 4952 4953 void 4954 megasas_free_fusion_context(struct megasas_instance *instance) 4955 { 4956 struct fusion_context *fusion = instance->ctrl_context; 4957 4958 if (fusion) { 4959 if (fusion->load_balance_info) { 4960 if (is_vmalloc_addr(fusion->load_balance_info)) 4961 vfree(fusion->load_balance_info); 4962 else 4963 free_pages((ulong)fusion->load_balance_info, 4964 fusion->load_balance_info_pages); 4965 } 4966 4967 if (fusion->log_to_span) { 4968 if (is_vmalloc_addr(fusion->log_to_span)) 4969 vfree(fusion->log_to_span); 4970 else 4971 free_pages((ulong)fusion->log_to_span, 4972 fusion->log_to_span_pages); 4973 } 4974 4975 kfree(fusion); 4976 } 4977 } 4978 4979 struct megasas_instance_template megasas_instance_template_fusion = { 4980 .enable_intr = megasas_enable_intr_fusion, 4981 .disable_intr = megasas_disable_intr_fusion, 4982 .clear_intr = megasas_clear_intr_fusion, 4983 .read_fw_status_reg = megasas_read_fw_status_reg_fusion, 4984 .adp_reset = megasas_adp_reset_fusion, 4985 .check_reset = megasas_check_reset_fusion, 4986 .service_isr = megasas_isr_fusion, 4987 .tasklet = megasas_complete_cmd_dpc_fusion, 4988 .init_adapter = megasas_init_adapter_fusion, 4989 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, 4990 .issue_dcmd = megasas_issue_dcmd_fusion, 4991 }; 4992
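/*
 * Illustrative sketch, not part of the driver: the fusion entry points
 * above are reached through the megasas_instance_template indirection,
 * which keeps the shared megaraid_sas core transport-agnostic. A holder
 * of a megasas_instance would, for example, do:
 *
 *	u32 state;
 *
 *	instance->instancet->disable_intr(instance);
 *	state = instance->instancet->read_fw_status_reg(instance) &
 *		MFI_STATE_MASK;
 *	if (state == MFI_STATE_FAULT)
 *		instance->instancet->adp_reset(instance, instance->reg_set);
 *	instance->instancet->enable_intr(instance);
 *
 * With megasas_instance_template_fusion installed, these calls resolve
 * to megasas_disable_intr_fusion, megasas_read_fw_status_reg_fusion,
 * megasas_adp_reset_fusion and megasas_enable_intr_fusion respectively.
 */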