1 /* 2 * Linux MegaRAID driver for SAS based RAID controllers 3 * 4 * Copyright (c) 2009-2013 LSI Corporation 5 * Copyright (c) 2013-2014 Avago Technologies 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; either version 2 10 * of the License, or (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 * 20 * FILE: megaraid_sas_fusion.c 21 * 22 * Authors: Avago Technologies 23 * Sumant Patro 24 * Adam Radford 25 * Kashyap Desai <kashyap.desai@avagotech.com> 26 * Sumit Saxena <sumit.saxena@avagotech.com> 27 * 28 * Send feedback to: megaraidlinux.pdl@avagotech.com 29 * 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, 31 * San Jose, California 95131 32 */ 33 34 #include <linux/kernel.h> 35 #include <linux/types.h> 36 #include <linux/pci.h> 37 #include <linux/list.h> 38 #include <linux/moduleparam.h> 39 #include <linux/module.h> 40 #include <linux/spinlock.h> 41 #include <linux/interrupt.h> 42 #include <linux/delay.h> 43 #include <linux/uio.h> 44 #include <linux/uaccess.h> 45 #include <linux/fs.h> 46 #include <linux/compat.h> 47 #include <linux/blkdev.h> 48 #include <linux/mutex.h> 49 #include <linux/poll.h> 50 51 #include <scsi/scsi.h> 52 #include <scsi/scsi_cmnd.h> 53 #include <scsi/scsi_device.h> 54 #include <scsi/scsi_host.h> 55 #include <scsi/scsi_dbg.h> 56 #include <linux/dmi.h> 57 58 #include "megaraid_sas_fusion.h" 59 #include "megaraid_sas.h" 60 61 62 extern void megasas_free_cmds(struct megasas_instance *instance); 63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance 64 *instance); 65 extern void 66 megasas_complete_cmd(struct megasas_instance *instance, 67 struct megasas_cmd *cmd, u8 alt_status); 68 int 69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 70 int seconds); 71 72 void 73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); 74 int megasas_alloc_cmds(struct megasas_instance *instance); 75 int 76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); 77 int 78 megasas_issue_polled(struct megasas_instance *instance, 79 struct megasas_cmd *cmd); 80 void 81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance); 82 83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 84 void megaraid_sas_kill_hba(struct megasas_instance *instance); 85 86 extern u32 megasas_dbg_lvl; 87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr); 88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 89 int initial); 90 void megasas_start_timer(struct megasas_instance *instance, 91 struct timer_list *timer, 92 void *fn, unsigned long interval); 93 extern struct megasas_mgmt_info megasas_mgmt_info; 94 extern int resetwaittime; 95 96 97 98 /** 99 * megasas_enable_intr_fusion - Enables interrupts 100 * @regs: MFI register set 101 */ 102 void 103 megasas_enable_intr_fusion(struct megasas_instance *instance) 104 { 105 struct megasas_register_set __iomem *regs; 106 regs = instance->reg_set; 107 108 
instance->mask_interrupts = 0; 109 /* For Thunderbolt/Invader also clear intr on enable */ 110 writel(~0, ®s->outbound_intr_status); 111 readl(®s->outbound_intr_status); 112 113 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 114 115 /* Dummy readl to force pci flush */ 116 readl(®s->outbound_intr_mask); 117 } 118 119 /** 120 * megasas_disable_intr_fusion - Disables interrupt 121 * @regs: MFI register set 122 */ 123 void 124 megasas_disable_intr_fusion(struct megasas_instance *instance) 125 { 126 u32 mask = 0xFFFFFFFF; 127 u32 status; 128 struct megasas_register_set __iomem *regs; 129 regs = instance->reg_set; 130 instance->mask_interrupts = 1; 131 132 writel(mask, ®s->outbound_intr_mask); 133 /* Dummy readl to force pci flush */ 134 status = readl(®s->outbound_intr_mask); 135 } 136 137 int 138 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) 139 { 140 u32 status; 141 /* 142 * Check if it is our interrupt 143 */ 144 status = readl(®s->outbound_intr_status); 145 146 if (status & 1) { 147 writel(status, ®s->outbound_intr_status); 148 readl(®s->outbound_intr_status); 149 return 1; 150 } 151 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 152 return 0; 153 154 return 1; 155 } 156 157 /** 158 * megasas_get_cmd_fusion - Get a command from the free pool 159 * @instance: Adapter soft state 160 * 161 * Returns a blk_tag indexed mpt frame 162 */ 163 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance 164 *instance, u32 blk_tag) 165 { 166 struct fusion_context *fusion; 167 168 fusion = instance->ctrl_context; 169 return fusion->cmd_list[blk_tag]; 170 } 171 172 /** 173 * megasas_return_cmd_fusion - Return a cmd to free command pool 174 * @instance: Adapter soft state 175 * @cmd: Command packet to be returned to free command pool 176 */ 177 inline void megasas_return_cmd_fusion(struct megasas_instance *instance, 178 struct megasas_cmd_fusion *cmd) 179 { 180 cmd->scmd = NULL; 181 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 182 } 183 184 /** 185 * megasas_fire_cmd_fusion - Sends command to the FW 186 */ 187 static void 188 megasas_fire_cmd_fusion(struct megasas_instance *instance, 189 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) 190 { 191 #if defined(writeq) && defined(CONFIG_64BIT) 192 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | 193 le32_to_cpu(req_desc->u.low)); 194 195 writeq(req_data, &instance->reg_set->inbound_low_queue_port); 196 #else 197 unsigned long flags; 198 199 spin_lock_irqsave(&instance->hba_lock, flags); 200 writel(le32_to_cpu(req_desc->u.low), 201 &instance->reg_set->inbound_low_queue_port); 202 writel(le32_to_cpu(req_desc->u.high), 203 &instance->reg_set->inbound_high_queue_port); 204 spin_unlock_irqrestore(&instance->hba_lock, flags); 205 #endif 206 } 207 208 209 /** 210 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool 211 * @instance: Adapter soft state 212 */ 213 static void megasas_teardown_frame_pool_fusion( 214 struct megasas_instance *instance) 215 { 216 int i; 217 struct fusion_context *fusion = instance->ctrl_context; 218 219 u16 max_cmd = instance->max_fw_cmds; 220 221 struct megasas_cmd_fusion *cmd; 222 223 if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { 224 dev_err(&instance->pdev->dev, "dma pool is null. 
SG Pool %p, " 225 "sense pool : %p\n", fusion->sg_dma_pool, 226 fusion->sense_dma_pool); 227 return; 228 } 229 230 /* 231 * Return all frames to pool 232 */ 233 for (i = 0; i < max_cmd; i++) { 234 235 cmd = fusion->cmd_list[i]; 236 237 if (cmd->sg_frame) 238 pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, 239 cmd->sg_frame_phys_addr); 240 241 if (cmd->sense) 242 pci_pool_free(fusion->sense_dma_pool, cmd->sense, 243 cmd->sense_phys_addr); 244 } 245 246 /* 247 * Now destroy the pool itself 248 */ 249 pci_pool_destroy(fusion->sg_dma_pool); 250 pci_pool_destroy(fusion->sense_dma_pool); 251 252 fusion->sg_dma_pool = NULL; 253 fusion->sense_dma_pool = NULL; 254 } 255 256 /** 257 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool 258 * @instance: Adapter soft state 259 */ 260 void 261 megasas_free_cmds_fusion(struct megasas_instance *instance) 262 { 263 int i; 264 struct fusion_context *fusion = instance->ctrl_context; 265 266 u32 max_cmds, req_sz, reply_sz, io_frames_sz; 267 268 269 req_sz = fusion->request_alloc_sz; 270 reply_sz = fusion->reply_alloc_sz; 271 io_frames_sz = fusion->io_frames_alloc_sz; 272 273 max_cmds = instance->max_fw_cmds; 274 275 /* Free descriptors and request Frames memory */ 276 if (fusion->req_frames_desc) 277 dma_free_coherent(&instance->pdev->dev, req_sz, 278 fusion->req_frames_desc, 279 fusion->req_frames_desc_phys); 280 281 if (fusion->reply_frames_desc) { 282 pci_pool_free(fusion->reply_frames_desc_pool, 283 fusion->reply_frames_desc, 284 fusion->reply_frames_desc_phys); 285 pci_pool_destroy(fusion->reply_frames_desc_pool); 286 } 287 288 if (fusion->io_request_frames) { 289 pci_pool_free(fusion->io_request_frames_pool, 290 fusion->io_request_frames, 291 fusion->io_request_frames_phys); 292 pci_pool_destroy(fusion->io_request_frames_pool); 293 } 294 295 /* Free the Fusion frame pool */ 296 megasas_teardown_frame_pool_fusion(instance); 297 298 /* Free all the commands in the cmd_list */ 299 for (i = 0; i < max_cmds; i++) 300 kfree(fusion->cmd_list[i]); 301 302 /* Free the cmd_list buffer itself */ 303 kfree(fusion->cmd_list); 304 fusion->cmd_list = NULL; 305 306 } 307 308 /** 309 * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames 310 * @instance: Adapter soft state 311 * 312 */ 313 static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) 314 { 315 int i; 316 u32 max_cmd; 317 struct fusion_context *fusion; 318 struct megasas_cmd_fusion *cmd; 319 u32 total_sz_chain_frame; 320 321 fusion = instance->ctrl_context; 322 max_cmd = instance->max_fw_cmds; 323 324 total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME; 325 326 /* 327 * Use DMA pool facility provided by PCI layer 328 */ 329 330 fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion", 331 instance->pdev, 332 total_sz_chain_frame, 4, 333 0); 334 if (!fusion->sg_dma_pool) { 335 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n"); 336 return -ENOMEM; 337 } 338 fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", 339 instance->pdev, 340 SCSI_SENSE_BUFFERSIZE, 64, 0); 341 342 if (!fusion->sense_dma_pool) { 343 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n"); 344 pci_pool_destroy(fusion->sg_dma_pool); 345 fusion->sg_dma_pool = NULL; 346 return -ENOMEM; 347 } 348 349 /* 350 * Allocate and attach a frame to each of the commands in cmd_list 351 */ 352 for (i = 0; i < max_cmd; i++) { 353 354 cmd = fusion->cmd_list[i]; 355 356 cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, 
357 GFP_KERNEL, 358 &cmd->sg_frame_phys_addr); 359 360 cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, 361 GFP_KERNEL, &cmd->sense_phys_addr); 362 /* 363 * megasas_teardown_frame_pool_fusion() takes care of freeing 364 * whatever has been allocated 365 */ 366 if (!cmd->sg_frame || !cmd->sense) { 367 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); 368 megasas_teardown_frame_pool_fusion(instance); 369 return -ENOMEM; 370 } 371 } 372 return 0; 373 } 374 375 /** 376 * megasas_alloc_cmds_fusion - Allocates the command packets 377 * @instance: Adapter soft state 378 * 379 * 380 * Each frame has a 32-bit field called context. This context is used to get 381 * back the megasas_cmd_fusion from the frame when a frame gets completed 382 * In this driver, the 32 bit values are the indices into an array cmd_list. 383 * This array is used only to look up the megasas_cmd_fusion given the context. 384 * The free commands themselves are maintained in a linked list called cmd_pool. 385 * 386 * cmds are formed in the io_request and sg_frame members of the 387 * megasas_cmd_fusion. The context field is used to get a request descriptor 388 * and is used as SMID of the cmd. 389 * SMID value range is from 1 to max_fw_cmds. 390 */ 391 int 392 megasas_alloc_cmds_fusion(struct megasas_instance *instance) 393 { 394 int i, j, count; 395 u32 max_cmd, io_frames_sz; 396 struct fusion_context *fusion; 397 struct megasas_cmd_fusion *cmd; 398 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 399 u32 offset; 400 dma_addr_t io_req_base_phys; 401 u8 *io_req_base; 402 403 fusion = instance->ctrl_context; 404 405 max_cmd = instance->max_fw_cmds; 406 407 fusion->req_frames_desc = 408 dma_alloc_coherent(&instance->pdev->dev, 409 fusion->request_alloc_sz, 410 &fusion->req_frames_desc_phys, GFP_KERNEL); 411 412 if (!fusion->req_frames_desc) { 413 dev_err(&instance->pdev->dev, "Could not allocate memory for " 414 "request_frames\n"); 415 goto fail_req_desc; 416 } 417 418 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; 419 fusion->reply_frames_desc_pool = 420 pci_pool_create("reply_frames pool", instance->pdev, 421 fusion->reply_alloc_sz * count, 16, 0); 422 423 if (!fusion->reply_frames_desc_pool) { 424 dev_err(&instance->pdev->dev, "Could not allocate memory for " 425 "reply_frame pool\n"); 426 goto fail_reply_desc; 427 } 428 429 fusion->reply_frames_desc = 430 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, 431 &fusion->reply_frames_desc_phys); 432 if (!fusion->reply_frames_desc) { 433 dev_err(&instance->pdev->dev, "Could not allocate memory for " 434 "reply_frame pool\n"); 435 pci_pool_destroy(fusion->reply_frames_desc_pool); 436 goto fail_reply_desc; 437 } 438 439 reply_desc = fusion->reply_frames_desc; 440 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) 441 reply_desc->Words = cpu_to_le64(ULLONG_MAX); 442 443 io_frames_sz = fusion->io_frames_alloc_sz; 444 445 fusion->io_request_frames_pool = 446 pci_pool_create("io_request_frames pool", instance->pdev, 447 fusion->io_frames_alloc_sz, 16, 0); 448 449 if (!fusion->io_request_frames_pool) { 450 dev_err(&instance->pdev->dev, "Could not allocate memory for " 451 "io_request_frame pool\n"); 452 goto fail_io_frames; 453 } 454 455 fusion->io_request_frames = 456 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, 457 &fusion->io_request_frames_phys); 458 if (!fusion->io_request_frames) { 459 dev_err(&instance->pdev->dev, "Could not allocate memory for " 460 "io_request_frames frames\n"); 461 pci_pool_destroy(fusion->io_request_frames_pool); 462 goto fail_io_frames; 463 } 464 465 /* 466 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. 467 * Allocate the dynamic array first and then allocate individual 468 * commands. 469 */ 470 fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) 471 * max_cmd, GFP_KERNEL); 472 473 if (!fusion->cmd_list) { 474 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc " 475 "memory for cmd_list_fusion\n"); 476 goto fail_cmd_list; 477 } 478 479 max_cmd = instance->max_fw_cmds; 480 for (i = 0; i < max_cmd; i++) { 481 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), 482 GFP_KERNEL); 483 if (!fusion->cmd_list[i]) { 484 dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n"); 485 486 for (j = 0; j < i; j++) 487 kfree(fusion->cmd_list[j]); 488 489 kfree(fusion->cmd_list); 490 fusion->cmd_list = NULL; 491 goto fail_cmd_list; 492 } 493 } 494 495 /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */ 496 io_req_base = fusion->io_request_frames + 497 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 498 io_req_base_phys = fusion->io_request_frames_phys + 499 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 500 501 /* 502 * Add all the commands to command pool (fusion->cmd_pool) 503 */ 504 505 /* SMID 0 is reserved. Set SMID/index from 1 */ 506 for (i = 0; i < max_cmd; i++) { 507 cmd = fusion->cmd_list[i]; 508 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 509 memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); 510 cmd->index = i + 1; 511 cmd->scmd = NULL; 512 cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ? 
513 (i - instance->max_scsi_cmds) : 514 (u32)ULONG_MAX; /* Set to Invalid */ 515 cmd->instance = instance; 516 cmd->io_request = 517 (struct MPI2_RAID_SCSI_IO_REQUEST *) 518 (io_req_base + offset); 519 memset(cmd->io_request, 0, 520 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 521 cmd->io_request_phys_addr = io_req_base_phys + offset; 522 } 523 524 /* 525 * Create a frame pool and assign one frame to each cmd 526 */ 527 if (megasas_create_frame_pool_fusion(instance)) { 528 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 529 megasas_free_cmds_fusion(instance); 530 goto fail_req_desc; 531 } 532 533 return 0; 534 535 fail_cmd_list: 536 pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, 537 fusion->io_request_frames_phys); 538 pci_pool_destroy(fusion->io_request_frames_pool); 539 fail_io_frames: 540 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, 541 fusion->reply_frames_desc, 542 fusion->reply_frames_desc_phys); 543 pci_pool_free(fusion->reply_frames_desc_pool, 544 fusion->reply_frames_desc, 545 fusion->reply_frames_desc_phys); 546 pci_pool_destroy(fusion->reply_frames_desc_pool); 547 548 fail_reply_desc: 549 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, 550 fusion->req_frames_desc, 551 fusion->req_frames_desc_phys); 552 fail_req_desc: 553 return -ENOMEM; 554 } 555 556 /** 557 * wait_and_poll - Issues a polling command 558 * @instance: Adapter soft state 559 * @cmd: Command packet to be issued 560 * 561 * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 562 */ 563 int 564 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 565 int seconds) 566 { 567 int i; 568 struct megasas_header *frame_hdr = &cmd->frame->hdr; 569 struct fusion_context *fusion; 570 571 u32 msecs = seconds * 1000; 572 573 fusion = instance->ctrl_context; 574 /* 575 * Wait for cmd_status to change 576 */ 577 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { 578 rmb(); 579 msleep(20); 580 } 581 582 if (frame_hdr->cmd_status == 0xff) 583 return -ETIME; 584 585 return (frame_hdr->cmd_status == MFI_STAT_OK) ? 
586 0 : 1; 587 } 588 589 /** 590 * megasas_ioc_init_fusion - Initializes the FW 591 * @instance: Adapter soft state 592 * 593 * Issues the IOC Init cmd 594 */ 595 int 596 megasas_ioc_init_fusion(struct megasas_instance *instance) 597 { 598 struct megasas_init_frame *init_frame; 599 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; 600 dma_addr_t ioc_init_handle; 601 struct megasas_cmd *cmd; 602 u8 ret; 603 struct fusion_context *fusion; 604 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; 605 int i; 606 struct megasas_header *frame_hdr; 607 const char *sys_info; 608 609 fusion = instance->ctrl_context; 610 611 cmd = megasas_get_cmd(instance); 612 613 if (!cmd) { 614 dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n"); 615 ret = 1; 616 goto fail_get_cmd; 617 } 618 619 IOCInitMessage = 620 dma_alloc_coherent(&instance->pdev->dev, 621 sizeof(struct MPI2_IOC_INIT_REQUEST), 622 &ioc_init_handle, GFP_KERNEL); 623 624 if (!IOCInitMessage) { 625 dev_err(&instance->pdev->dev, "Could not allocate memory for " 626 "IOCInitMessage\n"); 627 ret = 1; 628 goto fail_fw_init; 629 } 630 631 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); 632 633 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 634 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 635 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); 636 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 637 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 638 639 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); 640 IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); 641 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 642 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 643 init_frame = (struct megasas_init_frame *)cmd->frame; 644 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 645 646 frame_hdr = &cmd->frame->hdr; 647 frame_hdr->cmd_status = 0xFF; 648 frame_hdr->flags = cpu_to_le16( 649 le16_to_cpu(frame_hdr->flags) | 650 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 651 652 init_frame->cmd = MFI_CMD_INIT; 653 init_frame->cmd_status = 0xFF; 654 655 /* driver support Extended MSIX */ 656 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 657 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 658 init_frame->driver_operations. 659 mfi_capabilities.support_additional_msix = 1; 660 /* driver supports HA / Remote LUN over Fast Path interface */ 661 init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun 662 = 1; 663 init_frame->driver_operations.mfi_capabilities.support_max_255lds 664 = 1; 665 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb 666 = 1; 667 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw 668 = 1; 669 /* Convert capability to LE32 */ 670 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 671 672 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); 673 if (instance->system_info_buf && sys_info) { 674 memcpy(instance->system_info_buf->systemId, sys_info, 675 strlen(sys_info) > 64 ? 64 : strlen(sys_info)); 676 instance->system_info_buf->systemIdLength = 677 strlen(sys_info) > 64 ? 
64 : strlen(sys_info); 678 init_frame->system_info_lo = instance->system_info_h; 679 init_frame->system_info_hi = 0; 680 } 681 682 init_frame->queue_info_new_phys_addr_hi = 683 cpu_to_le32(upper_32_bits(ioc_init_handle)); 684 init_frame->queue_info_new_phys_addr_lo = 685 cpu_to_le32(lower_32_bits(ioc_init_handle)); 686 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); 687 688 req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr)); 689 req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr)); 690 req_desc.MFAIo.RequestFlags = 691 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << 692 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 693 694 /* 695 * disable the intr before firing the init frame 696 */ 697 instance->instancet->disable_intr(instance); 698 699 for (i = 0; i < (10 * 1000); i += 20) { 700 if (readl(&instance->reg_set->doorbell) & 1) 701 msleep(20); 702 else 703 break; 704 } 705 706 megasas_fire_cmd_fusion(instance, &req_desc); 707 708 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 709 710 frame_hdr = &cmd->frame->hdr; 711 if (frame_hdr->cmd_status != 0) { 712 ret = 1; 713 goto fail_fw_init; 714 } 715 dev_err(&instance->pdev->dev, "Init cmd success\n"); 716 717 ret = 0; 718 719 fail_fw_init: 720 megasas_return_cmd(instance, cmd); 721 if (IOCInitMessage) 722 dma_free_coherent(&instance->pdev->dev, 723 sizeof(struct MPI2_IOC_INIT_REQUEST), 724 IOCInitMessage, ioc_init_handle); 725 fail_get_cmd: 726 return ret; 727 } 728 729 /* 730 * megasas_get_ld_map_info - Returns FW's ld_map structure 731 * @instance: Adapter soft state 732 * @pend: Pend the command or not 733 * Issues an internal command (DCMD) to get the FW's controller PD 734 * list structure. This information is mainly used to find out SYSTEM 735 * supported by the FW. 736 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO 737 * dcmd.mbox.b[0] - number of LDs being sync'd 738 * dcmd.mbox.b[1] - 0 - complete command immediately. 
739 * - 1 - pend till config change 740 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP 741 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and 742 * uses extended struct MR_FW_RAID_MAP_EXT 743 */ 744 static int 745 megasas_get_ld_map_info(struct megasas_instance *instance) 746 { 747 int ret = 0; 748 struct megasas_cmd *cmd; 749 struct megasas_dcmd_frame *dcmd; 750 void *ci; 751 dma_addr_t ci_h = 0; 752 u32 size_map_info; 753 struct fusion_context *fusion; 754 755 cmd = megasas_get_cmd(instance); 756 757 if (!cmd) { 758 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n"); 759 return -ENOMEM; 760 } 761 762 fusion = instance->ctrl_context; 763 764 if (!fusion) { 765 megasas_return_cmd(instance, cmd); 766 return -ENXIO; 767 } 768 769 dcmd = &cmd->frame->dcmd; 770 771 size_map_info = fusion->current_map_sz; 772 773 ci = (void *) fusion->ld_map[(instance->map_id & 1)]; 774 ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; 775 776 if (!ci) { 777 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n"); 778 megasas_return_cmd(instance, cmd); 779 return -ENOMEM; 780 } 781 782 memset(ci, 0, fusion->max_map_sz); 783 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 784 #if VD_EXT_DEBUG 785 dev_dbg(&instance->pdev->dev, 786 "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n", 787 __func__, cpu_to_le32(size_map_info)); 788 #endif 789 dcmd->cmd = MFI_CMD_DCMD; 790 dcmd->cmd_status = 0xFF; 791 dcmd->sge_count = 1; 792 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 793 dcmd->timeout = 0; 794 dcmd->pad_0 = 0; 795 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 796 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 797 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 798 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 799 800 if (instance->ctrl_context && !instance->mask_interrupts) 801 ret = megasas_issue_blocked_cmd(instance, cmd, 802 MEGASAS_BLOCKED_CMD_TIMEOUT); 803 else 804 ret = megasas_issue_polled(instance, cmd); 805 806 megasas_return_cmd(instance, cmd); 807 808 return ret; 809 } 810 811 u8 812 megasas_get_map_info(struct megasas_instance *instance) 813 { 814 struct fusion_context *fusion = instance->ctrl_context; 815 816 fusion->fast_path_io = 0; 817 if (!megasas_get_ld_map_info(instance)) { 818 if (MR_ValidateMapInfo(instance)) { 819 fusion->fast_path_io = 1; 820 return 0; 821 } 822 } 823 return 1; 824 } 825 826 /* 827 * megasas_sync_map_info - Returns FW's ld_map structure 828 * @instance: Adapter soft state 829 * 830 * Issues an internal command (DCMD) to get the FW's controller PD 831 * list structure. This information is mainly used to find out SYSTEM 832 * supported by the FW. 
833 */ 834 int 835 megasas_sync_map_info(struct megasas_instance *instance) 836 { 837 int ret = 0, i; 838 struct megasas_cmd *cmd; 839 struct megasas_dcmd_frame *dcmd; 840 u32 size_sync_info, num_lds; 841 struct fusion_context *fusion; 842 struct MR_LD_TARGET_SYNC *ci = NULL; 843 struct MR_DRV_RAID_MAP_ALL *map; 844 struct MR_LD_RAID *raid; 845 struct MR_LD_TARGET_SYNC *ld_sync; 846 dma_addr_t ci_h = 0; 847 u32 size_map_info; 848 849 cmd = megasas_get_cmd(instance); 850 851 if (!cmd) { 852 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n"); 853 return -ENOMEM; 854 } 855 856 fusion = instance->ctrl_context; 857 858 if (!fusion) { 859 megasas_return_cmd(instance, cmd); 860 return 1; 861 } 862 863 map = fusion->ld_drv_map[instance->map_id & 1]; 864 865 num_lds = le16_to_cpu(map->raidMap.ldCount); 866 867 dcmd = &cmd->frame->dcmd; 868 869 size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds; 870 871 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 872 873 ci = (struct MR_LD_TARGET_SYNC *) 874 fusion->ld_map[(instance->map_id - 1) & 1]; 875 memset(ci, 0, fusion->max_map_sz); 876 877 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; 878 879 ld_sync = (struct MR_LD_TARGET_SYNC *)ci; 880 881 for (i = 0; i < num_lds; i++, ld_sync++) { 882 raid = MR_LdRaidGet(i, map); 883 ld_sync->targetId = MR_GetLDTgtId(i, map); 884 ld_sync->seqNum = raid->seqNum; 885 } 886 887 size_map_info = fusion->current_map_sz; 888 889 dcmd->cmd = MFI_CMD_DCMD; 890 dcmd->cmd_status = 0xFF; 891 dcmd->sge_count = 1; 892 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); 893 dcmd->timeout = 0; 894 dcmd->pad_0 = 0; 895 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 896 dcmd->mbox.b[0] = num_lds; 897 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; 898 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 899 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 900 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 901 902 instance->map_update_cmd = cmd; 903 904 instance->instancet->issue_dcmd(instance, cmd); 905 906 return ret; 907 } 908 909 /* 910 * meagasas_display_intel_branding - Display branding string 911 * @instance: per adapter object 912 * 913 * Return nothing. 
914 */ 915 static void 916 megasas_display_intel_branding(struct megasas_instance *instance) 917 { 918 if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) 919 return; 920 921 switch (instance->pdev->device) { 922 case PCI_DEVICE_ID_LSI_INVADER: 923 switch (instance->pdev->subsystem_device) { 924 case MEGARAID_INTEL_RS3DC080_SSDID: 925 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 926 instance->host->host_no, 927 MEGARAID_INTEL_RS3DC080_BRANDING); 928 break; 929 case MEGARAID_INTEL_RS3DC040_SSDID: 930 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 931 instance->host->host_no, 932 MEGARAID_INTEL_RS3DC040_BRANDING); 933 break; 934 case MEGARAID_INTEL_RS3SC008_SSDID: 935 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 936 instance->host->host_no, 937 MEGARAID_INTEL_RS3SC008_BRANDING); 938 break; 939 case MEGARAID_INTEL_RS3MC044_SSDID: 940 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 941 instance->host->host_no, 942 MEGARAID_INTEL_RS3MC044_BRANDING); 943 break; 944 default: 945 break; 946 } 947 break; 948 case PCI_DEVICE_ID_LSI_FURY: 949 switch (instance->pdev->subsystem_device) { 950 case MEGARAID_INTEL_RS3WC080_SSDID: 951 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 952 instance->host->host_no, 953 MEGARAID_INTEL_RS3WC080_BRANDING); 954 break; 955 case MEGARAID_INTEL_RS3WC040_SSDID: 956 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 957 instance->host->host_no, 958 MEGARAID_INTEL_RS3WC040_BRANDING); 959 break; 960 default: 961 break; 962 } 963 break; 964 default: 965 break; 966 } 967 } 968 969 /** 970 * megasas_init_adapter_fusion - Initializes the FW 971 * @instance: Adapter soft state 972 * 973 * This is the main function for initializing firmware. 974 */ 975 u32 976 megasas_init_adapter_fusion(struct megasas_instance *instance) 977 { 978 struct megasas_register_set __iomem *reg_set; 979 struct fusion_context *fusion; 980 u32 max_cmd; 981 int i = 0, count; 982 983 fusion = instance->ctrl_context; 984 985 reg_set = instance->reg_set; 986 987 /* 988 * Get various operational parameters from status register 989 */ 990 instance->max_fw_cmds = 991 instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 992 instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008); 993 994 /* 995 * Reduce the max supported cmds by 1. 
This is to ensure that the 996 * reply_q_sz (1 more than the max cmd that driver may send) 997 * does not exceed max cmds that the FW can support 998 */ 999 instance->max_fw_cmds = instance->max_fw_cmds-1; 1000 1001 /* 1002 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames 1003 */ 1004 instance->max_mfi_cmds = 1005 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; 1006 1007 max_cmd = instance->max_fw_cmds; 1008 1009 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); 1010 1011 fusion->request_alloc_sz = 1012 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; 1013 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) 1014 *(fusion->reply_q_depth); 1015 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 1016 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * 1017 (max_cmd + 1)); /* Extra 1 for SMID 0 */ 1018 1019 fusion->max_sge_in_main_msg = 1020 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 1021 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16; 1022 1023 fusion->max_sge_in_chain = 1024 MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION); 1025 1026 instance->max_num_sge = rounddown_pow_of_two( 1027 fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2); 1028 1029 /* Used for pass thru MFI frame (DCMD) */ 1030 fusion->chain_offset_mfi_pthru = 1031 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16; 1032 1033 fusion->chain_offset_io_request = 1034 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 1035 sizeof(union MPI2_SGE_IO_UNION))/16; 1036 1037 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 1038 for (i = 0 ; i < count; i++) 1039 fusion->last_reply_idx[i] = 0; 1040 1041 /* 1042 * For fusion adapters, 3 commands for IOCTL and 5 commands 1043 * for driver's internal DCMDs. 1044 */ 1045 instance->max_scsi_cmds = instance->max_fw_cmds - 1046 (MEGASAS_FUSION_INTERNAL_CMDS + 1047 MEGASAS_FUSION_IOCTL_CMDS); 1048 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); 1049 1050 /* 1051 * Allocate memory for descriptors 1052 * Create a pool of commands 1053 */ 1054 if (megasas_alloc_cmds(instance)) 1055 goto fail_alloc_mfi_cmds; 1056 if (megasas_alloc_cmds_fusion(instance)) 1057 goto fail_alloc_cmds; 1058 1059 if (megasas_ioc_init_fusion(instance)) 1060 goto fail_ioc_init; 1061 1062 megasas_display_intel_branding(instance); 1063 if (megasas_get_ctrl_info(instance)) { 1064 dev_err(&instance->pdev->dev, 1065 "Could not get controller info. 
Fail from %s %d\n", 1066 __func__, __LINE__); 1067 goto fail_ioc_init; 1068 } 1069 1070 instance->flag_ieee = 1; 1071 fusion->fast_path_io = 0; 1072 1073 fusion->drv_map_pages = get_order(fusion->drv_map_sz); 1074 for (i = 0; i < 2; i++) { 1075 fusion->ld_map[i] = NULL; 1076 fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL, 1077 fusion->drv_map_pages); 1078 if (!fusion->ld_drv_map[i]) { 1079 dev_err(&instance->pdev->dev, "Could not allocate " 1080 "memory for local map info for %d pages\n", 1081 fusion->drv_map_pages); 1082 if (i == 1) 1083 free_pages((ulong)fusion->ld_drv_map[0], 1084 fusion->drv_map_pages); 1085 goto fail_ioc_init; 1086 } 1087 memset(fusion->ld_drv_map[i], 0, 1088 ((1 << PAGE_SHIFT) << fusion->drv_map_pages)); 1089 } 1090 1091 for (i = 0; i < 2; i++) { 1092 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, 1093 fusion->max_map_sz, 1094 &fusion->ld_map_phys[i], 1095 GFP_KERNEL); 1096 if (!fusion->ld_map[i]) { 1097 dev_err(&instance->pdev->dev, "Could not allocate memory " 1098 "for map info\n"); 1099 goto fail_map_info; 1100 } 1101 } 1102 1103 if (!megasas_get_map_info(instance)) 1104 megasas_sync_map_info(instance); 1105 1106 return 0; 1107 1108 fail_map_info: 1109 if (i == 1) 1110 dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, 1111 fusion->ld_map[0], fusion->ld_map_phys[0]); 1112 fail_ioc_init: 1113 megasas_free_cmds_fusion(instance); 1114 fail_alloc_cmds: 1115 megasas_free_cmds(instance); 1116 fail_alloc_mfi_cmds: 1117 return 1; 1118 } 1119 1120 /** 1121 * map_cmd_status - Maps FW cmd status to OS cmd status 1122 * @cmd : Pointer to cmd 1123 * @status : status of cmd returned by FW 1124 * @ext_status : ext status of cmd returned by FW 1125 */ 1126 1127 void 1128 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) 1129 { 1130 1131 switch (status) { 1132 1133 case MFI_STAT_OK: 1134 cmd->scmd->result = DID_OK << 16; 1135 break; 1136 1137 case MFI_STAT_SCSI_IO_FAILED: 1138 case MFI_STAT_LD_INIT_IN_PROGRESS: 1139 cmd->scmd->result = (DID_ERROR << 16) | ext_status; 1140 break; 1141 1142 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1143 1144 cmd->scmd->result = (DID_OK << 16) | ext_status; 1145 if (ext_status == SAM_STAT_CHECK_CONDITION) { 1146 memset(cmd->scmd->sense_buffer, 0, 1147 SCSI_SENSE_BUFFERSIZE); 1148 memcpy(cmd->scmd->sense_buffer, cmd->sense, 1149 SCSI_SENSE_BUFFERSIZE); 1150 cmd->scmd->result |= DRIVER_SENSE << 24; 1151 } 1152 break; 1153 1154 case MFI_STAT_LD_OFFLINE: 1155 case MFI_STAT_DEVICE_NOT_FOUND: 1156 cmd->scmd->result = DID_BAD_TARGET << 16; 1157 break; 1158 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1159 cmd->scmd->result = DID_IMM_RETRY << 16; 1160 break; 1161 default: 1162 dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status); 1163 cmd->scmd->result = DID_ERROR << 16; 1164 break; 1165 } 1166 } 1167 1168 /** 1169 * megasas_make_sgl_fusion - Prepares 32-bit SGL 1170 * @instance: Adapter soft state 1171 * @scp: SCSI command from the mid-layer 1172 * @sgl_ptr: SGL to be filled in 1173 * @cmd: cmd we are working on 1174 * 1175 * If successful, this function returns the number of SG elements. 
1176 */ 1177 static int 1178 megasas_make_sgl_fusion(struct megasas_instance *instance, 1179 struct scsi_cmnd *scp, 1180 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 1181 struct megasas_cmd_fusion *cmd) 1182 { 1183 int i, sg_processed, sge_count; 1184 struct scatterlist *os_sgl; 1185 struct fusion_context *fusion; 1186 1187 fusion = instance->ctrl_context; 1188 1189 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1190 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1191 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 1192 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 1193 sgl_ptr_end->Flags = 0; 1194 } 1195 1196 sge_count = scsi_dma_map(scp); 1197 1198 BUG_ON(sge_count < 0); 1199 1200 if (sge_count > instance->max_num_sge || !sge_count) 1201 return sge_count; 1202 1203 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1204 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 1205 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 1206 sgl_ptr->Flags = 0; 1207 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1208 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1209 if (i == sge_count - 1) 1210 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 1211 } 1212 sgl_ptr++; 1213 1214 sg_processed = i + 1; 1215 1216 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && 1217 (sge_count > fusion->max_sge_in_main_msg)) { 1218 1219 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 1220 if ((instance->pdev->device == 1221 PCI_DEVICE_ID_LSI_INVADER) || 1222 (instance->pdev->device == 1223 PCI_DEVICE_ID_LSI_FURY)) { 1224 if ((le16_to_cpu(cmd->io_request->IoFlags) & 1225 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1226 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1227 cmd->io_request->ChainOffset = 1228 fusion-> 1229 chain_offset_io_request; 1230 else 1231 cmd->io_request->ChainOffset = 0; 1232 } else 1233 cmd->io_request->ChainOffset = 1234 fusion->chain_offset_io_request; 1235 1236 sg_chain = sgl_ptr; 1237 /* Prepare chain element */ 1238 sg_chain->NextChainOffset = 0; 1239 if ((instance->pdev->device == 1240 PCI_DEVICE_ID_LSI_INVADER) || 1241 (instance->pdev->device == 1242 PCI_DEVICE_ID_LSI_FURY)) 1243 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 1244 else 1245 sg_chain->Flags = 1246 (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1247 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1248 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); 1249 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); 1250 1251 sgl_ptr = 1252 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; 1253 memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME); 1254 } 1255 } 1256 1257 return sge_count; 1258 } 1259 1260 /** 1261 * megasas_set_pd_lba - Sets PD LBA 1262 * @cdb: CDB 1263 * @cdb_len: cdb length 1264 * @start_blk: Start block of IO 1265 * 1266 * Used to set the PD LBA in CDB for FP IOs 1267 */ 1268 void 1269 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 1270 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 1271 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 1272 { 1273 struct MR_LD_RAID *raid; 1274 u32 ld; 1275 u64 start_blk = io_info->pdBlock; 1276 u8 *cdb = io_request->CDB.CDB32; 1277 u32 num_blocks = io_info->numBlocks; 1278 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; 1279 1280 /* Check if T10 PI (DIF) is enabled for this LD */ 1281 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1282 raid = MR_LdRaidGet(ld, local_map_ptr); 1283 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1284 memset(cdb, 
0, sizeof(io_request->CDB.CDB32)); 1285 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; 1286 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; 1287 1288 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1289 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 1290 else 1291 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 1292 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; 1293 1294 /* LBA */ 1295 cdb[12] = (u8)((start_blk >> 56) & 0xff); 1296 cdb[13] = (u8)((start_blk >> 48) & 0xff); 1297 cdb[14] = (u8)((start_blk >> 40) & 0xff); 1298 cdb[15] = (u8)((start_blk >> 32) & 0xff); 1299 cdb[16] = (u8)((start_blk >> 24) & 0xff); 1300 cdb[17] = (u8)((start_blk >> 16) & 0xff); 1301 cdb[18] = (u8)((start_blk >> 8) & 0xff); 1302 cdb[19] = (u8)(start_blk & 0xff); 1303 1304 /* Logical block reference tag */ 1305 io_request->CDB.EEDP32.PrimaryReferenceTag = 1306 cpu_to_be32(ref_tag); 1307 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); 1308 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 1309 1310 /* Transfer length */ 1311 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 1312 cdb[29] = (u8)((num_blocks >> 16) & 0xff); 1313 cdb[30] = (u8)((num_blocks >> 8) & 0xff); 1314 cdb[31] = (u8)(num_blocks & 0xff); 1315 1316 /* set SCSI IO EEDPFlags */ 1317 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { 1318 io_request->EEDPFlags = cpu_to_le16( 1319 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1320 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1321 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1322 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1323 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1324 } else { 1325 io_request->EEDPFlags = cpu_to_le16( 1326 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1327 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); 1328 } 1329 io_request->Control |= cpu_to_le32((0x4 << 26)); 1330 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); 1331 } else { 1332 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1333 if (((cdb_len == 12) || (cdb_len == 16)) && 1334 (start_blk <= 0xffffffff)) { 1335 if (cdb_len == 16) { 1336 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 1337 flagvals = cdb[1]; 1338 groupnum = cdb[14]; 1339 control = cdb[15]; 1340 } else { 1341 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; 1342 flagvals = cdb[1]; 1343 groupnum = cdb[10]; 1344 control = cdb[11]; 1345 } 1346 1347 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1348 1349 cdb[0] = opcode; 1350 cdb[1] = flagvals; 1351 cdb[6] = groupnum; 1352 cdb[9] = control; 1353 1354 /* Transfer length */ 1355 cdb[8] = (u8)(num_blocks & 0xff); 1356 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1357 1358 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ 1359 cdb_len = 10; 1360 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 1361 /* Convert to 16 byte CDB for large LBA's */ 1362 switch (cdb_len) { 1363 case 6: 1364 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; 1365 control = cdb[5]; 1366 break; 1367 case 10: 1368 opcode = 1369 cdb[0] == READ_10 ? READ_16 : WRITE_16; 1370 flagvals = cdb[1]; 1371 groupnum = cdb[6]; 1372 control = cdb[9]; 1373 break; 1374 case 12: 1375 opcode = 1376 cdb[0] == READ_12 ? 
READ_16 : WRITE_16; 1377 flagvals = cdb[1]; 1378 groupnum = cdb[10]; 1379 control = cdb[11]; 1380 break; 1381 } 1382 1383 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1384 1385 cdb[0] = opcode; 1386 cdb[1] = flagvals; 1387 cdb[14] = groupnum; 1388 cdb[15] = control; 1389 1390 /* Transfer length */ 1391 cdb[13] = (u8)(num_blocks & 0xff); 1392 cdb[12] = (u8)((num_blocks >> 8) & 0xff); 1393 cdb[11] = (u8)((num_blocks >> 16) & 0xff); 1394 cdb[10] = (u8)((num_blocks >> 24) & 0xff); 1395 1396 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ 1397 cdb_len = 16; 1398 } 1399 1400 /* Normal case, just load LBA here */ 1401 switch (cdb_len) { 1402 case 6: 1403 { 1404 u8 val = cdb[1] & 0xE0; 1405 cdb[3] = (u8)(start_blk & 0xff); 1406 cdb[2] = (u8)((start_blk >> 8) & 0xff); 1407 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f); 1408 break; 1409 } 1410 case 10: 1411 cdb[5] = (u8)(start_blk & 0xff); 1412 cdb[4] = (u8)((start_blk >> 8) & 0xff); 1413 cdb[3] = (u8)((start_blk >> 16) & 0xff); 1414 cdb[2] = (u8)((start_blk >> 24) & 0xff); 1415 break; 1416 case 12: 1417 cdb[5] = (u8)(start_blk & 0xff); 1418 cdb[4] = (u8)((start_blk >> 8) & 0xff); 1419 cdb[3] = (u8)((start_blk >> 16) & 0xff); 1420 cdb[2] = (u8)((start_blk >> 24) & 0xff); 1421 break; 1422 case 16: 1423 cdb[9] = (u8)(start_blk & 0xff); 1424 cdb[8] = (u8)((start_blk >> 8) & 0xff); 1425 cdb[7] = (u8)((start_blk >> 16) & 0xff); 1426 cdb[6] = (u8)((start_blk >> 24) & 0xff); 1427 cdb[5] = (u8)((start_blk >> 32) & 0xff); 1428 cdb[4] = (u8)((start_blk >> 40) & 0xff); 1429 cdb[3] = (u8)((start_blk >> 48) & 0xff); 1430 cdb[2] = (u8)((start_blk >> 56) & 0xff); 1431 break; 1432 } 1433 } 1434 } 1435 1436 /** 1437 * megasas_build_ldio_fusion - Prepares IOs to devices 1438 * @instance: Adapter soft state 1439 * @scp: SCSI command 1440 * @cmd: Command to be prepared 1441 * 1442 * Prepares the io_request and chain elements (sg_frame) for IO 1443 * The IO can be for PD (Fast Path) or LD 1444 */ 1445 void 1446 megasas_build_ldio_fusion(struct megasas_instance *instance, 1447 struct scsi_cmnd *scp, 1448 struct megasas_cmd_fusion *cmd) 1449 { 1450 u8 fp_possible; 1451 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 1452 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1453 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1454 struct IO_REQUEST_INFO io_info; 1455 struct fusion_context *fusion; 1456 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1457 u8 *raidLUN; 1458 1459 device_id = MEGASAS_DEV_INDEX(scp); 1460 1461 fusion = instance->ctrl_context; 1462 1463 io_request = cmd->io_request; 1464 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); 1465 io_request->RaidContext.status = 0; 1466 io_request->RaidContext.exStatus = 0; 1467 1468 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; 1469 1470 start_lba_lo = 0; 1471 start_lba_hi = 0; 1472 fp_possible = 0; 1473 1474 /* 1475 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1476 */ 1477 if (scp->cmd_len == 6) { 1478 datalength = (u32) scp->cmnd[4]; 1479 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1480 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1481 1482 start_lba_lo &= 0x1FFFFF; 1483 } 1484 1485 /* 1486 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1487 */ 1488 else if (scp->cmd_len == 10) { 1489 datalength = (u32) scp->cmnd[8] | 1490 ((u32) scp->cmnd[7] << 8); 1491 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1492 ((u32) scp->cmnd[3] << 16) | 1493 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1494 } 1495 1496 /* 1497 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1498 */ 1499 
else if (scp->cmd_len == 12) { 1500 datalength = ((u32) scp->cmnd[6] << 24) | 1501 ((u32) scp->cmnd[7] << 16) | 1502 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1503 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1504 ((u32) scp->cmnd[3] << 16) | 1505 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1506 } 1507 1508 /* 1509 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1510 */ 1511 else if (scp->cmd_len == 16) { 1512 datalength = ((u32) scp->cmnd[10] << 24) | 1513 ((u32) scp->cmnd[11] << 16) | 1514 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1515 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1516 ((u32) scp->cmnd[7] << 16) | 1517 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1518 1519 start_lba_hi = ((u32) scp->cmnd[2] << 24) | 1520 ((u32) scp->cmnd[3] << 16) | 1521 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1522 } 1523 1524 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 1525 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1526 io_info.numBlocks = datalength; 1527 io_info.ldTgtId = device_id; 1528 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); 1529 1530 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1531 io_info.isRead = 1; 1532 1533 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1534 1535 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= 1536 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { 1537 io_request->RaidContext.regLockFlags = 0; 1538 fp_possible = 0; 1539 } else { 1540 if (MR_BuildRaidContext(instance, &io_info, 1541 &io_request->RaidContext, 1542 local_map_ptr, &raidLUN)) 1543 fp_possible = io_info.fpOkForIo; 1544 } 1545 1546 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU 1547 id by default, not CPU group id, otherwise all MSI-X queues won't 1548 be utilized */ 1549 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
1550 raw_smp_processor_id() % instance->msix_vectors : 0; 1551 1552 if (fp_possible) { 1553 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 1554 local_map_ptr, start_lba_lo); 1555 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1556 cmd->request_desc->SCSIIO.RequestFlags = 1557 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY 1558 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1559 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1560 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1561 if (io_request->RaidContext.regLockFlags == 1562 REGION_TYPE_UNUSED) 1563 cmd->request_desc->SCSIIO.RequestFlags = 1564 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1565 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1566 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1567 io_request->RaidContext.nseg = 0x1; 1568 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1569 io_request->RaidContext.regLockFlags |= 1570 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1571 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1572 } 1573 if ((fusion->load_balance_info[device_id].loadBalanceFlag) && 1574 (io_info.isRead)) { 1575 io_info.devHandle = 1576 get_updated_dev_handle(instance, 1577 &fusion->load_balance_info[device_id], 1578 &io_info); 1579 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 1580 cmd->pd_r1_lb = io_info.pd_after_lb; 1581 } else 1582 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1583 1584 if ((raidLUN[0] == 1) && 1585 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 2)) { 1586 instance->dev_handle = !(instance->dev_handle); 1587 io_info.devHandle = 1588 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; 1589 } 1590 1591 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1592 io_request->DevHandle = io_info.devHandle; 1593 /* populate the LUN field */ 1594 memcpy(io_request->LUN, raidLUN, 8); 1595 } else { 1596 io_request->RaidContext.timeoutValue = 1597 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 1598 cmd->request_desc->SCSIIO.RequestFlags = 1599 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 1600 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1601 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1602 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1603 if (io_request->RaidContext.regLockFlags == 1604 REGION_TYPE_UNUSED) 1605 cmd->request_desc->SCSIIO.RequestFlags = 1606 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1607 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1608 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1609 io_request->RaidContext.regLockFlags |= 1610 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 1611 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1612 io_request->RaidContext.nseg = 0x1; 1613 } 1614 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1615 io_request->DevHandle = cpu_to_le16(device_id); 1616 } /* Not FP */ 1617 } 1618 1619 /** 1620 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk 1621 * @instance: Adapter soft state 1622 * @scp: SCSI command 1623 * @cmd: Command to be prepared 1624 * 1625 * Prepares the io_request frame for non-rw io cmds for vd. 
1626 */ 1627 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, 1628 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) 1629 { 1630 u32 device_id; 1631 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1632 u16 pd_index = 0; 1633 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1634 struct fusion_context *fusion = instance->ctrl_context; 1635 u8 span, physArm; 1636 __le16 devHandle; 1637 u32 ld, arRef, pd; 1638 struct MR_LD_RAID *raid; 1639 struct RAID_CONTEXT *pRAID_Context; 1640 u8 fp_possible = 1; 1641 1642 io_request = cmd->io_request; 1643 device_id = MEGASAS_DEV_INDEX(scmd); 1644 pd_index = MEGASAS_PD_INDEX(scmd); 1645 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1646 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1647 /* get RAID_Context pointer */ 1648 pRAID_Context = &io_request->RaidContext; 1649 /* Check with FW team */ 1650 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1651 pRAID_Context->regLockRowLBA = 0; 1652 pRAID_Context->regLockLength = 0; 1653 1654 if (fusion->fast_path_io && ( 1655 device_id < instance->fw_supported_vd_count)) { 1656 1657 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1658 if (ld >= instance->fw_supported_vd_count) 1659 fp_possible = 0; 1660 1661 raid = MR_LdRaidGet(ld, local_map_ptr); 1662 if (!(raid->capability.fpNonRWCapable)) 1663 fp_possible = 0; 1664 } else 1665 fp_possible = 0; 1666 1667 if (!fp_possible) { 1668 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1669 io_request->DevHandle = cpu_to_le16(device_id); 1670 io_request->LUN[1] = scmd->device->lun; 1671 pRAID_Context->timeoutValue = 1672 cpu_to_le16 (scmd->request->timeout / HZ); 1673 cmd->request_desc->SCSIIO.RequestFlags = 1674 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1675 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1676 } else { 1677 1678 /* set RAID context values */ 1679 pRAID_Context->configSeqNum = raid->seqNum; 1680 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1681 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); 1682 1683 /* get the DevHandle for the PD (since this is 1684 fpNonRWCapable, this is a single disk RAID0) */ 1685 span = physArm = 0; 1686 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 1687 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 1688 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 1689 1690 /* build request descriptor */ 1691 cmd->request_desc->SCSIIO.RequestFlags = 1692 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1693 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1694 cmd->request_desc->SCSIIO.DevHandle = devHandle; 1695 1696 /* populate the LUN field */ 1697 memcpy(io_request->LUN, raid->LUN, 8); 1698 1699 /* build the raidScsiIO structure */ 1700 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1701 io_request->DevHandle = devHandle; 1702 } 1703 } 1704 1705 /** 1706 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd 1707 * @instance: Adapter soft state 1708 * @scp: SCSI command 1709 * @cmd: Command to be prepared 1710 * @fp_possible: parameter to detect fast path or firmware path io. 
1711 * 1712 * Prepares the io_request frame for rw/non-rw io cmds for syspds 1713 */ 1714 static void 1715 megasas_build_syspd_fusion(struct megasas_instance *instance, 1716 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) 1717 { 1718 u32 device_id; 1719 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1720 u16 pd_index = 0; 1721 u16 os_timeout_value; 1722 u16 timeout_limit; 1723 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1724 struct RAID_CONTEXT *pRAID_Context; 1725 struct fusion_context *fusion = instance->ctrl_context; 1726 1727 device_id = MEGASAS_DEV_INDEX(scmd); 1728 pd_index = MEGASAS_PD_INDEX(scmd); 1729 os_timeout_value = scmd->request->timeout / HZ; 1730 1731 io_request = cmd->io_request; 1732 /* get RAID_Context pointer */ 1733 pRAID_Context = &io_request->RaidContext; 1734 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1735 io_request->LUN[1] = scmd->device->lun; 1736 pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 1737 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 1738 1739 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1740 pRAID_Context->configSeqNum = 0; 1741 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1742 io_request->DevHandle = 1743 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1744 1745 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 1746 cmd->request_desc->SCSIIO.MSIxIndex = 1747 instance->msix_vectors ? 1748 (raw_smp_processor_id() % instance->msix_vectors) : 0; 1749 1750 1751 if (!fp_possible) { 1752 /* system pd firmware path */ 1753 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1754 cmd->request_desc->SCSIIO.RequestFlags = 1755 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1756 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1757 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); 1758 } else { 1759 /* system pd Fast Path */ 1760 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1761 pRAID_Context->regLockFlags = 0; 1762 pRAID_Context->regLockRowLBA = 0; 1763 pRAID_Context->regLockLength = 0; 1764 timeout_limit = (scmd->device->type == TYPE_DISK) ? 1765 255 : 0xFFFF; 1766 pRAID_Context->timeoutValue = 1767 cpu_to_le16((os_timeout_value > timeout_limit) ? 
1768 timeout_limit : os_timeout_value); 1769 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1770 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1771 cmd->request_desc->SCSIIO.RequestFlags |= 1772 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1773 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1774 pRAID_Context->Type = MPI2_TYPE_CUDA; 1775 pRAID_Context->nseg = 0x1; 1776 io_request->IoFlags |= 1777 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1778 } 1779 cmd->request_desc->SCSIIO.RequestFlags = 1780 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1781 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1782 } 1783 } 1784 1785 /** 1786 * megasas_build_io_fusion - Prepares IOs to devices 1787 * @instance: Adapter soft state 1788 * @scp: SCSI command 1789 * @cmd: Command to be prepared 1790 * 1791 * Invokes helper functions to prepare request frames 1792 * and sets flags appropriate for IO/Non-IO cmd 1793 */ 1794 int 1795 megasas_build_io_fusion(struct megasas_instance *instance, 1796 struct scsi_cmnd *scp, 1797 struct megasas_cmd_fusion *cmd) 1798 { 1799 u32 sge_count; 1800 u8 cmd_type; 1801 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 1802 1803 /* Zero out some fields so they don't get reused */ 1804 memset(io_request->LUN, 0x0, 8); 1805 io_request->CDB.EEDP32.PrimaryReferenceTag = 0; 1806 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; 1807 io_request->EEDPFlags = 0; 1808 io_request->Control = 0; 1809 io_request->EEDPBlockSize = 0; 1810 io_request->ChainOffset = 0; 1811 io_request->RaidContext.RAIDFlags = 0; 1812 io_request->RaidContext.Type = 0; 1813 io_request->RaidContext.nseg = 0; 1814 1815 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 1816 /* 1817 * Just the CDB length,rest of the Flags are zero 1818 * This will be modified for FP in build_ldio_fusion 1819 */ 1820 io_request->IoFlags = cpu_to_le16(scp->cmd_len); 1821 1822 switch (cmd_type = megasas_cmd_type(scp)) { 1823 case READ_WRITE_LDIO: 1824 megasas_build_ldio_fusion(instance, scp, cmd); 1825 break; 1826 case NON_READ_WRITE_LDIO: 1827 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 1828 break; 1829 case READ_WRITE_SYSPDIO: 1830 case NON_READ_WRITE_SYSPDIO: 1831 if (instance->secure_jbod_support && 1832 (cmd_type == NON_READ_WRITE_SYSPDIO)) 1833 megasas_build_syspd_fusion(instance, scp, cmd, 0); 1834 else 1835 megasas_build_syspd_fusion(instance, scp, cmd, 1); 1836 break; 1837 default: 1838 break; 1839 } 1840 1841 /* 1842 * Construct SGL 1843 */ 1844 1845 sge_count = 1846 megasas_make_sgl_fusion(instance, scp, 1847 (struct MPI25_IEEE_SGE_CHAIN64 *) 1848 &io_request->SGL, cmd); 1849 1850 if (sge_count > instance->max_num_sge) { 1851 dev_err(&instance->pdev->dev, "Error. 
sge_count (0x%x) exceeds " 1852 "max (0x%x) allowed\n", sge_count, 1853 instance->max_num_sge); 1854 return 1; 1855 } 1856 1857 io_request->RaidContext.numSGE = sge_count; 1858 1859 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 1860 1861 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1862 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); 1863 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1864 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); 1865 1866 io_request->SGLOffset0 = 1867 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 1868 1869 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); 1870 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 1871 1872 cmd->scmd = scp; 1873 scp->SCp.ptr = (char *)cmd; 1874 1875 return 0; 1876 } 1877 1878 union MEGASAS_REQUEST_DESCRIPTOR_UNION * 1879 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) 1880 { 1881 u8 *p; 1882 struct fusion_context *fusion; 1883 1884 if (index >= instance->max_fw_cmds) { 1885 dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for " 1886 "descriptor for scsi%d\n", index, 1887 instance->host->host_no); 1888 return NULL; 1889 } 1890 fusion = instance->ctrl_context; 1891 p = fusion->req_frames_desc 1892 +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; 1893 1894 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; 1895 } 1896 1897 /** 1898 * megasas_build_and_issue_cmd_fusion -Main routine for building and 1899 * issuing non IOCTL cmd 1900 * @instance: Adapter soft state 1901 * @scmd: pointer to scsi cmd from OS 1902 */ 1903 static u32 1904 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, 1905 struct scsi_cmnd *scmd) 1906 { 1907 struct megasas_cmd_fusion *cmd; 1908 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1909 u32 index; 1910 struct fusion_context *fusion; 1911 1912 fusion = instance->ctrl_context; 1913 1914 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); 1915 1916 index = cmd->index; 1917 1918 req_desc = megasas_get_request_descriptor(instance, index-1); 1919 if (!req_desc) 1920 return 1; 1921 1922 req_desc->Words = 0; 1923 cmd->request_desc = req_desc; 1924 1925 if (megasas_build_io_fusion(instance, scmd, cmd)) { 1926 megasas_return_cmd_fusion(instance, cmd); 1927 dev_err(&instance->pdev->dev, "Error building command\n"); 1928 cmd->request_desc = NULL; 1929 return 1; 1930 } 1931 1932 req_desc = cmd->request_desc; 1933 req_desc->SCSIIO.SMID = cpu_to_le16(index); 1934 1935 if (cmd->io_request->ChainOffset != 0 && 1936 cmd->io_request->ChainOffset != 0xF) 1937 dev_err(&instance->pdev->dev, "The chain offset value is not " 1938 "correct : %x\n", cmd->io_request->ChainOffset); 1939 1940 /* 1941 * Issue the command to the FW 1942 */ 1943 atomic_inc(&instance->fw_outstanding); 1944 1945 megasas_fire_cmd_fusion(instance, req_desc); 1946 1947 return 0; 1948 } 1949 1950 /** 1951 * complete_cmd_fusion - Completes command 1952 * @instance: Adapter soft state 1953 * Completes all commands that is in reply descriptor queue 1954 */ 1955 int 1956 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) 1957 { 1958 union MPI2_REPLY_DESCRIPTORS_UNION *desc; 1959 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1960 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; 1961 struct fusion_context *fusion; 1962 struct megasas_cmd *cmd_mfi; 1963 struct megasas_cmd_fusion *cmd_fusion; 1964 u16 smid, num_completed; 1965 u8 reply_descript_type; 1966 u32 status, extStatus, 
device_id; 1967 union desc_value d_val; 1968 struct LD_LOAD_BALANCE_INFO *lbinfo; 1969 int threshold_reply_count = 0; 1970 struct scsi_cmnd *scmd_local = NULL; 1971 1972 fusion = instance->ctrl_context; 1973 1974 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) 1975 return IRQ_HANDLED; 1976 1977 desc = fusion->reply_frames_desc; 1978 desc += ((MSIxIndex * fusion->reply_alloc_sz)/ 1979 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) + 1980 fusion->last_reply_idx[MSIxIndex]; 1981 1982 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 1983 1984 d_val.word = desc->Words; 1985 1986 reply_descript_type = reply_desc->ReplyFlags & 1987 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1988 1989 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1990 return IRQ_NONE; 1991 1992 num_completed = 0; 1993 1994 while (d_val.u.low != cpu_to_le32(UINT_MAX) && 1995 d_val.u.high != cpu_to_le32(UINT_MAX)) { 1996 smid = le16_to_cpu(reply_desc->SMID); 1997 1998 cmd_fusion = fusion->cmd_list[smid - 1]; 1999 2000 scsi_io_req = 2001 (struct MPI2_RAID_SCSI_IO_REQUEST *) 2002 cmd_fusion->io_request; 2003 2004 if (cmd_fusion->scmd) 2005 cmd_fusion->scmd->SCp.ptr = NULL; 2006 2007 scmd_local = cmd_fusion->scmd; 2008 status = scsi_io_req->RaidContext.status; 2009 extStatus = scsi_io_req->RaidContext.exStatus; 2010 2011 switch (scsi_io_req->Function) { 2012 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ 2013 /* Update load balancing info */ 2014 device_id = MEGASAS_DEV_INDEX(scmd_local); 2015 lbinfo = &fusion->load_balance_info[device_id]; 2016 if (cmd_fusion->scmd->SCp.Status & 2017 MEGASAS_LOAD_BALANCE_FLAG) { 2018 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 2019 cmd_fusion->scmd->SCp.Status &= 2020 ~MEGASAS_LOAD_BALANCE_FLAG; 2021 } 2022 if (reply_descript_type == 2023 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 2024 if (megasas_dbg_lvl == 5) 2025 dev_err(&instance->pdev->dev, "\nFAST Path " 2026 "IO Success\n"); 2027 } 2028 /* Fall thru and complete IO */ 2029 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 2030 /* Map the FW Cmd Status */ 2031 map_cmd_status(cmd_fusion, status, extStatus); 2032 scsi_io_req->RaidContext.status = 0; 2033 scsi_io_req->RaidContext.exStatus = 0; 2034 megasas_return_cmd_fusion(instance, cmd_fusion); 2035 scsi_dma_unmap(scmd_local); 2036 scmd_local->scsi_done(scmd_local); 2037 atomic_dec(&instance->fw_outstanding); 2038 2039 break; 2040 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 2041 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2042 2043 /* Poll mode. Dummy free. 2044 * In case of Interrupt mode, caller has reverse check. 
2045 */ 2046 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { 2047 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; 2048 megasas_return_cmd(instance, cmd_mfi); 2049 } else 2050 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2051 break; 2052 } 2053 2054 fusion->last_reply_idx[MSIxIndex]++; 2055 if (fusion->last_reply_idx[MSIxIndex] >= 2056 fusion->reply_q_depth) 2057 fusion->last_reply_idx[MSIxIndex] = 0; 2058 2059 desc->Words = cpu_to_le64(ULLONG_MAX); 2060 num_completed++; 2061 threshold_reply_count++; 2062 2063 /* Get the next reply descriptor */ 2064 if (!fusion->last_reply_idx[MSIxIndex]) 2065 desc = fusion->reply_frames_desc + 2066 ((MSIxIndex * fusion->reply_alloc_sz)/ 2067 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)); 2068 else 2069 desc++; 2070 2071 reply_desc = 2072 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 2073 2074 d_val.word = desc->Words; 2075 2076 reply_descript_type = reply_desc->ReplyFlags & 2077 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2078 2079 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 2080 break; 2081 /* 2082 * Write to reply post host index register after completing threshold 2083 * number of reply counts and still there are more replies in reply queue 2084 * pending to be completed 2085 */ 2086 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 2087 if ((instance->pdev->device == 2088 PCI_DEVICE_ID_LSI_INVADER) || 2089 (instance->pdev->device == 2090 PCI_DEVICE_ID_LSI_FURY)) 2091 writel(((MSIxIndex & 0x7) << 24) | 2092 fusion->last_reply_idx[MSIxIndex], 2093 instance->reply_post_host_index_addr[MSIxIndex/8]); 2094 else 2095 writel((MSIxIndex << 24) | 2096 fusion->last_reply_idx[MSIxIndex], 2097 instance->reply_post_host_index_addr[0]); 2098 threshold_reply_count = 0; 2099 } 2100 } 2101 2102 if (!num_completed) 2103 return IRQ_NONE; 2104 2105 wmb(); 2106 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 2107 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 2108 writel(((MSIxIndex & 0x7) << 24) | 2109 fusion->last_reply_idx[MSIxIndex], 2110 instance->reply_post_host_index_addr[MSIxIndex/8]); 2111 else 2112 writel((MSIxIndex << 24) | 2113 fusion->last_reply_idx[MSIxIndex], 2114 instance->reply_post_host_index_addr[0]); 2115 megasas_check_and_restore_queue_depth(instance); 2116 return IRQ_HANDLED; 2117 } 2118 2119 /** 2120 * megasas_complete_cmd_dpc_fusion - Completes command 2121 * @instance: Adapter soft state 2122 * 2123 * Tasklet to complete cmds 2124 */ 2125 void 2126 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) 2127 { 2128 struct megasas_instance *instance = 2129 (struct megasas_instance *)instance_addr; 2130 unsigned long flags; 2131 u32 count, MSIxIndex; 2132 2133 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1;

	/* If we have already declared the adapter dead, do not complete cmds */
	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
		complete_cmd_fusion(instance, MSIxIndex);
}

/**
 * megasas_isr_fusion - isr entry point
 * @irq:	irq number
 * @devp:	IRQ context (struct megasas_irq_context)
 */
irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
	struct megasas_irq_context *irq_context = devp;
	struct megasas_instance *instance = irq_context->instance;
	u32 mfiStatus, fw_state, dma_state;

	if (instance->mask_interrupts)
		return IRQ_NONE;

	if (!instance->msix_vectors) {
		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
		if (!mfiStatus)
			return IRQ_NONE;
	}

	/* If we are resetting, bail */
	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
		instance->instancet->clear_intr(instance->reg_set);
		return IRQ_HANDLED;
	}

	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
		instance->instancet->clear_intr(instance->reg_set);
		/* If we didn't complete any commands, check for FW fault */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		dma_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_DMADONE;
		if (instance->crash_dump_drv_support &&
		    instance->crash_dump_app_support) {
			/* Start collecting crash dump, if the DMA done bit is set */
			if ((fw_state == MFI_STATE_FAULT) && dma_state)
				schedule_work(&instance->crash_init);
			else if (fw_state == MFI_STATE_FAULT)
				schedule_work(&instance->work_init);
		} else if (fw_state == MFI_STATE_FAULT) {
			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
				 "for scsi%d\n", instance->host->host_no);
			schedule_work(&instance->work_init);
		}
	}

	return IRQ_HANDLED;
}

/**
 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
 * @instance:	Adapter soft state
 * @mfi_cmd:	megasas_cmd pointer
 */
u8
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
			struct megasas_cmd *mfi_cmd)
{
	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
	struct megasas_cmd_fusion *cmd;
	struct fusion_context *fusion;
	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;

	fusion = instance->ctrl_context;

	cmd = megasas_get_cmd_fusion(instance,
			instance->max_scsi_cmds + mfi_cmd->index);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->context.smid = cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion.
For cmds with this flag, don't call 2222 * megasas_complete_cmd 2223 */ 2224 2225 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 2226 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; 2227 2228 io_req = cmd->io_request; 2229 2230 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 2231 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 2232 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 2233 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 2234 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 2235 sgl_ptr_end->Flags = 0; 2236 } 2237 2238 mpi25_ieee_chain = 2239 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; 2240 2241 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 2242 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, 2243 SGL) / 4; 2244 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 2245 2246 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); 2247 2248 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2249 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 2250 2251 mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME); 2252 2253 return 0; 2254 } 2255 2256 /** 2257 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd 2258 * @instance: Adapter soft state 2259 * @cmd: mfi cmd to build 2260 * 2261 */ 2262 union MEGASAS_REQUEST_DESCRIPTOR_UNION * 2263 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 2264 { 2265 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2266 u16 index; 2267 2268 if (build_mpt_mfi_pass_thru(instance, cmd)) { 2269 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n"); 2270 return NULL; 2271 } 2272 2273 index = cmd->context.smid; 2274 2275 req_desc = megasas_get_request_descriptor(instance, index - 1); 2276 2277 if (!req_desc) 2278 return NULL; 2279 2280 req_desc->Words = 0; 2281 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2282 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2283 2284 req_desc->SCSIIO.SMID = cpu_to_le16(index); 2285 2286 return req_desc; 2287 } 2288 2289 /** 2290 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd 2291 * @instance: Adapter soft state 2292 * @cmd: mfi cmd pointer 2293 * 2294 */ 2295 void 2296 megasas_issue_dcmd_fusion(struct megasas_instance *instance, 2297 struct megasas_cmd *cmd) 2298 { 2299 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2300 2301 req_desc = build_mpt_cmd(instance, cmd); 2302 if (!req_desc) { 2303 dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n"); 2304 return; 2305 } 2306 megasas_fire_cmd_fusion(instance, req_desc); 2307 } 2308 2309 /** 2310 * megasas_release_fusion - Reverses the FW initialization 2311 * @instance: Adapter soft state 2312 */ 2313 void 2314 megasas_release_fusion(struct megasas_instance *instance) 2315 { 2316 megasas_free_cmds(instance); 2317 megasas_free_cmds_fusion(instance); 2318 2319 iounmap(instance->reg_set); 2320 2321 pci_release_selected_regions(instance->pdev, instance->bar); 2322 } 2323 2324 /** 2325 * megasas_read_fw_status_reg_fusion - returns the current FW status value 2326 * @regs: MFI register set 2327 */ 2328 static u32 2329 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs) 2330 { 2331 return readl(&(regs)->outbound_scratch_pad); 2332 } 2333 2334 /** 2335 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware 2336 * @instance: Controller's soft instance 2337 * return: Number of allocated host crash buffers 2338 */ 2339 static void 2340 
megasas_alloc_host_crash_buffer(struct megasas_instance *instance) 2341 { 2342 unsigned int i; 2343 2344 instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE); 2345 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { 2346 instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL, 2347 instance->crash_buf_pages); 2348 if (!instance->crash_buf[i]) { 2349 dev_info(&instance->pdev->dev, "Firmware crash dump " 2350 "memory allocation failed at index %d\n", i); 2351 break; 2352 } 2353 memset(instance->crash_buf[i], 0, 2354 ((1 << PAGE_SHIFT) << instance->crash_buf_pages)); 2355 } 2356 instance->drv_buf_alloc = i; 2357 } 2358 2359 /** 2360 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware 2361 * @instance: Controller's soft instance 2362 */ 2363 void 2364 megasas_free_host_crash_buffer(struct megasas_instance *instance) 2365 { 2366 unsigned int i 2367 ; 2368 for (i = 0; i < instance->drv_buf_alloc; i++) { 2369 if (instance->crash_buf[i]) 2370 free_pages((ulong)instance->crash_buf[i], 2371 instance->crash_buf_pages); 2372 } 2373 instance->drv_buf_index = 0; 2374 instance->drv_buf_alloc = 0; 2375 instance->fw_crash_state = UNAVAILABLE; 2376 instance->fw_crash_buffer_size = 0; 2377 } 2378 2379 /** 2380 * megasas_adp_reset_fusion - For controller reset 2381 * @regs: MFI register set 2382 */ 2383 static int 2384 megasas_adp_reset_fusion(struct megasas_instance *instance, 2385 struct megasas_register_set __iomem *regs) 2386 { 2387 return 0; 2388 } 2389 2390 /** 2391 * megasas_check_reset_fusion - For controller reset check 2392 * @regs: MFI register set 2393 */ 2394 static int 2395 megasas_check_reset_fusion(struct megasas_instance *instance, 2396 struct megasas_register_set __iomem *regs) 2397 { 2398 return 0; 2399 } 2400 2401 /* This function waits for outstanding commands on fusion to complete */ 2402 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 2403 int iotimeout, int *convert) 2404 { 2405 int i, outstanding, retval = 0, hb_seconds_missed = 0; 2406 u32 fw_state; 2407 2408 for (i = 0; i < resetwaittime; i++) { 2409 /* Check if firmware is in fault state */ 2410 fw_state = instance->instancet->read_fw_status_reg( 2411 instance->reg_set) & MFI_STATE_MASK; 2412 if (fw_state == MFI_STATE_FAULT) { 2413 dev_warn(&instance->pdev->dev, "Found FW in FAULT state," 2414 " will reset adapter scsi%d.\n", 2415 instance->host->host_no); 2416 retval = 1; 2417 goto out; 2418 } 2419 /* If SR-IOV VF mode & heartbeat timeout, don't wait */ 2420 if (instance->requestorId && !iotimeout) { 2421 retval = 1; 2422 goto out; 2423 } 2424 2425 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ 2426 if (instance->requestorId && iotimeout) { 2427 if (instance->hb_host_mem->HB.fwCounter != 2428 instance->hb_host_mem->HB.driverCounter) { 2429 instance->hb_host_mem->HB.driverCounter = 2430 instance->hb_host_mem->HB.fwCounter; 2431 hb_seconds_missed = 0; 2432 } else { 2433 hb_seconds_missed++; 2434 if (hb_seconds_missed == 2435 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { 2436 dev_warn(&instance->pdev->dev, "SR-IOV:" 2437 " Heartbeat never completed " 2438 " while polling during I/O " 2439 " timeout handling for " 2440 "scsi%d.\n", 2441 instance->host->host_no); 2442 *convert = 1; 2443 retval = 1; 2444 goto out; 2445 } 2446 } 2447 } 2448 2449 outstanding = atomic_read(&instance->fw_outstanding); 2450 if (!outstanding) 2451 goto out; 2452 2453 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2454 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2455 "commands 
to complete for scsi%d\n", i, 2456 outstanding, instance->host->host_no); 2457 megasas_complete_cmd_dpc_fusion( 2458 (unsigned long)instance); 2459 } 2460 msleep(1000); 2461 } 2462 2463 if (atomic_read(&instance->fw_outstanding)) { 2464 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 2465 "will reset adapter scsi%d.\n", 2466 instance->host->host_no); 2467 retval = 1; 2468 } 2469 out: 2470 return retval; 2471 } 2472 2473 void megasas_reset_reply_desc(struct megasas_instance *instance) 2474 { 2475 int i, count; 2476 struct fusion_context *fusion; 2477 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 2478 2479 fusion = instance->ctrl_context; 2480 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 2481 for (i = 0 ; i < count ; i++) 2482 fusion->last_reply_idx[i] = 0; 2483 reply_desc = fusion->reply_frames_desc; 2484 for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++) 2485 reply_desc->Words = cpu_to_le64(ULLONG_MAX); 2486 } 2487 2488 /* 2489 * megasas_refire_mgmt_cmd : Re-fire management commands 2490 * @instance: Controller's soft instance 2491 */ 2492 void megasas_refire_mgmt_cmd(struct megasas_instance *instance) 2493 { 2494 int j; 2495 struct megasas_cmd_fusion *cmd_fusion; 2496 struct fusion_context *fusion; 2497 struct megasas_cmd *cmd_mfi; 2498 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2499 u16 smid; 2500 2501 fusion = instance->ctrl_context; 2502 2503 /* Re-fire management commands. 2504 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds. 2505 */ 2506 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { 2507 cmd_fusion = fusion->cmd_list[j]; 2508 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2509 smid = le16_to_cpu(cmd_mfi->context.smid); 2510 2511 if (!smid) 2512 continue; 2513 req_desc = megasas_get_request_descriptor 2514 (instance, smid - 1); 2515 if (req_desc && (cmd_mfi->frame->dcmd.opcode != 2516 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO))) 2517 megasas_fire_cmd_fusion(instance, req_desc); 2518 else 2519 megasas_return_cmd(instance, cmd_mfi); 2520 } 2521 } 2522 2523 /* Check for a second path that is currently UP */ 2524 int megasas_check_mpio_paths(struct megasas_instance *instance, 2525 struct scsi_cmnd *scmd) 2526 { 2527 int i, j, retval = (DID_RESET << 16); 2528 2529 if (instance->mpio && instance->requestorId) { 2530 for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++) 2531 for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++) 2532 if (megasas_mgmt_info.instance[i] && 2533 (megasas_mgmt_info.instance[i] != instance) && 2534 megasas_mgmt_info.instance[i]->mpio && 2535 megasas_mgmt_info.instance[i]->requestorId 2536 && 2537 (megasas_mgmt_info.instance[i]->ld_ids[j] 2538 == scmd->device->id)) { 2539 retval = (DID_NO_CONNECT << 16); 2540 goto out; 2541 } 2542 } 2543 out: 2544 return retval; 2545 } 2546 2547 /* Core fusion reset function */ 2548 int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) 2549 { 2550 int retval = SUCCESS, i, retry = 0, convert = 0; 2551 struct megasas_instance *instance; 2552 struct megasas_cmd_fusion *cmd_fusion; 2553 struct fusion_context *fusion; 2554 u32 host_diag, abs_state, status_reg, reset_adapter; 2555 u32 io_timeout_in_crash_mode = 0; 2556 struct scsi_cmnd *scmd_local = NULL; 2557 2558 instance = (struct megasas_instance *)shost->hostdata; 2559 fusion = instance->ctrl_context; 2560 2561 mutex_lock(&instance->reset_mutex); 2562 2563 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2564 dev_warn(&instance->pdev->dev, "Hardware critical error, " 2565 
"returning FAILED for scsi%d.\n", 2566 instance->host->host_no); 2567 mutex_unlock(&instance->reset_mutex); 2568 return FAILED; 2569 } 2570 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 2571 abs_state = status_reg & MFI_STATE_MASK; 2572 2573 /* IO timeout detected, forcibly put FW in FAULT state */ 2574 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && 2575 instance->crash_dump_app_support && iotimeout) { 2576 dev_info(&instance->pdev->dev, "IO timeout is detected, " 2577 "forcibly FAULT Firmware\n"); 2578 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 2579 status_reg = readl(&instance->reg_set->doorbell); 2580 writel(status_reg | MFI_STATE_FORCE_OCR, 2581 &instance->reg_set->doorbell); 2582 readl(&instance->reg_set->doorbell); 2583 mutex_unlock(&instance->reset_mutex); 2584 do { 2585 ssleep(3); 2586 io_timeout_in_crash_mode++; 2587 dev_dbg(&instance->pdev->dev, "waiting for [%d] " 2588 "seconds for crash dump collection and OCR " 2589 "to be done\n", (io_timeout_in_crash_mode * 3)); 2590 } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && 2591 (io_timeout_in_crash_mode < 80)); 2592 2593 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { 2594 dev_info(&instance->pdev->dev, "OCR done for IO " 2595 "timeout case\n"); 2596 retval = SUCCESS; 2597 } else { 2598 dev_info(&instance->pdev->dev, "Controller is not " 2599 "operational after 240 seconds wait for IO " 2600 "timeout case in FW crash dump mode\n do " 2601 "OCR/kill adapter\n"); 2602 retval = megasas_reset_fusion(shost, 0); 2603 } 2604 return retval; 2605 } 2606 2607 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 2608 del_timer_sync(&instance->sriov_heartbeat_timer); 2609 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 2610 instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING; 2611 instance->instancet->disable_intr(instance); 2612 msleep(1000); 2613 2614 /* First try waiting for commands to complete */ 2615 if (megasas_wait_for_outstanding_fusion(instance, iotimeout, 2616 &convert)) { 2617 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 2618 dev_warn(&instance->pdev->dev, "resetting fusion " 2619 "adapter scsi%d.\n", instance->host->host_no); 2620 if (convert) 2621 iotimeout = 0; 2622 2623 /* Now return commands back to the OS */ 2624 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 2625 cmd_fusion = fusion->cmd_list[i]; 2626 scmd_local = cmd_fusion->scmd; 2627 if (cmd_fusion->scmd) { 2628 scmd_local->result = 2629 megasas_check_mpio_paths(instance, 2630 scmd_local); 2631 megasas_return_cmd_fusion(instance, cmd_fusion); 2632 scsi_dma_unmap(scmd_local); 2633 scmd_local->scsi_done(scmd_local); 2634 atomic_dec(&instance->fw_outstanding); 2635 } 2636 } 2637 2638 status_reg = instance->instancet->read_fw_status_reg( 2639 instance->reg_set); 2640 abs_state = status_reg & MFI_STATE_MASK; 2641 reset_adapter = status_reg & MFI_RESET_ADAPTER; 2642 if (instance->disableOnlineCtrlReset || 2643 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 2644 /* Reset not supported, kill adapter */ 2645 dev_warn(&instance->pdev->dev, "Reset not supported" 2646 ", killing adapter scsi%d.\n", 2647 instance->host->host_no); 2648 megaraid_sas_kill_hba(instance); 2649 instance->skip_heartbeat_timer_del = 1; 2650 retval = FAILED; 2651 goto out; 2652 } 2653 2654 /* Let SR-IOV VF & PF sync up if there was a HB failure */ 2655 if (instance->requestorId && !iotimeout) { 2656 msleep(MEGASAS_OCR_SETTLE_TIME_VF); 2657 /* Look for a late HB update after VF settle time */ 2658 if 
(abs_state == MFI_STATE_OPERATIONAL && 2659 (instance->hb_host_mem->HB.fwCounter != 2660 instance->hb_host_mem->HB.driverCounter)) { 2661 instance->hb_host_mem->HB.driverCounter = 2662 instance->hb_host_mem->HB.fwCounter; 2663 dev_warn(&instance->pdev->dev, "SR-IOV:" 2664 "Late FW heartbeat update for " 2665 "scsi%d.\n", 2666 instance->host->host_no); 2667 } else { 2668 /* In VF mode, first poll for FW ready */ 2669 for (i = 0; 2670 i < (MEGASAS_RESET_WAIT_TIME * 1000); 2671 i += 20) { 2672 status_reg = 2673 instance->instancet-> 2674 read_fw_status_reg( 2675 instance->reg_set); 2676 abs_state = status_reg & 2677 MFI_STATE_MASK; 2678 if (abs_state == MFI_STATE_READY) { 2679 dev_warn(&instance->pdev->dev, 2680 "SR-IOV: FW was found" 2681 "to be in ready state " 2682 "for scsi%d.\n", 2683 instance->host->host_no); 2684 break; 2685 } 2686 msleep(20); 2687 } 2688 if (abs_state != MFI_STATE_READY) { 2689 dev_warn(&instance->pdev->dev, "SR-IOV: " 2690 "FW not in ready state after %d" 2691 " seconds for scsi%d, status_reg = " 2692 "0x%x.\n", 2693 MEGASAS_RESET_WAIT_TIME, 2694 instance->host->host_no, 2695 status_reg); 2696 megaraid_sas_kill_hba(instance); 2697 instance->skip_heartbeat_timer_del = 1; 2698 instance->adprecovery = 2699 MEGASAS_HW_CRITICAL_ERROR; 2700 retval = FAILED; 2701 goto out; 2702 } 2703 } 2704 } 2705 2706 /* Now try to reset the chip */ 2707 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { 2708 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, 2709 &instance->reg_set->fusion_seq_offset); 2710 writel(MPI2_WRSEQ_1ST_KEY_VALUE, 2711 &instance->reg_set->fusion_seq_offset); 2712 writel(MPI2_WRSEQ_2ND_KEY_VALUE, 2713 &instance->reg_set->fusion_seq_offset); 2714 writel(MPI2_WRSEQ_3RD_KEY_VALUE, 2715 &instance->reg_set->fusion_seq_offset); 2716 writel(MPI2_WRSEQ_4TH_KEY_VALUE, 2717 &instance->reg_set->fusion_seq_offset); 2718 writel(MPI2_WRSEQ_5TH_KEY_VALUE, 2719 &instance->reg_set->fusion_seq_offset); 2720 writel(MPI2_WRSEQ_6TH_KEY_VALUE, 2721 &instance->reg_set->fusion_seq_offset); 2722 2723 /* Check that the diag write enable (DRWE) bit is on */ 2724 host_diag = readl(&instance->reg_set->fusion_host_diag); 2725 retry = 0; 2726 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2727 msleep(100); 2728 host_diag = 2729 readl(&instance->reg_set->fusion_host_diag); 2730 if (retry++ == 100) { 2731 dev_warn(&instance->pdev->dev, 2732 "Host diag unlock failed! 
" 2733 "for scsi%d\n", 2734 instance->host->host_no); 2735 break; 2736 } 2737 } 2738 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 2739 continue; 2740 2741 /* Send chip reset command */ 2742 writel(host_diag | HOST_DIAG_RESET_ADAPTER, 2743 &instance->reg_set->fusion_host_diag); 2744 msleep(3000); 2745 2746 /* Make sure reset adapter bit is cleared */ 2747 host_diag = readl(&instance->reg_set->fusion_host_diag); 2748 retry = 0; 2749 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 2750 msleep(100); 2751 host_diag = 2752 readl(&instance->reg_set->fusion_host_diag); 2753 if (retry++ == 1000) { 2754 dev_warn(&instance->pdev->dev, 2755 "Diag reset adapter never " 2756 "cleared for scsi%d!\n", 2757 instance->host->host_no); 2758 break; 2759 } 2760 } 2761 if (host_diag & HOST_DIAG_RESET_ADAPTER) 2762 continue; 2763 2764 abs_state = 2765 instance->instancet->read_fw_status_reg( 2766 instance->reg_set) & MFI_STATE_MASK; 2767 retry = 0; 2768 2769 while ((abs_state <= MFI_STATE_FW_INIT) && 2770 (retry++ < 1000)) { 2771 msleep(100); 2772 abs_state = 2773 instance->instancet->read_fw_status_reg( 2774 instance->reg_set) & MFI_STATE_MASK; 2775 } 2776 if (abs_state <= MFI_STATE_FW_INIT) { 2777 dev_warn(&instance->pdev->dev, "firmware " 2778 "state < MFI_STATE_FW_INIT, state = " 2779 "0x%x for scsi%d\n", abs_state, 2780 instance->host->host_no); 2781 continue; 2782 } 2783 2784 /* Wait for FW to become ready */ 2785 if (megasas_transition_to_ready(instance, 1)) { 2786 dev_warn(&instance->pdev->dev, "Failed to " 2787 "transition controller to ready " 2788 "for scsi%d.\n", 2789 instance->host->host_no); 2790 continue; 2791 } 2792 2793 megasas_reset_reply_desc(instance); 2794 if (megasas_ioc_init_fusion(instance)) { 2795 dev_warn(&instance->pdev->dev, 2796 "megasas_ioc_init_fusion() failed!" 
2797 " for scsi%d\n", 2798 instance->host->host_no); 2799 continue; 2800 } 2801 2802 megasas_refire_mgmt_cmd(instance); 2803 2804 if (megasas_get_ctrl_info(instance)) { 2805 dev_info(&instance->pdev->dev, 2806 "Failed from %s %d\n", 2807 __func__, __LINE__); 2808 megaraid_sas_kill_hba(instance); 2809 retval = FAILED; 2810 } 2811 /* Reset load balance info */ 2812 memset(fusion->load_balance_info, 0, 2813 sizeof(struct LD_LOAD_BALANCE_INFO) 2814 *MAX_LOGICAL_DRIVES_EXT); 2815 2816 if (!megasas_get_map_info(instance)) 2817 megasas_sync_map_info(instance); 2818 2819 clear_bit(MEGASAS_FUSION_IN_RESET, 2820 &instance->reset_flags); 2821 instance->instancet->enable_intr(instance); 2822 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2823 2824 /* Restart SR-IOV heartbeat */ 2825 if (instance->requestorId) { 2826 if (!megasas_sriov_start_heartbeat(instance, 0)) 2827 megasas_start_timer(instance, 2828 &instance->sriov_heartbeat_timer, 2829 megasas_sriov_heartbeat_handler, 2830 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2831 else 2832 instance->skip_heartbeat_timer_del = 1; 2833 } 2834 2835 /* Adapter reset completed successfully */ 2836 dev_warn(&instance->pdev->dev, "Reset " 2837 "successful for scsi%d.\n", 2838 instance->host->host_no); 2839 2840 if (instance->crash_dump_drv_support && 2841 instance->crash_dump_app_support) 2842 megasas_set_crash_dump_params(instance, 2843 MR_CRASH_BUF_TURN_ON); 2844 else 2845 megasas_set_crash_dump_params(instance, 2846 MR_CRASH_BUF_TURN_OFF); 2847 2848 retval = SUCCESS; 2849 goto out; 2850 } 2851 /* Reset failed, kill the adapter */ 2852 dev_warn(&instance->pdev->dev, "Reset failed, killing " 2853 "adapter scsi%d.\n", instance->host->host_no); 2854 megaraid_sas_kill_hba(instance); 2855 instance->skip_heartbeat_timer_del = 1; 2856 retval = FAILED; 2857 } else { 2858 /* For VF: Restart HB timer if we didn't OCR */ 2859 if (instance->requestorId) { 2860 megasas_start_timer(instance, 2861 &instance->sriov_heartbeat_timer, 2862 megasas_sriov_heartbeat_handler, 2863 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2864 } 2865 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 2866 instance->instancet->enable_intr(instance); 2867 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2868 } 2869 out: 2870 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 2871 mutex_unlock(&instance->reset_mutex); 2872 return retval; 2873 } 2874 2875 /* Fusion Crash dump collection work queue */ 2876 void megasas_fusion_crash_dump_wq(struct work_struct *work) 2877 { 2878 struct megasas_instance *instance = 2879 container_of(work, struct megasas_instance, crash_init); 2880 u32 status_reg; 2881 u8 partial_copy = 0; 2882 2883 2884 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 2885 2886 /* 2887 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer 2888 * to host crash buffers 2889 */ 2890 if (instance->drv_buf_index == 0) { 2891 /* Buffer is already allocated for old Crash dump. 
		 * Do OCR and do not wait for crash dump collection.
		 */
		if (instance->drv_buf_alloc) {
			dev_info(&instance->pdev->dev, "earlier crash dump is "
				"not yet copied by application, ignoring this "
				"crash dump and initiating OCR\n");
			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
			writel(status_reg,
				&instance->reg_set->outbound_scratch_pad);
			readl(&instance->reg_set->outbound_scratch_pad);
			return;
		}
		megasas_alloc_host_crash_buffer(instance);
		dev_info(&instance->pdev->dev, "Number of host crash buffers "
			"allocated: %d\n", instance->drv_buf_alloc);
	}

	/*
	 * If the driver has already allocated the maximum number of host
	 * buffers and FW still has more crash dump data, the remaining
	 * data is ignored.
	 */
	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
		dev_info(&instance->pdev->dev, "Driver is done copying "
			"the buffer: %d\n", instance->drv_buf_alloc);
		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
		partial_copy = 1;
	} else {
		memcpy(instance->crash_buf[instance->drv_buf_index],
			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
		instance->drv_buf_index++;
		status_reg &= ~MFI_STATE_DMADONE;
	}

	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
			"of copied buffers: %d\n", instance->drv_buf_index);
		instance->fw_crash_buffer_size = instance->drv_buf_index;
		instance->fw_crash_state = AVAILABLE;
		instance->drv_buf_index = 0;
		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
		readl(&instance->reg_set->outbound_scratch_pad);
		if (!partial_copy)
			megasas_reset_fusion(instance->host, 0);
	} else {
		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
		readl(&instance->reg_set->outbound_scratch_pad);
	}
}

/* Fusion OCR work queue */
void megasas_fusion_ocr_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, work_init);

	megasas_reset_fusion(instance->host, 0);
}

struct megasas_instance_template megasas_instance_template_fusion = {
	.enable_intr = megasas_enable_intr_fusion,
	.disable_intr = megasas_disable_intr_fusion,
	.clear_intr = megasas_clear_intr_fusion,
	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
	.adp_reset = megasas_adp_reset_fusion,
	.check_reset = megasas_check_reset_fusion,
	.service_isr = megasas_isr_fusion,
	.tasklet = megasas_complete_cmd_dpc_fusion,
	.init_adapter = megasas_init_adapter_fusion,
	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
	.issue_dcmd = megasas_issue_dcmd_fusion,
};
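
/*
 * Editor's note: the helpers below are illustrative sketches added by the
 * editor. They are not part of the driver and are never called; the function
 * names are hypothetical and each one only re-states logic already present
 * in this file. This first sketch captures the routing decision that
 * megasas_build_io_fusion() makes for system PD commands: when secure JBOD
 * is supported, non read/write system PD commands are sent down the firmware
 * path (fp_possible = 0), while everything else attempts the fast path built
 * by megasas_build_syspd_fusion().
 */
static inline u8
megasas_syspd_fp_possible_sketch(struct megasas_instance *instance,
				 u8 cmd_type)
{
	if (instance->secure_jbod_support &&
	    (cmd_type == NON_READ_WRITE_SYSPDIO))
		return 0;	/* firmware path */
	return 1;		/* fast path */
}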
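
/*
 * Editor's note: another illustrative sketch (hypothetical helper, unused by
 * the driver). It shows the SMID/descriptor pairing that both
 * megasas_build_and_issue_cmd_fusion() and build_mpt_cmd() rely on: SMIDs
 * are 1-based on the wire while the descriptor array is 0-based, hence the
 * "smid - 1" lookup before the descriptor is zeroed, typed as SCSI IO and
 * stamped with the SMID.
 */
static inline union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_prepare_scsiio_desc_sketch(struct megasas_instance *instance, u16 smid)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = megasas_get_request_descriptor(instance, smid - 1);
	if (!req_desc)
		return NULL;

	req_desc->Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->SCSIIO.SMID = cpu_to_le16(smid);

	return req_desc;
}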
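
/*
 * Editor's note: illustrative sketch only (hypothetical helper, unused). It
 * factors out the reply post host index update that complete_cmd_fusion()
 * performs inline, both every THRESHOLD_REPLY_COUNT completions and once
 * after the loop. On Invader/Fury controllers there is one host index
 * register per group of eight MSI-x vectors, so bits 24-26 carry the vector
 * number within the group and MSIxIndex / 8 selects the register; earlier
 * controllers use the single register at index 0.
 */
static inline void
megasas_write_reply_post_index_sketch(struct megasas_instance *instance,
				      struct fusion_context *fusion,
				      u32 MSIxIndex)
{
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		writel(((MSIxIndex & 0x7) << 24) |
		       fusion->last_reply_idx[MSIxIndex],
		       instance->reply_post_host_index_addr[MSIxIndex / 8]);
	else
		writel((MSIxIndex << 24) |
		       fusion->last_reply_idx[MSIxIndex],
		       instance->reply_post_host_index_addr[0]);
}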
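
/*
 * Editor's note: a final illustrative sketch (hypothetical helper, unused).
 * It re-states the MPI2 write-sequence unlock that megasas_reset_fusion()
 * issues before requesting a chip reset: the six key values are written back
 * to back to the write sequence register, then the host polls
 * fusion_host_diag for the diag write enable (DRWE) bit every 100 ms, giving
 * up after 100 polls (about ten seconds). megasas_reset_fusion() retries the
 * whole unlock on failure rather than returning an error immediately.
 */
static inline int
megasas_unlock_host_diag_sketch(struct megasas_instance *instance)
{
	u32 host_diag;
	int retry = 0;

	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);

	/* Wait for the diag write enable bit to come up */
	host_diag = readl(&instance->reg_set->fusion_host_diag);
	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
		msleep(100);
		host_diag = readl(&instance->reg_set->fusion_host_diag);
		if (retry++ == 100)
			return -1;	/* unlock failed, caller retries */
	}
	return 0;
}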