1 /* 2 * Linux MegaRAID driver for SAS based RAID controllers 3 * 4 * Copyright (c) 2003-2013 LSI Corporation 5 * Copyright (c) 2013-2014 Avago Technologies 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License 9 * as published by the Free Software Foundation; either version 2 10 * of the License, or (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 * 20 * Authors: Avago Technologies 21 * Sreenivas Bagalkote 22 * Sumant Patro 23 * Bo Yang 24 * Adam Radford 25 * Kashyap Desai <kashyap.desai@avagotech.com> 26 * Sumit Saxena <sumit.saxena@avagotech.com> 27 * 28 * Send feedback to: megaraidlinux.pdl@avagotech.com 29 * 30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, 31 * San Jose, California 95131 32 */ 33 34 #include <linux/kernel.h> 35 #include <linux/types.h> 36 #include <linux/pci.h> 37 #include <linux/list.h> 38 #include <linux/moduleparam.h> 39 #include <linux/module.h> 40 #include <linux/spinlock.h> 41 #include <linux/interrupt.h> 42 #include <linux/delay.h> 43 #include <linux/uio.h> 44 #include <linux/slab.h> 45 #include <linux/uaccess.h> 46 #include <asm/unaligned.h> 47 #include <linux/fs.h> 48 #include <linux/compat.h> 49 #include <linux/blkdev.h> 50 #include <linux/mutex.h> 51 #include <linux/poll.h> 52 #include <linux/vmalloc.h> 53 54 #include <scsi/scsi.h> 55 #include <scsi/scsi_cmnd.h> 56 #include <scsi/scsi_device.h> 57 #include <scsi/scsi_host.h> 58 #include <scsi/scsi_tcq.h> 59 #include "megaraid_sas_fusion.h" 60 #include "megaraid_sas.h" 61 62 /* 63 * Number of sectors per IO command 64 * Will be set in megasas_init_mfi if user does not provide 65 */ 66 static unsigned int max_sectors; 67 module_param_named(max_sectors, max_sectors, int, 0); 68 MODULE_PARM_DESC(max_sectors, 69 "Maximum number of sectors per IO command"); 70 71 static int msix_disable; 72 module_param(msix_disable, int, S_IRUGO); 73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 74 75 static unsigned int msix_vectors; 76 module_param(msix_vectors, int, S_IRUGO); 77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); 78 79 static int allow_vf_ioctls; 80 module_param(allow_vf_ioctls, int, S_IRUGO); 81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); 82 83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 84 module_param(throttlequeuedepth, int, S_IRUGO); 85 MODULE_PARM_DESC(throttlequeuedepth, 86 "Adapter queue depth when throttled due to I/O timeout. Default: 16"); 87 88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; 89 module_param(resetwaittime, int, S_IRUGO); 90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " 91 "before resetting adapter. 
Default: 180"); 92 93 int smp_affinity_enable = 1; 94 module_param(smp_affinity_enable, int, S_IRUGO); 95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); 96 97 int rdpq_enable = 1; 98 module_param(rdpq_enable, int, S_IRUGO); 99 MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)"); 100 101 unsigned int dual_qdepth_disable; 102 module_param(dual_qdepth_disable, int, S_IRUGO); 103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); 104 105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 106 module_param(scmd_timeout, int, S_IRUGO); 107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer."); 108 109 MODULE_LICENSE("GPL"); 110 MODULE_VERSION(MEGASAS_VERSION); 111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com"); 112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver"); 113 114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 115 static int megasas_get_pd_list(struct megasas_instance *instance); 116 static int megasas_ld_list_query(struct megasas_instance *instance, 117 u8 query_type); 118 static int megasas_issue_init_mfi(struct megasas_instance *instance); 119 static int megasas_register_aen(struct megasas_instance *instance, 120 u32 seq_num, u32 class_locale_word); 121 static void megasas_get_pd_info(struct megasas_instance *instance, 122 struct scsi_device *sdev); 123 static int megasas_get_target_prop(struct megasas_instance *instance, 124 struct scsi_device *sdev); 125 /* 126 * PCI ID table for all supported controllers 127 */ 128 static struct pci_device_id megasas_pci_table[] = { 129 130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, 131 /* xscale IOP */ 132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, 133 /* ppc IOP */ 134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 135 /* ppc IOP */ 136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 137 /* gen2*/ 138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 139 /* gen2*/ 140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, 141 /* skinny*/ 142 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, 143 /* skinny*/ 144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 145 /* xscale IOP, vega */ 146 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 147 /* xscale IOP */ 148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 149 /* Fusion */ 150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, 151 /* Plasma */ 152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, 153 /* Invader */ 154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 155 /* Fury */ 156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 157 /* Intruder */ 158 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 159 /* Intruder 24 port*/ 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 162 /* VENTURA */ 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 167 
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 168 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, 169 {} 170 }; 171 172 MODULE_DEVICE_TABLE(pci, megasas_pci_table); 173 174 static int megasas_mgmt_majorno; 175 struct megasas_mgmt_info megasas_mgmt_info; 176 static struct fasync_struct *megasas_async_queue; 177 static DEFINE_MUTEX(megasas_async_queue_mutex); 178 179 static int megasas_poll_wait_aen; 180 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 181 static u32 support_poll_for_event; 182 u32 megasas_dbg_lvl; 183 static u32 support_device_change; 184 static bool support_nvme_encapsulation; 185 186 /* define lock for aen poll */ 187 spinlock_t poll_aen_lock; 188 189 void 190 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 191 u8 alt_status); 192 static u32 193 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); 194 static int 195 megasas_adp_reset_gen2(struct megasas_instance *instance, 196 struct megasas_register_set __iomem *reg_set); 197 static irqreturn_t megasas_isr(int irq, void *devp); 198 static u32 199 megasas_init_adapter_mfi(struct megasas_instance *instance); 200 u32 201 megasas_build_and_issue_cmd(struct megasas_instance *instance, 202 struct scsi_cmnd *scmd); 203 static void megasas_complete_cmd_dpc(unsigned long instance_addr); 204 int 205 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 206 int seconds); 207 void megasas_fusion_ocr_wq(struct work_struct *work); 208 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 209 int initial); 210 static int 211 megasas_set_dma_mask(struct megasas_instance *instance); 212 static int 213 megasas_alloc_ctrl_mem(struct megasas_instance *instance); 214 static inline void 215 megasas_free_ctrl_mem(struct megasas_instance *instance); 216 static inline int 217 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance); 218 static inline void 219 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance); 220 static inline void 221 megasas_init_ctrl_params(struct megasas_instance *instance); 222 223 /** 224 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs 225 * @instance: Adapter soft state 226 * @dcmd: DCMD frame inside MFI command 227 * @dma_addr: DMA address of buffer to be passed to FW 228 * @dma_len: Length of DMA buffer to be passed to FW 229 * @return: void 230 */ 231 void megasas_set_dma_settings(struct megasas_instance *instance, 232 struct megasas_dcmd_frame *dcmd, 233 dma_addr_t dma_addr, u32 dma_len) 234 { 235 if (instance->consistent_mask_64bit) { 236 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); 237 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); 238 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); 239 240 } else { 241 dcmd->sgl.sge32[0].phys_addr = 242 cpu_to_le32(lower_32_bits(dma_addr)); 243 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); 244 dcmd->flags = cpu_to_le16(dcmd->flags); 245 } 246 } 247 248 void 249 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 250 { 251 instance->instancet->fire_cmd(instance, 252 cmd->frame_phys_addr, 0, instance->reg_set); 253 return; 254 } 255 256 /** 257 * megasas_get_cmd - Get a command from the free pool 258 * @instance: Adapter soft state 259 * 260 * Returns a free command from the pool 261 */ 262 struct megasas_cmd *megasas_get_cmd(struct megasas_instance 263 *instance) 264 { 265 unsigned long flags; 266 struct megasas_cmd *cmd = NULL; 
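	/*
	 * Free MFI commands live on instance->cmd_pool; the pool is
	 * protected by mfi_pool_lock and NULL is returned when it is empty.
	 */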
267 268 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 269 270 if (!list_empty(&instance->cmd_pool)) { 271 cmd = list_entry((&instance->cmd_pool)->next, 272 struct megasas_cmd, list); 273 list_del_init(&cmd->list); 274 } else { 275 dev_err(&instance->pdev->dev, "Command pool empty!\n"); 276 } 277 278 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 279 return cmd; 280 } 281 282 /** 283 * megasas_return_cmd - Return a cmd to free command pool 284 * @instance: Adapter soft state 285 * @cmd: Command packet to be returned to free command pool 286 */ 287 void 288 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 289 { 290 unsigned long flags; 291 u32 blk_tags; 292 struct megasas_cmd_fusion *cmd_fusion; 293 struct fusion_context *fusion = instance->ctrl_context; 294 295 /* This flag is used only for fusion adapter. 296 * Wait for Interrupt for Polled mode DCMD 297 */ 298 if (cmd->flags & DRV_DCMD_POLLED_MODE) 299 return; 300 301 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 302 303 if (fusion) { 304 blk_tags = instance->max_scsi_cmds + cmd->index; 305 cmd_fusion = fusion->cmd_list[blk_tags]; 306 megasas_return_cmd_fusion(instance, cmd_fusion); 307 } 308 cmd->scmd = NULL; 309 cmd->frame_count = 0; 310 cmd->flags = 0; 311 memset(cmd->frame, 0, instance->mfi_frame_size); 312 cmd->frame->io.context = cpu_to_le32(cmd->index); 313 if (!fusion && reset_devices) 314 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 315 list_add(&cmd->list, (&instance->cmd_pool)->next); 316 317 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 318 319 } 320 321 static const char * 322 format_timestamp(uint32_t timestamp) 323 { 324 static char buffer[32]; 325 326 if ((timestamp & 0xff000000) == 0xff000000) 327 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 328 0x00ffffff); 329 else 330 snprintf(buffer, sizeof(buffer), "%us", timestamp); 331 return buffer; 332 } 333 334 static const char * 335 format_class(int8_t class) 336 { 337 static char buffer[6]; 338 339 switch (class) { 340 case MFI_EVT_CLASS_DEBUG: 341 return "debug"; 342 case MFI_EVT_CLASS_PROGRESS: 343 return "progress"; 344 case MFI_EVT_CLASS_INFO: 345 return "info"; 346 case MFI_EVT_CLASS_WARNING: 347 return "WARN"; 348 case MFI_EVT_CLASS_CRITICAL: 349 return "CRIT"; 350 case MFI_EVT_CLASS_FATAL: 351 return "FATAL"; 352 case MFI_EVT_CLASS_DEAD: 353 return "DEAD"; 354 default: 355 snprintf(buffer, sizeof(buffer), "%d", class); 356 return buffer; 357 } 358 } 359 360 /** 361 * megasas_decode_evt: Decode FW AEN event and print critical event 362 * for information. 
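 * Only events of class MFI_EVT_CLASS_CRITICAL or higher are logged.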
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
{
	return readl(&(regs)->outbound_msg_0);
}
/**
 * megasas_clear_interrupt_xscale - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc - Disable interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_ppc - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
	u32 status, mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_skinny - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_gen2 - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}
/**
 * megasas_fire_cmd_gen2 - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
					retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
				retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 - For
controller reset check 990 * @regs: MFI register set 991 */ 992 static int 993 megasas_check_reset_gen2(struct megasas_instance *instance, 994 struct megasas_register_set __iomem *regs) 995 { 996 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 997 return 1; 998 999 return 0; 1000 } 1001 1002 static struct megasas_instance_template megasas_instance_template_gen2 = { 1003 1004 .fire_cmd = megasas_fire_cmd_gen2, 1005 .enable_intr = megasas_enable_intr_gen2, 1006 .disable_intr = megasas_disable_intr_gen2, 1007 .clear_intr = megasas_clear_intr_gen2, 1008 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 1009 .adp_reset = megasas_adp_reset_gen2, 1010 .check_reset = megasas_check_reset_gen2, 1011 .service_isr = megasas_isr, 1012 .tasklet = megasas_complete_cmd_dpc, 1013 .init_adapter = megasas_init_adapter_mfi, 1014 .build_and_issue_cmd = megasas_build_and_issue_cmd, 1015 .issue_dcmd = megasas_issue_dcmd, 1016 }; 1017 1018 /** 1019 * This is the end of set of functions & definitions 1020 * specific to gen2 (deviceid : 0x78, 0x79) controllers 1021 */ 1022 1023 /* 1024 * Template added for TB (Fusion) 1025 */ 1026 extern struct megasas_instance_template megasas_instance_template_fusion; 1027 1028 /** 1029 * megasas_issue_polled - Issues a polling command 1030 * @instance: Adapter soft state 1031 * @cmd: Command packet to be issued 1032 * 1033 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. 1034 */ 1035 int 1036 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 1037 { 1038 struct megasas_header *frame_hdr = &cmd->frame->hdr; 1039 1040 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; 1041 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1042 1043 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1044 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1045 __func__, __LINE__); 1046 return DCMD_NOT_FIRED; 1047 } 1048 1049 instance->instancet->issue_dcmd(instance, cmd); 1050 1051 return wait_and_poll(instance, cmd, instance->requestorId ? 1052 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); 1053 } 1054 1055 /** 1056 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds 1057 * @instance: Adapter soft state 1058 * @cmd: Command to be issued 1059 * @timeout: Timeout in seconds 1060 * 1061 * This function waits on an event for the command to be returned from ISR. 1062 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 1063 * Used to issue ioctl commands. 1064 */ 1065 int 1066 megasas_issue_blocked_cmd(struct megasas_instance *instance, 1067 struct megasas_cmd *cmd, int timeout) 1068 { 1069 int ret = 0; 1070 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 1071 1072 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1073 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1074 __func__, __LINE__); 1075 return DCMD_NOT_FIRED; 1076 } 1077 1078 instance->instancet->issue_dcmd(instance, cmd); 1079 1080 if (timeout) { 1081 ret = wait_event_timeout(instance->int_cmd_wait_q, 1082 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); 1083 if (!ret) { 1084 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n", 1085 __func__, __LINE__); 1086 return DCMD_TIMEOUT; 1087 } 1088 } else 1089 wait_event(instance->int_cmd_wait_q, 1090 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); 1091 1092 return (cmd->cmd_status_drv == MFI_STAT_OK) ? 
1093 DCMD_SUCCESS : DCMD_FAILED; 1094 } 1095 1096 /** 1097 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd 1098 * @instance: Adapter soft state 1099 * @cmd_to_abort: Previously issued cmd to be aborted 1100 * @timeout: Timeout in seconds 1101 * 1102 * MFI firmware can abort previously issued AEN comamnd (automatic event 1103 * notification). The megasas_issue_blocked_abort_cmd() issues such abort 1104 * cmd and waits for return status. 1105 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 1106 */ 1107 static int 1108 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, 1109 struct megasas_cmd *cmd_to_abort, int timeout) 1110 { 1111 struct megasas_cmd *cmd; 1112 struct megasas_abort_frame *abort_fr; 1113 int ret = 0; 1114 1115 cmd = megasas_get_cmd(instance); 1116 1117 if (!cmd) 1118 return -1; 1119 1120 abort_fr = &cmd->frame->abort; 1121 1122 /* 1123 * Prepare and issue the abort frame 1124 */ 1125 abort_fr->cmd = MFI_CMD_ABORT; 1126 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; 1127 abort_fr->flags = cpu_to_le16(0); 1128 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); 1129 abort_fr->abort_mfi_phys_addr_lo = 1130 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); 1131 abort_fr->abort_mfi_phys_addr_hi = 1132 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 1133 1134 cmd->sync_cmd = 1; 1135 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 1136 1137 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1138 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1139 __func__, __LINE__); 1140 return DCMD_NOT_FIRED; 1141 } 1142 1143 instance->instancet->issue_dcmd(instance, cmd); 1144 1145 if (timeout) { 1146 ret = wait_event_timeout(instance->abort_cmd_wait_q, 1147 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); 1148 if (!ret) { 1149 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n", 1150 __func__, __LINE__); 1151 return DCMD_TIMEOUT; 1152 } 1153 } else 1154 wait_event(instance->abort_cmd_wait_q, 1155 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); 1156 1157 cmd->sync_cmd = 0; 1158 1159 megasas_return_cmd(instance, cmd); 1160 return (cmd->cmd_status_drv == MFI_STAT_OK) ? 1161 DCMD_SUCCESS : DCMD_FAILED; 1162 } 1163 1164 /** 1165 * megasas_make_sgl32 - Prepares 32-bit SGL 1166 * @instance: Adapter soft state 1167 * @scp: SCSI command from the mid-layer 1168 * @mfi_sgl: SGL to be filled in 1169 * 1170 * If successful, this function returns the number of SG elements. Otherwise, 1171 * it returnes -1. 1172 */ 1173 static int 1174 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, 1175 union megasas_sgl *mfi_sgl) 1176 { 1177 int i; 1178 int sge_count; 1179 struct scatterlist *os_sgl; 1180 1181 sge_count = scsi_dma_map(scp); 1182 BUG_ON(sge_count < 0); 1183 1184 if (sge_count) { 1185 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1186 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1187 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); 1188 } 1189 } 1190 return sge_count; 1191 } 1192 1193 /** 1194 * megasas_make_sgl64 - Prepares 64-bit SGL 1195 * @instance: Adapter soft state 1196 * @scp: SCSI command from the mid-layer 1197 * @mfi_sgl: SGL to be filled in 1198 * 1199 * If successful, this function returns the number of SG elements. Otherwise, 1200 * it returnes -1. 
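 * The command's scatterlist is DMA-mapped with scsi_dma_map() and each
 * mapped segment's address/length is stored as a little-endian 64-bit SGE.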
1201 */ 1202 static int 1203 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, 1204 union megasas_sgl *mfi_sgl) 1205 { 1206 int i; 1207 int sge_count; 1208 struct scatterlist *os_sgl; 1209 1210 sge_count = scsi_dma_map(scp); 1211 BUG_ON(sge_count < 0); 1212 1213 if (sge_count) { 1214 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1215 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1216 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); 1217 } 1218 } 1219 return sge_count; 1220 } 1221 1222 /** 1223 * megasas_make_sgl_skinny - Prepares IEEE SGL 1224 * @instance: Adapter soft state 1225 * @scp: SCSI command from the mid-layer 1226 * @mfi_sgl: SGL to be filled in 1227 * 1228 * If successful, this function returns the number of SG elements. Otherwise, 1229 * it returnes -1. 1230 */ 1231 static int 1232 megasas_make_sgl_skinny(struct megasas_instance *instance, 1233 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) 1234 { 1235 int i; 1236 int sge_count; 1237 struct scatterlist *os_sgl; 1238 1239 sge_count = scsi_dma_map(scp); 1240 1241 if (sge_count) { 1242 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1243 mfi_sgl->sge_skinny[i].length = 1244 cpu_to_le32(sg_dma_len(os_sgl)); 1245 mfi_sgl->sge_skinny[i].phys_addr = 1246 cpu_to_le64(sg_dma_address(os_sgl)); 1247 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); 1248 } 1249 } 1250 return sge_count; 1251 } 1252 1253 /** 1254 * megasas_get_frame_count - Computes the number of frames 1255 * @frame_type : type of frame- io or pthru frame 1256 * @sge_count : number of sg elements 1257 * 1258 * Returns the number of frames required for numnber of sge's (sge_count) 1259 */ 1260 1261 static u32 megasas_get_frame_count(struct megasas_instance *instance, 1262 u8 sge_count, u8 frame_type) 1263 { 1264 int num_cnt; 1265 int sge_bytes; 1266 u32 sge_sz; 1267 u32 frame_count = 0; 1268 1269 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 1270 sizeof(struct megasas_sge32); 1271 1272 if (instance->flag_ieee) { 1273 sge_sz = sizeof(struct megasas_sge_skinny); 1274 } 1275 1276 /* 1277 * Main frame can contain 2 SGEs for 64-bit SGLs and 1278 * 3 SGEs for 32-bit SGLs for ldio & 1279 * 1 SGEs for 64-bit SGLs and 1280 * 2 SGEs for 32-bit SGLs for pthru frame 1281 */ 1282 if (unlikely(frame_type == PTHRU_FRAME)) { 1283 if (instance->flag_ieee == 1) { 1284 num_cnt = sge_count - 1; 1285 } else if (IS_DMA64) 1286 num_cnt = sge_count - 1; 1287 else 1288 num_cnt = sge_count - 2; 1289 } else { 1290 if (instance->flag_ieee == 1) { 1291 num_cnt = sge_count - 1; 1292 } else if (IS_DMA64) 1293 num_cnt = sge_count - 2; 1294 else 1295 num_cnt = sge_count - 3; 1296 } 1297 1298 if (num_cnt > 0) { 1299 sge_bytes = sge_sz * num_cnt; 1300 1301 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 1302 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; 1303 } 1304 /* Main frame */ 1305 frame_count += 1; 1306 1307 if (frame_count > 7) 1308 frame_count = 8; 1309 return frame_count; 1310 } 1311 1312 /** 1313 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command 1314 * @instance: Adapter soft state 1315 * @scp: SCSI command 1316 * @cmd: Command to be prepared in 1317 * 1318 * This function prepares CDB commands. These are typcially pass-through 1319 * commands to the devices. 
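 *
 * The frame carries the CDB verbatim along with the data direction flags,
 * the sense buffer address and an SGL built by the megasas_make_sgl*
 * helpers; the resulting frame count is stored in cmd->frame_count.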
1320 */ 1321 static int 1322 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1323 struct megasas_cmd *cmd) 1324 { 1325 u32 is_logical; 1326 u32 device_id; 1327 u16 flags = 0; 1328 struct megasas_pthru_frame *pthru; 1329 1330 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1331 device_id = MEGASAS_DEV_INDEX(scp); 1332 pthru = (struct megasas_pthru_frame *)cmd->frame; 1333 1334 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1335 flags = MFI_FRAME_DIR_WRITE; 1336 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1337 flags = MFI_FRAME_DIR_READ; 1338 else if (scp->sc_data_direction == PCI_DMA_NONE) 1339 flags = MFI_FRAME_DIR_NONE; 1340 1341 if (instance->flag_ieee == 1) { 1342 flags |= MFI_FRAME_IEEE; 1343 } 1344 1345 /* 1346 * Prepare the DCDB frame 1347 */ 1348 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1349 pthru->cmd_status = 0x0; 1350 pthru->scsi_status = 0x0; 1351 pthru->target_id = device_id; 1352 pthru->lun = scp->device->lun; 1353 pthru->cdb_len = scp->cmd_len; 1354 pthru->timeout = 0; 1355 pthru->pad_0 = 0; 1356 pthru->flags = cpu_to_le16(flags); 1357 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1358 1359 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1360 1361 /* 1362 * If the command is for the tape device, set the 1363 * pthru timeout to the os layer timeout value. 1364 */ 1365 if (scp->device->type == TYPE_TAPE) { 1366 if ((scp->request->timeout / HZ) > 0xFFFF) 1367 pthru->timeout = cpu_to_le16(0xFFFF); 1368 else 1369 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1370 } 1371 1372 /* 1373 * Construct SGL 1374 */ 1375 if (instance->flag_ieee == 1) { 1376 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1377 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1378 &pthru->sgl); 1379 } else if (IS_DMA64) { 1380 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1381 pthru->sge_count = megasas_make_sgl64(instance, scp, 1382 &pthru->sgl); 1383 } else 1384 pthru->sge_count = megasas_make_sgl32(instance, scp, 1385 &pthru->sgl); 1386 1387 if (pthru->sge_count > instance->max_num_sge) { 1388 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1389 pthru->sge_count); 1390 return 0; 1391 } 1392 1393 /* 1394 * Sense info specific 1395 */ 1396 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1397 pthru->sense_buf_phys_addr_hi = 1398 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1399 pthru->sense_buf_phys_addr_lo = 1400 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1401 1402 /* 1403 * Compute the total number of frames this command consumes. FW uses 1404 * this number to pull sufficient number of frames from host memory. 1405 */ 1406 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1407 PTHRU_FRAME); 1408 1409 return cmd->frame_count; 1410 } 1411 1412 /** 1413 * megasas_build_ldio - Prepares IOs to logical devices 1414 * @instance: Adapter soft state 1415 * @scp: SCSI command 1416 * @cmd: Command to be prepared 1417 * 1418 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
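 *
 * The LBA and transfer length are decoded from the 6/10/12/16-byte
 * READ/WRITE CDB into start_lba_lo/start_lba_hi and lba_count before the
 * SGL is constructed.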
1419 */ 1420 static int 1421 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1422 struct megasas_cmd *cmd) 1423 { 1424 u32 device_id; 1425 u8 sc = scp->cmnd[0]; 1426 u16 flags = 0; 1427 struct megasas_io_frame *ldio; 1428 1429 device_id = MEGASAS_DEV_INDEX(scp); 1430 ldio = (struct megasas_io_frame *)cmd->frame; 1431 1432 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1433 flags = MFI_FRAME_DIR_WRITE; 1434 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1435 flags = MFI_FRAME_DIR_READ; 1436 1437 if (instance->flag_ieee == 1) { 1438 flags |= MFI_FRAME_IEEE; 1439 } 1440 1441 /* 1442 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1443 */ 1444 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1445 ldio->cmd_status = 0x0; 1446 ldio->scsi_status = 0x0; 1447 ldio->target_id = device_id; 1448 ldio->timeout = 0; 1449 ldio->reserved_0 = 0; 1450 ldio->pad_0 = 0; 1451 ldio->flags = cpu_to_le16(flags); 1452 ldio->start_lba_hi = 0; 1453 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1454 1455 /* 1456 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1457 */ 1458 if (scp->cmd_len == 6) { 1459 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1460 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1461 ((u32) scp->cmnd[2] << 8) | 1462 (u32) scp->cmnd[3]); 1463 1464 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1465 } 1466 1467 /* 1468 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1469 */ 1470 else if (scp->cmd_len == 10) { 1471 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1472 ((u32) scp->cmnd[7] << 8)); 1473 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1474 ((u32) scp->cmnd[3] << 16) | 1475 ((u32) scp->cmnd[4] << 8) | 1476 (u32) scp->cmnd[5]); 1477 } 1478 1479 /* 1480 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1481 */ 1482 else if (scp->cmd_len == 12) { 1483 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1484 ((u32) scp->cmnd[7] << 16) | 1485 ((u32) scp->cmnd[8] << 8) | 1486 (u32) scp->cmnd[9]); 1487 1488 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1489 ((u32) scp->cmnd[3] << 16) | 1490 ((u32) scp->cmnd[4] << 8) | 1491 (u32) scp->cmnd[5]); 1492 } 1493 1494 /* 1495 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1496 */ 1497 else if (scp->cmd_len == 16) { 1498 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1499 ((u32) scp->cmnd[11] << 16) | 1500 ((u32) scp->cmnd[12] << 8) | 1501 (u32) scp->cmnd[13]); 1502 1503 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1504 ((u32) scp->cmnd[7] << 16) | 1505 ((u32) scp->cmnd[8] << 8) | 1506 (u32) scp->cmnd[9]); 1507 1508 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1509 ((u32) scp->cmnd[3] << 16) | 1510 ((u32) scp->cmnd[4] << 8) | 1511 (u32) scp->cmnd[5]); 1512 1513 } 1514 1515 /* 1516 * Construct SGL 1517 */ 1518 if (instance->flag_ieee) { 1519 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1520 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1521 &ldio->sgl); 1522 } else if (IS_DMA64) { 1523 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1524 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1525 } else 1526 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1527 1528 if (ldio->sge_count > instance->max_num_sge) { 1529 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1530 ldio->sge_count); 1531 return 0; 1532 } 1533 1534 /* 1535 * Sense info specific 1536 */ 1537 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1538 ldio->sense_buf_phys_addr_hi = 0; 1539 
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1540 1541 /* 1542 * Compute the total number of frames this command consumes. FW uses 1543 * this number to pull sufficient number of frames from host memory. 1544 */ 1545 cmd->frame_count = megasas_get_frame_count(instance, 1546 ldio->sge_count, IO_FRAME); 1547 1548 return cmd->frame_count; 1549 } 1550 1551 /** 1552 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1553 * and whether it's RW or non RW 1554 * @scmd: SCSI command 1555 * 1556 */ 1557 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1558 { 1559 int ret; 1560 1561 switch (cmd->cmnd[0]) { 1562 case READ_10: 1563 case WRITE_10: 1564 case READ_12: 1565 case WRITE_12: 1566 case READ_6: 1567 case WRITE_6: 1568 case READ_16: 1569 case WRITE_16: 1570 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1571 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1572 break; 1573 default: 1574 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1575 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1576 } 1577 return ret; 1578 } 1579 1580 /** 1581 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1582 * in FW 1583 * @instance: Adapter soft state 1584 */ 1585 static inline void 1586 megasas_dump_pending_frames(struct megasas_instance *instance) 1587 { 1588 struct megasas_cmd *cmd; 1589 int i,n; 1590 union megasas_sgl *mfi_sgl; 1591 struct megasas_io_frame *ldio; 1592 struct megasas_pthru_frame *pthru; 1593 u32 sgcount; 1594 u16 max_cmd = instance->max_fw_cmds; 1595 1596 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1597 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1598 if (IS_DMA64) 1599 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1600 else 1601 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1602 1603 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1604 for (i = 0; i < max_cmd; i++) { 1605 cmd = instance->cmd_list[i]; 1606 if (!cmd->scmd) 1607 continue; 1608 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1609 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1610 ldio = (struct megasas_io_frame *)cmd->frame; 1611 mfi_sgl = &ldio->sgl; 1612 sgcount = ldio->sge_count; 1613 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1614 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1615 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1616 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1617 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1618 } else { 1619 pthru = (struct megasas_pthru_frame *) cmd->frame; 1620 mfi_sgl = &pthru->sgl; 1621 sgcount = pthru->sge_count; 1622 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1623 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1624 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1625 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1626 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1627 } 1628 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1629 for (n = 0; n < sgcount; n++) { 1630 if (IS_DMA64) 1631 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1632 le32_to_cpu(mfi_sgl->sge64[n].length), 1633 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1634 else 1635 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1636 le32_to_cpu(mfi_sgl->sge32[n].length), 1637 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1638 } 1639 } 1640 } /*for max_cmd*/ 1641 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1642 for (i = 0; i < max_cmd; i++) { 1643 1644 cmd = instance->cmd_list[i]; 1645 1646 if (cmd->sync_cmd == 1) 1647 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1648 } 1649 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1650 } 1651 1652 u32 1653 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1654 struct scsi_cmnd *scmd) 1655 { 1656 struct megasas_cmd *cmd; 1657 u32 frame_count; 1658 1659 cmd = megasas_get_cmd(instance); 1660 if (!cmd) 1661 return SCSI_MLQUEUE_HOST_BUSY; 1662 1663 /* 1664 * Logical drive command 1665 */ 1666 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1667 frame_count = megasas_build_ldio(instance, scmd, cmd); 1668 else 1669 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1670 1671 if (!frame_count) 1672 goto out_return_cmd; 1673 1674 cmd->scmd = scmd; 1675 scmd->SCp.ptr = (char *)cmd; 1676 1677 /* 1678 * Issue the command to the FW 1679 */ 1680 atomic_inc(&instance->fw_outstanding); 1681 1682 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1683 cmd->frame_count-1, instance->reg_set); 1684 1685 return 0; 1686 out_return_cmd: 1687 megasas_return_cmd(instance, cmd); 1688 return SCSI_MLQUEUE_HOST_BUSY; 1689 } 1690 1691 1692 /** 1693 * megasas_queue_command - Queue entry point 1694 * @scmd: SCSI command to be queued 1695 * @done: Callback entry point 1696 */ 1697 static int 1698 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1699 { 1700 struct megasas_instance *instance; 1701 struct MR_PRIV_DEVICE *mr_device_priv_data; 1702 1703 instance = (struct megasas_instance *) 1704 scmd->device->host->hostdata; 1705 1706 if (instance->unload == 1) { 1707 scmd->result = DID_NO_CONNECT << 16; 1708 scmd->scsi_done(scmd); 1709 return 0; 1710 } 1711 1712 if (instance->issuepend_done == 0) 1713 return SCSI_MLQUEUE_HOST_BUSY; 1714 1715 1716 /* Check for an mpio path and adjust behavior */ 1717 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1718 if (megasas_check_mpio_paths(instance, scmd) == 1719 (DID_REQUEUE << 16)) { 1720 return SCSI_MLQUEUE_HOST_BUSY; 1721 } else { 1722 scmd->result = DID_NO_CONNECT << 16; 1723 scmd->scsi_done(scmd); 1724 return 0; 1725 } 1726 } 1727 1728 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1729 scmd->result = DID_NO_CONNECT << 16; 1730 scmd->scsi_done(scmd); 1731 return 0; 1732 } 1733 1734 mr_device_priv_data = scmd->device->hostdata; 1735 if (!mr_device_priv_data) { 1736 scmd->result = DID_NO_CONNECT << 16; 1737 scmd->scsi_done(scmd); 1738 return 0; 1739 } 1740 1741 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1742 return SCSI_MLQUEUE_HOST_BUSY; 1743 1744 if (mr_device_priv_data->tm_busy) 1745 return SCSI_MLQUEUE_DEVICE_BUSY; 1746 1747 1748 scmd->result = 0; 1749 1750 if (MEGASAS_IS_LOGICAL(scmd->device) && 1751 (scmd->device->id >= instance->fw_supported_vd_count || 1752 scmd->device->lun)) { 1753 scmd->result = DID_BAD_TARGET << 16; 1754 goto out_done; 1755 } 1756 1757 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1758 
MEGASAS_IS_LOGICAL(scmd->device) && 1759 (!instance->fw_sync_cache_support)) { 1760 scmd->result = DID_OK << 16; 1761 goto out_done; 1762 } 1763 1764 return instance->instancet->build_and_issue_cmd(instance, scmd); 1765 1766 out_done: 1767 scmd->scsi_done(scmd); 1768 return 0; 1769 } 1770 1771 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1772 { 1773 int i; 1774 1775 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1776 1777 if ((megasas_mgmt_info.instance[i]) && 1778 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1779 return megasas_mgmt_info.instance[i]; 1780 } 1781 1782 return NULL; 1783 } 1784 1785 /* 1786 * megasas_set_dynamic_target_properties - 1787 * Device property set by driver may not be static and it is required to be 1788 * updated after OCR 1789 * 1790 * set tm_capable. 1791 * set dma alignment (only for eedp protection enable vd). 1792 * 1793 * @sdev: OS provided scsi device 1794 * 1795 * Returns void 1796 */ 1797 void megasas_set_dynamic_target_properties(struct scsi_device *sdev) 1798 { 1799 u16 pd_index = 0, ld; 1800 u32 device_id; 1801 struct megasas_instance *instance; 1802 struct fusion_context *fusion; 1803 struct MR_PRIV_DEVICE *mr_device_priv_data; 1804 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1805 struct MR_LD_RAID *raid; 1806 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1807 1808 instance = megasas_lookup_instance(sdev->host->host_no); 1809 fusion = instance->ctrl_context; 1810 mr_device_priv_data = sdev->hostdata; 1811 1812 if (!fusion || !mr_device_priv_data) 1813 return; 1814 1815 if (MEGASAS_IS_LOGICAL(sdev)) { 1816 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1817 + sdev->id; 1818 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1819 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1820 if (ld >= instance->fw_supported_vd_count) 1821 return; 1822 raid = MR_LdRaidGet(ld, local_map_ptr); 1823 1824 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1825 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1826 1827 mr_device_priv_data->is_tm_capable = 1828 raid->capability.tmCapable; 1829 } else if (instance->use_seqnum_jbod_fp) { 1830 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1831 sdev->id; 1832 pd_sync = (void *)fusion->pd_seq_sync 1833 [(instance->pd_seq_map_id - 1) & 1]; 1834 mr_device_priv_data->is_tm_capable = 1835 pd_sync->seq[pd_index].capability.tmCapable; 1836 } 1837 } 1838 1839 /* 1840 * megasas_set_nvme_device_properties - 1841 * set nomerges=2 1842 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1843 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 1844 * 1845 * MR firmware provides value in KB. Caller of this function converts 1846 * kb into bytes. 1847 * 1848 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1849 * MR firmware provides value 128 as (32 * 4K) = 128K. 
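 * The max_io_size argument is therefore max_io_size_kb << 10 bytes; it is
 * converted to 512-byte sectors for blk_queue_max_hw_sectors() below.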
1850 * 1851 * @sdev: scsi device 1852 * @max_io_size: maximum io transfer size 1853 * 1854 */ 1855 static inline void 1856 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1857 { 1858 struct megasas_instance *instance; 1859 u32 mr_nvme_pg_size; 1860 1861 instance = (struct megasas_instance *)sdev->host->hostdata; 1862 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1863 MR_DEFAULT_NVME_PAGE_SIZE); 1864 1865 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1866 1867 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1868 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1869 } 1870 1871 1872 /* 1873 * megasas_set_static_target_properties - 1874 * Device property set by driver are static and it is not required to be 1875 * updated after OCR. 1876 * 1877 * set io timeout 1878 * set device queue depth 1879 * set nvme device properties. see - megasas_set_nvme_device_properties 1880 * 1881 * @sdev: scsi device 1882 * @is_target_prop true, if fw provided target properties. 1883 */ 1884 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1885 bool is_target_prop) 1886 { 1887 u16 target_index = 0; 1888 u8 interface_type; 1889 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1890 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1891 u32 tgt_device_qd; 1892 struct megasas_instance *instance; 1893 struct MR_PRIV_DEVICE *mr_device_priv_data; 1894 1895 instance = megasas_lookup_instance(sdev->host->host_no); 1896 mr_device_priv_data = sdev->hostdata; 1897 interface_type = mr_device_priv_data->interface_type; 1898 1899 /* 1900 * The RAID firmware may require extended timeouts. 1901 */ 1902 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1903 1904 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 1905 1906 switch (interface_type) { 1907 case SAS_PD: 1908 device_qd = MEGASAS_SAS_QD; 1909 break; 1910 case SATA_PD: 1911 device_qd = MEGASAS_SATA_QD; 1912 break; 1913 case NVME_PD: 1914 device_qd = MEGASAS_NVME_QD; 1915 break; 1916 } 1917 1918 if (is_target_prop) { 1919 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1920 if (tgt_device_qd && 1921 (tgt_device_qd <= instance->host->can_queue)) 1922 device_qd = tgt_device_qd; 1923 1924 /* max_io_size_kb will be set to non zero for 1925 * nvme based vd and syspd. 1926 */ 1927 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1928 } 1929 1930 if (instance->nvme_page_size && max_io_size_kb) 1931 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 1932 1933 scsi_change_queue_depth(sdev, device_qd); 1934 1935 } 1936 1937 1938 static int megasas_slave_configure(struct scsi_device *sdev) 1939 { 1940 u16 pd_index = 0; 1941 struct megasas_instance *instance; 1942 int ret_target_prop = DCMD_FAILED; 1943 bool is_target_prop = false; 1944 1945 instance = megasas_lookup_instance(sdev->host->host_no); 1946 if (instance->pd_list_not_supported) { 1947 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 1948 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1949 sdev->id; 1950 if (instance->pd_list[pd_index].driveState != 1951 MR_PD_STATE_SYSTEM) 1952 return -ENXIO; 1953 } 1954 } 1955 1956 mutex_lock(&instance->reset_mutex); 1957 /* Send DCMD to Firmware and cache the information */ 1958 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 1959 megasas_get_pd_info(instance, sdev); 1960 1961 /* Some ventura firmware may not have instance->nvme_page_size set. 
1962 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 1963 */ 1964 if ((instance->tgt_prop) && (instance->nvme_page_size)) 1965 ret_target_prop = megasas_get_target_prop(instance, sdev); 1966 1967 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 1968 megasas_set_static_target_properties(sdev, is_target_prop); 1969 1970 mutex_unlock(&instance->reset_mutex); 1971 1972 /* This sdev property may change post OCR */ 1973 megasas_set_dynamic_target_properties(sdev); 1974 1975 return 0; 1976 } 1977 1978 static int megasas_slave_alloc(struct scsi_device *sdev) 1979 { 1980 u16 pd_index = 0; 1981 struct megasas_instance *instance ; 1982 struct MR_PRIV_DEVICE *mr_device_priv_data; 1983 1984 instance = megasas_lookup_instance(sdev->host->host_no); 1985 if (!MEGASAS_IS_LOGICAL(sdev)) { 1986 /* 1987 * Open the OS scan to the SYSTEM PD 1988 */ 1989 pd_index = 1990 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1991 sdev->id; 1992 if ((instance->pd_list_not_supported || 1993 instance->pd_list[pd_index].driveState == 1994 MR_PD_STATE_SYSTEM)) { 1995 goto scan_target; 1996 } 1997 return -ENXIO; 1998 } 1999 2000 scan_target: 2001 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2002 GFP_KERNEL); 2003 if (!mr_device_priv_data) 2004 return -ENOMEM; 2005 sdev->hostdata = mr_device_priv_data; 2006 2007 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2008 instance->r1_ldio_hint_default); 2009 return 0; 2010 } 2011 2012 static void megasas_slave_destroy(struct scsi_device *sdev) 2013 { 2014 kfree(sdev->hostdata); 2015 sdev->hostdata = NULL; 2016 } 2017 2018 /* 2019 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2020 * kill adapter 2021 * @instance: Adapter soft state 2022 * 2023 */ 2024 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2025 { 2026 int i; 2027 struct megasas_cmd *cmd_mfi; 2028 struct megasas_cmd_fusion *cmd_fusion; 2029 struct fusion_context *fusion = instance->ctrl_context; 2030 2031 /* Find all outstanding ioctls */ 2032 if (fusion) { 2033 for (i = 0; i < instance->max_fw_cmds; i++) { 2034 cmd_fusion = fusion->cmd_list[i]; 2035 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2036 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2037 if (cmd_mfi->sync_cmd && 2038 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2039 cmd_mfi->frame->hdr.cmd_status = 2040 MFI_STAT_WRONG_STATE; 2041 megasas_complete_cmd(instance, 2042 cmd_mfi, DID_OK); 2043 } 2044 } 2045 } 2046 } else { 2047 for (i = 0; i < instance->max_fw_cmds; i++) { 2048 cmd_mfi = instance->cmd_list[i]; 2049 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2050 MFI_CMD_ABORT) 2051 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2052 } 2053 } 2054 } 2055 2056 2057 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2058 { 2059 /* Set critical error to block I/O & ioctls in case caller didn't */ 2060 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2061 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2062 msleep(1000); 2063 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2064 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2065 (instance->adapter_type != MFI_SERIES)) { 2066 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2067 /* Flush */ 2068 readl(&instance->reg_set->doorbell); 2069 if (instance->requestorId && instance->peerIsPresent) 2070 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2071 } else { 2072 writel(MFI_STOP_ADP, 2073 &instance->reg_set->inbound_doorbell); 2074 
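		/* Both paths above post MFI_STOP_ADP: SKINNY and non-MFI_SERIES
		 * controllers use the doorbell register and flush it with a
		 * readl(), while legacy MFI-series controllers are stopped via
		 * inbound_doorbell. */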
} 2075 /* Complete outstanding ioctls when adapter is killed */ 2076 megasas_complete_outstanding_ioctls(instance); 2077 } 2078 2079 /** 2080 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2081 * restored to max value 2082 * @instance: Adapter soft state 2083 * 2084 */ 2085 void 2086 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2087 { 2088 unsigned long flags; 2089 2090 if (instance->flag & MEGASAS_FW_BUSY 2091 && time_after(jiffies, instance->last_time + 5 * HZ) 2092 && atomic_read(&instance->fw_outstanding) < 2093 instance->throttlequeuedepth + 1) { 2094 2095 spin_lock_irqsave(instance->host->host_lock, flags); 2096 instance->flag &= ~MEGASAS_FW_BUSY; 2097 2098 instance->host->can_queue = instance->cur_can_queue; 2099 spin_unlock_irqrestore(instance->host->host_lock, flags); 2100 } 2101 } 2102 2103 /** 2104 * megasas_complete_cmd_dpc - Returns FW's controller structure 2105 * @instance_addr: Address of adapter soft state 2106 * 2107 * Tasklet to complete cmds 2108 */ 2109 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2110 { 2111 u32 producer; 2112 u32 consumer; 2113 u32 context; 2114 struct megasas_cmd *cmd; 2115 struct megasas_instance *instance = 2116 (struct megasas_instance *)instance_addr; 2117 unsigned long flags; 2118 2119 /* If we have already declared adapter dead, donot complete cmds */ 2120 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2121 return; 2122 2123 spin_lock_irqsave(&instance->completion_lock, flags); 2124 2125 producer = le32_to_cpu(*instance->producer); 2126 consumer = le32_to_cpu(*instance->consumer); 2127 2128 while (consumer != producer) { 2129 context = le32_to_cpu(instance->reply_queue[consumer]); 2130 if (context >= instance->max_fw_cmds) { 2131 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2132 context); 2133 BUG(); 2134 } 2135 2136 cmd = instance->cmd_list[context]; 2137 2138 megasas_complete_cmd(instance, cmd, DID_OK); 2139 2140 consumer++; 2141 if (consumer == (instance->max_fw_cmds + 1)) { 2142 consumer = 0; 2143 } 2144 } 2145 2146 *instance->consumer = cpu_to_le32(producer); 2147 2148 spin_unlock_irqrestore(&instance->completion_lock, flags); 2149 2150 /* 2151 * Check if we can restore can_queue 2152 */ 2153 megasas_check_and_restore_queue_depth(instance); 2154 } 2155 2156 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2157 2158 /** 2159 * megasas_start_timer - Initializes sriov heartbeat timer object 2160 * @instance: Adapter soft state 2161 * 2162 */ 2163 void megasas_start_timer(struct megasas_instance *instance) 2164 { 2165 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2166 2167 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2168 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2169 add_timer(timer); 2170 } 2171 2172 static void 2173 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2174 2175 static void 2176 process_fw_state_change_wq(struct work_struct *work); 2177 2178 void megasas_do_ocr(struct megasas_instance *instance) 2179 { 2180 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2181 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2182 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2183 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2184 } 2185 instance->instancet->disable_intr(instance); 2186 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2187 instance->issuepend_done = 0; 2188 
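	/* At this point interrupts are disabled and adprecovery is set to
	 * MEGASAS_ADPRESET_SM_INFAULT; the steps below zero the outstanding
	 * command counter, defer the pending internal commands and run the
	 * firmware state-change work synchronously. */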
2189 atomic_set(&instance->fw_outstanding, 0); 2190 megasas_internal_reset_defer_cmds(instance); 2191 process_fw_state_change_wq(&instance->work_init); 2192 } 2193 2194 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2195 int initial) 2196 { 2197 struct megasas_cmd *cmd; 2198 struct megasas_dcmd_frame *dcmd; 2199 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2200 dma_addr_t new_affiliation_111_h; 2201 int ld, retval = 0; 2202 u8 thisVf; 2203 2204 cmd = megasas_get_cmd(instance); 2205 2206 if (!cmd) { 2207 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2208 "Failed to get cmd for scsi%d\n", 2209 instance->host->host_no); 2210 return -ENOMEM; 2211 } 2212 2213 dcmd = &cmd->frame->dcmd; 2214 2215 if (!instance->vf_affiliation_111) { 2216 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2217 "affiliation for scsi%d\n", instance->host->host_no); 2218 megasas_return_cmd(instance, cmd); 2219 return -ENOMEM; 2220 } 2221 2222 if (initial) 2223 memset(instance->vf_affiliation_111, 0, 2224 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2225 else { 2226 new_affiliation_111 = 2227 pci_zalloc_consistent(instance->pdev, 2228 sizeof(struct MR_LD_VF_AFFILIATION_111), 2229 &new_affiliation_111_h); 2230 if (!new_affiliation_111) { 2231 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2232 "memory for new affiliation for scsi%d\n", 2233 instance->host->host_no); 2234 megasas_return_cmd(instance, cmd); 2235 return -ENOMEM; 2236 } 2237 } 2238 2239 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2240 2241 dcmd->cmd = MFI_CMD_DCMD; 2242 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2243 dcmd->sge_count = 1; 2244 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2245 dcmd->timeout = 0; 2246 dcmd->pad_0 = 0; 2247 dcmd->data_xfer_len = 2248 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2249 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2250 2251 if (initial) 2252 dcmd->sgl.sge32[0].phys_addr = 2253 cpu_to_le32(instance->vf_affiliation_111_h); 2254 else 2255 dcmd->sgl.sge32[0].phys_addr = 2256 cpu_to_le32(new_affiliation_111_h); 2257 2258 dcmd->sgl.sge32[0].length = cpu_to_le32( 2259 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2260 2261 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2262 "scsi%d\n", instance->host->host_no); 2263 2264 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2265 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2266 " failed with status 0x%x for scsi%d\n", 2267 dcmd->cmd_status, instance->host->host_no); 2268 retval = 1; /* Do a scan if we couldn't get affiliation */ 2269 goto out; 2270 } 2271 2272 if (!initial) { 2273 thisVf = new_affiliation_111->thisVf; 2274 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2275 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2276 new_affiliation_111->map[ld].policy[thisVf]) { 2277 dev_warn(&instance->pdev->dev, "SR-IOV: " 2278 "Got new LD/VF affiliation for scsi%d\n", 2279 instance->host->host_no); 2280 memcpy(instance->vf_affiliation_111, 2281 new_affiliation_111, 2282 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2283 retval = 1; 2284 goto out; 2285 } 2286 } 2287 out: 2288 if (new_affiliation_111) { 2289 pci_free_consistent(instance->pdev, 2290 sizeof(struct MR_LD_VF_AFFILIATION_111), 2291 new_affiliation_111, 2292 new_affiliation_111_h); 2293 } 2294 2295 megasas_return_cmd(instance, cmd); 2296 2297 return retval; 2298 } 2299 2300 static int 
megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2301 int initial) 2302 { 2303 struct megasas_cmd *cmd; 2304 struct megasas_dcmd_frame *dcmd; 2305 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2306 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2307 dma_addr_t new_affiliation_h; 2308 int i, j, retval = 0, found = 0, doscan = 0; 2309 u8 thisVf; 2310 2311 cmd = megasas_get_cmd(instance); 2312 2313 if (!cmd) { 2314 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2315 "Failed to get cmd for scsi%d\n", 2316 instance->host->host_no); 2317 return -ENOMEM; 2318 } 2319 2320 dcmd = &cmd->frame->dcmd; 2321 2322 if (!instance->vf_affiliation) { 2323 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2324 "affiliation for scsi%d\n", instance->host->host_no); 2325 megasas_return_cmd(instance, cmd); 2326 return -ENOMEM; 2327 } 2328 2329 if (initial) 2330 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2331 sizeof(struct MR_LD_VF_AFFILIATION)); 2332 else { 2333 new_affiliation = 2334 pci_zalloc_consistent(instance->pdev, 2335 (MAX_LOGICAL_DRIVES + 1) * 2336 sizeof(struct MR_LD_VF_AFFILIATION), 2337 &new_affiliation_h); 2338 if (!new_affiliation) { 2339 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2340 "memory for new affiliation for scsi%d\n", 2341 instance->host->host_no); 2342 megasas_return_cmd(instance, cmd); 2343 return -ENOMEM; 2344 } 2345 } 2346 2347 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2348 2349 dcmd->cmd = MFI_CMD_DCMD; 2350 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2351 dcmd->sge_count = 1; 2352 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2353 dcmd->timeout = 0; 2354 dcmd->pad_0 = 0; 2355 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2356 sizeof(struct MR_LD_VF_AFFILIATION)); 2357 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2358 2359 if (initial) 2360 dcmd->sgl.sge32[0].phys_addr = 2361 cpu_to_le32(instance->vf_affiliation_h); 2362 else 2363 dcmd->sgl.sge32[0].phys_addr = 2364 cpu_to_le32(new_affiliation_h); 2365 2366 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2367 sizeof(struct MR_LD_VF_AFFILIATION)); 2368 2369 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2370 "scsi%d\n", instance->host->host_no); 2371 2372 2373 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2374 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2375 " failed with status 0x%x for scsi%d\n", 2376 dcmd->cmd_status, instance->host->host_no); 2377 retval = 1; /* Do a scan if we couldn't get affiliation */ 2378 goto out; 2379 } 2380 2381 if (!initial) { 2382 if (!new_affiliation->ldCount) { 2383 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2384 "affiliation for passive path for scsi%d\n", 2385 instance->host->host_no); 2386 retval = 1; 2387 goto out; 2388 } 2389 newmap = new_affiliation->map; 2390 savedmap = instance->vf_affiliation->map; 2391 thisVf = new_affiliation->thisVf; 2392 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2393 found = 0; 2394 for (j = 0; j < instance->vf_affiliation->ldCount; 2395 j++) { 2396 if (newmap->ref.targetId == 2397 savedmap->ref.targetId) { 2398 found = 1; 2399 if (newmap->policy[thisVf] != 2400 savedmap->policy[thisVf]) { 2401 doscan = 1; 2402 goto out; 2403 } 2404 } 2405 savedmap = (struct MR_LD_VF_MAP *) 2406 ((unsigned char *)savedmap + 2407 savedmap->size); 2408 } 2409 if (!found && newmap->policy[thisVf] != 2410 MR_LD_ACCESS_HIDDEN) { 2411 
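				/* An LD visible to this VF in one map but
				 * missing from the other (new vs. saved here,
				 * saved vs. new in the loop further down)
				 * means the affiliation changed, so request a
				 * rescan. */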
doscan = 1; 2412 goto out; 2413 } 2414 newmap = (struct MR_LD_VF_MAP *) 2415 ((unsigned char *)newmap + newmap->size); 2416 } 2417 2418 newmap = new_affiliation->map; 2419 savedmap = instance->vf_affiliation->map; 2420 2421 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2422 found = 0; 2423 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2424 if (savedmap->ref.targetId == 2425 newmap->ref.targetId) { 2426 found = 1; 2427 if (savedmap->policy[thisVf] != 2428 newmap->policy[thisVf]) { 2429 doscan = 1; 2430 goto out; 2431 } 2432 } 2433 newmap = (struct MR_LD_VF_MAP *) 2434 ((unsigned char *)newmap + 2435 newmap->size); 2436 } 2437 if (!found && savedmap->policy[thisVf] != 2438 MR_LD_ACCESS_HIDDEN) { 2439 doscan = 1; 2440 goto out; 2441 } 2442 savedmap = (struct MR_LD_VF_MAP *) 2443 ((unsigned char *)savedmap + 2444 savedmap->size); 2445 } 2446 } 2447 out: 2448 if (doscan) { 2449 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2450 "affiliation for scsi%d\n", instance->host->host_no); 2451 memcpy(instance->vf_affiliation, new_affiliation, 2452 new_affiliation->size); 2453 retval = 1; 2454 } 2455 2456 if (new_affiliation) 2457 pci_free_consistent(instance->pdev, 2458 (MAX_LOGICAL_DRIVES + 1) * 2459 sizeof(struct MR_LD_VF_AFFILIATION), 2460 new_affiliation, new_affiliation_h); 2461 megasas_return_cmd(instance, cmd); 2462 2463 return retval; 2464 } 2465 2466 /* This function will get the current SR-IOV LD/VF affiliation */ 2467 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2468 int initial) 2469 { 2470 int retval; 2471 2472 if (instance->PlasmaFW111) 2473 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2474 else 2475 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2476 return retval; 2477 } 2478 2479 /* This function will tell FW to start the SR-IOV heartbeat */ 2480 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2481 int initial) 2482 { 2483 struct megasas_cmd *cmd; 2484 struct megasas_dcmd_frame *dcmd; 2485 int retval = 0; 2486 2487 cmd = megasas_get_cmd(instance); 2488 2489 if (!cmd) { 2490 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2491 "Failed to get cmd for scsi%d\n", 2492 instance->host->host_no); 2493 return -ENOMEM; 2494 } 2495 2496 dcmd = &cmd->frame->dcmd; 2497 2498 if (initial) { 2499 instance->hb_host_mem = 2500 pci_zalloc_consistent(instance->pdev, 2501 sizeof(struct MR_CTRL_HB_HOST_MEM), 2502 &instance->hb_host_mem_h); 2503 if (!instance->hb_host_mem) { 2504 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2505 " memory for heartbeat host memory for scsi%d\n", 2506 instance->host->host_no); 2507 retval = -ENOMEM; 2508 goto out; 2509 } 2510 } 2511 2512 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2513 2514 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2515 dcmd->cmd = MFI_CMD_DCMD; 2516 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2517 dcmd->sge_count = 1; 2518 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2519 dcmd->timeout = 0; 2520 dcmd->pad_0 = 0; 2521 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2522 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2523 2524 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2525 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2526 2527 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2528 instance->host->host_no); 2529 2530 if ((instance->adapter_type != MFI_SERIES) && 2531 
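	    /* Fusion-class adapters with interrupts unmasked wait for the DCMD
	     * completion (bounded by MEGASAS_ROUTINE_WAIT_TIME_VF); otherwise
	     * the frame is issued polled below. */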
!instance->mask_interrupts) 2532 retval = megasas_issue_blocked_cmd(instance, cmd, 2533 MEGASAS_ROUTINE_WAIT_TIME_VF); 2534 else 2535 retval = megasas_issue_polled(instance, cmd); 2536 2537 if (retval) { 2538 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2539 "_MEM_ALLOC DCMD %s for scsi%d\n", 2540 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2541 "timed out" : "failed", instance->host->host_no); 2542 retval = 1; 2543 } 2544 2545 out: 2546 megasas_return_cmd(instance, cmd); 2547 2548 return retval; 2549 } 2550 2551 /* Handler for SR-IOV heartbeat */ 2552 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2553 { 2554 struct megasas_instance *instance = 2555 from_timer(instance, t, sriov_heartbeat_timer); 2556 2557 if (instance->hb_host_mem->HB.fwCounter != 2558 instance->hb_host_mem->HB.driverCounter) { 2559 instance->hb_host_mem->HB.driverCounter = 2560 instance->hb_host_mem->HB.fwCounter; 2561 mod_timer(&instance->sriov_heartbeat_timer, 2562 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2563 } else { 2564 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2565 "completed for scsi%d\n", instance->host->host_no); 2566 schedule_work(&instance->work_init); 2567 } 2568 } 2569 2570 /** 2571 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2572 * @instance: Adapter soft state 2573 * 2574 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2575 * complete all its outstanding commands. Returns error if one or more IOs 2576 * are pending after this time period. It also marks the controller dead. 2577 */ 2578 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2579 { 2580 int i, sl, outstanding; 2581 u32 reset_index; 2582 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2583 unsigned long flags; 2584 struct list_head clist_local; 2585 struct megasas_cmd *reset_cmd; 2586 u32 fw_state; 2587 2588 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2589 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2590 __func__, __LINE__); 2591 return FAILED; 2592 } 2593 2594 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2595 2596 INIT_LIST_HEAD(&clist_local); 2597 spin_lock_irqsave(&instance->hba_lock, flags); 2598 list_splice_init(&instance->internal_reset_pending_q, 2599 &clist_local); 2600 spin_unlock_irqrestore(&instance->hba_lock, flags); 2601 2602 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2603 for (i = 0; i < wait_time; i++) { 2604 msleep(1000); 2605 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2606 break; 2607 } 2608 2609 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2610 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2611 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2612 return FAILED; 2613 } 2614 2615 reset_index = 0; 2616 while (!list_empty(&clist_local)) { 2617 reset_cmd = list_entry((&clist_local)->next, 2618 struct megasas_cmd, list); 2619 list_del_init(&reset_cmd->list); 2620 if (reset_cmd->scmd) { 2621 reset_cmd->scmd->result = DID_REQUEUE << 16; 2622 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2623 reset_index, reset_cmd, 2624 reset_cmd->scmd->cmnd[0]); 2625 2626 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2627 megasas_return_cmd(instance, reset_cmd); 2628 } else if (reset_cmd->sync_cmd) { 2629 dev_notice(&instance->pdev->dev, "%p synch cmds" 2630 "reset queue\n", 2631 reset_cmd); 2632 2633 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 
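				/* Internal (sync_cmd) frames taken off the
				 * reset queue are re-posted to the firmware
				 * below rather than completed back to the
				 * midlayer. */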
2634 instance->instancet->fire_cmd(instance, 2635 reset_cmd->frame_phys_addr, 2636 0, instance->reg_set); 2637 } else { 2638 dev_notice(&instance->pdev->dev, "%p unexpected" 2639 "cmds lst\n", 2640 reset_cmd); 2641 } 2642 reset_index++; 2643 } 2644 2645 return SUCCESS; 2646 } 2647 2648 for (i = 0; i < resetwaittime; i++) { 2649 outstanding = atomic_read(&instance->fw_outstanding); 2650 2651 if (!outstanding) 2652 break; 2653 2654 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2655 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2656 "commands to complete\n",i,outstanding); 2657 /* 2658 * Call cmd completion routine. Cmd to be 2659 * be completed directly without depending on isr. 2660 */ 2661 megasas_complete_cmd_dpc((unsigned long)instance); 2662 } 2663 2664 msleep(1000); 2665 } 2666 2667 i = 0; 2668 outstanding = atomic_read(&instance->fw_outstanding); 2669 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2670 2671 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2672 goto no_outstanding; 2673 2674 if (instance->disableOnlineCtrlReset) 2675 goto kill_hba_and_failed; 2676 do { 2677 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2678 dev_info(&instance->pdev->dev, 2679 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n", 2680 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2681 if (i == 3) 2682 goto kill_hba_and_failed; 2683 megasas_do_ocr(instance); 2684 2685 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2686 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2687 __func__, __LINE__); 2688 return FAILED; 2689 } 2690 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2691 __func__, __LINE__); 2692 2693 for (sl = 0; sl < 10; sl++) 2694 msleep(500); 2695 2696 outstanding = atomic_read(&instance->fw_outstanding); 2697 2698 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2699 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2700 goto no_outstanding; 2701 } 2702 i++; 2703 } while (i <= 3); 2704 2705 no_outstanding: 2706 2707 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2708 __func__, __LINE__); 2709 return SUCCESS; 2710 2711 kill_hba_and_failed: 2712 2713 /* Reset not supported, kill adapter */ 2714 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2715 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2716 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2717 atomic_read(&instance->fw_outstanding)); 2718 megasas_dump_pending_frames(instance); 2719 megaraid_sas_kill_hba(instance); 2720 2721 return FAILED; 2722 } 2723 2724 /** 2725 * megasas_generic_reset - Generic reset routine 2726 * @scmd: Mid-layer SCSI command 2727 * 2728 * This routine implements a generic reset handler for device, bus and host 2729 * reset requests. Device, bus and host specific reset handlers can use this 2730 * function after they do their specific tasks. 
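 *
 * In outline (a rough sketch of the body below):
 *
 *	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
 *		return FAILED;
 *	return megasas_wait_for_outstanding(instance);
 *
 * i.e. it fails fast only on a dead controller and otherwise relies on
 * megasas_wait_for_outstanding() to drain or recover the firmware.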
2731 */ 2732 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2733 { 2734 int ret_val; 2735 struct megasas_instance *instance; 2736 2737 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2738 2739 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2740 scmd->cmnd[0], scmd->retries); 2741 2742 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2743 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2744 return FAILED; 2745 } 2746 2747 ret_val = megasas_wait_for_outstanding(instance); 2748 if (ret_val == SUCCESS) 2749 dev_notice(&instance->pdev->dev, "reset successful\n"); 2750 else 2751 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2752 2753 return ret_val; 2754 } 2755 2756 /** 2757 * megasas_reset_timer - quiesce the adapter if required 2758 * @scmd: scsi cmnd 2759 * 2760 * Sets the FW busy flag and reduces the host->can_queue if the 2761 * cmd has not been completed within the timeout period. 2762 */ 2763 static enum 2764 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2765 { 2766 struct megasas_instance *instance; 2767 unsigned long flags; 2768 2769 if (time_after(jiffies, scmd->jiffies_at_alloc + 2770 (scmd_timeout * 2) * HZ)) { 2771 return BLK_EH_DONE; 2772 } 2773 2774 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2775 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2776 /* FW is busy, throttle IO */ 2777 spin_lock_irqsave(instance->host->host_lock, flags); 2778 2779 instance->host->can_queue = instance->throttlequeuedepth; 2780 instance->last_time = jiffies; 2781 instance->flag |= MEGASAS_FW_BUSY; 2782 2783 spin_unlock_irqrestore(instance->host->host_lock, flags); 2784 } 2785 return BLK_EH_RESET_TIMER; 2786 } 2787 2788 /** 2789 * megasas_dump_frame - This function will dump MPT/MFI frame 2790 */ 2791 static inline void 2792 megasas_dump_frame(void *mpi_request, int sz) 2793 { 2794 int i; 2795 __le32 *mfp = (__le32 *)mpi_request; 2796 2797 printk(KERN_INFO "IO request frame:\n\t"); 2798 for (i = 0; i < sz / sizeof(__le32); i++) { 2799 if (i && ((i % 8) == 0)) 2800 printk("\n\t"); 2801 printk("%08x ", le32_to_cpu(mfp[i])); 2802 } 2803 printk("\n"); 2804 } 2805 2806 /** 2807 * megasas_reset_bus_host - Bus & host reset handler entry point 2808 */ 2809 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2810 { 2811 int ret; 2812 struct megasas_instance *instance; 2813 2814 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2815 2816 scmd_printk(KERN_INFO, scmd, 2817 "Controller reset is requested due to IO timeout\n" 2818 "SCSI command pointer: (%p)\t SCSI host state: %d\t" 2819 " SCSI host busy: %d\t FW outstanding: %d\n", 2820 scmd, scmd->device->host->shost_state, 2821 atomic_read((atomic_t *)&scmd->device->host->host_busy), 2822 atomic_read(&instance->fw_outstanding)); 2823 2824 /* 2825 * First wait for all commands to complete 2826 */ 2827 if (instance->adapter_type == MFI_SERIES) { 2828 ret = megasas_generic_reset(scmd); 2829 } else { 2830 struct megasas_cmd_fusion *cmd; 2831 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2832 if (cmd) 2833 megasas_dump_frame(cmd->io_request, 2834 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 2835 ret = megasas_reset_fusion(scmd->device->host, 2836 SCSIIO_TIMEOUT_OCR); 2837 } 2838 2839 return ret; 2840 } 2841 2842 /** 2843 * megasas_task_abort - Issues task abort request to firmware 2844 * (supported only for fusion adapters) 2845 * @scmd: SCSI command pointer 2846 */ 2847 static int 
megasas_task_abort(struct scsi_cmnd *scmd) 2848 { 2849 int ret; 2850 struct megasas_instance *instance; 2851 2852 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2853 2854 if (instance->adapter_type != MFI_SERIES) 2855 ret = megasas_task_abort_fusion(scmd); 2856 else { 2857 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2858 ret = FAILED; 2859 } 2860 2861 return ret; 2862 } 2863 2864 /** 2865 * megasas_reset_target: Issues target reset request to firmware 2866 * (supported only for fusion adapters) 2867 * @scmd: SCSI command pointer 2868 */ 2869 static int megasas_reset_target(struct scsi_cmnd *scmd) 2870 { 2871 int ret; 2872 struct megasas_instance *instance; 2873 2874 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2875 2876 if (instance->adapter_type != MFI_SERIES) 2877 ret = megasas_reset_target_fusion(scmd); 2878 else { 2879 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2880 ret = FAILED; 2881 } 2882 2883 return ret; 2884 } 2885 2886 /** 2887 * megasas_bios_param - Returns disk geometry for a disk 2888 * @sdev: device handle 2889 * @bdev: block device 2890 * @capacity: drive capacity 2891 * @geom: geometry parameters 2892 */ 2893 static int 2894 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2895 sector_t capacity, int geom[]) 2896 { 2897 int heads; 2898 int sectors; 2899 sector_t cylinders; 2900 unsigned long tmp; 2901 2902 /* Default heads (64) & sectors (32) */ 2903 heads = 64; 2904 sectors = 32; 2905 2906 tmp = heads * sectors; 2907 cylinders = capacity; 2908 2909 sector_div(cylinders, tmp); 2910 2911 /* 2912 * Handle extended translation size for logical drives > 1Gb 2913 */ 2914 2915 if (capacity >= 0x200000) { 2916 heads = 255; 2917 sectors = 63; 2918 tmp = heads*sectors; 2919 cylinders = capacity; 2920 sector_div(cylinders, tmp); 2921 } 2922 2923 geom[0] = heads; 2924 geom[1] = sectors; 2925 geom[2] = cylinders; 2926 2927 return 0; 2928 } 2929 2930 static void megasas_aen_polling(struct work_struct *work); 2931 2932 /** 2933 * megasas_service_aen - Processes an event notification 2934 * @instance: Adapter soft state 2935 * @cmd: AEN command completed by the ISR 2936 * 2937 * For AEN, driver sends a command down to FW that is held by the FW till an 2938 * event occurs. When an event of interest occurs, FW completes the command 2939 * that it was previously holding. 2940 * 2941 * This routines sends SIGIO signal to processes that have registered with the 2942 * driver for AEN. 
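 *
 * Besides the SIGIO/poll wakeup, the completion path below also schedules
 * megasas_aen_polling() as delayed work (when the driver is not unloading)
 * so that hotplug events are handled in process context.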
2943 */ 2944 static void 2945 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2946 { 2947 unsigned long flags; 2948 2949 /* 2950 * Don't signal app if it is just an aborted previously registered aen 2951 */ 2952 if ((!cmd->abort_aen) && (instance->unload == 0)) { 2953 spin_lock_irqsave(&poll_aen_lock, flags); 2954 megasas_poll_wait_aen = 1; 2955 spin_unlock_irqrestore(&poll_aen_lock, flags); 2956 wake_up(&megasas_poll_wait); 2957 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 2958 } 2959 else 2960 cmd->abort_aen = 0; 2961 2962 instance->aen_cmd = NULL; 2963 2964 megasas_return_cmd(instance, cmd); 2965 2966 if ((instance->unload == 0) && 2967 ((instance->issuepend_done == 1))) { 2968 struct megasas_aen_event *ev; 2969 2970 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2971 if (!ev) { 2972 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 2973 } else { 2974 ev->instance = instance; 2975 instance->ev = ev; 2976 INIT_DELAYED_WORK(&ev->hotplug_work, 2977 megasas_aen_polling); 2978 schedule_delayed_work(&ev->hotplug_work, 0); 2979 } 2980 } 2981 } 2982 2983 static ssize_t 2984 megasas_fw_crash_buffer_store(struct device *cdev, 2985 struct device_attribute *attr, const char *buf, size_t count) 2986 { 2987 struct Scsi_Host *shost = class_to_shost(cdev); 2988 struct megasas_instance *instance = 2989 (struct megasas_instance *) shost->hostdata; 2990 int val = 0; 2991 unsigned long flags; 2992 2993 if (kstrtoint(buf, 0, &val) != 0) 2994 return -EINVAL; 2995 2996 spin_lock_irqsave(&instance->crashdump_lock, flags); 2997 instance->fw_crash_buffer_offset = val; 2998 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2999 return strlen(buf); 3000 } 3001 3002 static ssize_t 3003 megasas_fw_crash_buffer_show(struct device *cdev, 3004 struct device_attribute *attr, char *buf) 3005 { 3006 struct Scsi_Host *shost = class_to_shost(cdev); 3007 struct megasas_instance *instance = 3008 (struct megasas_instance *) shost->hostdata; 3009 u32 size; 3010 unsigned long buff_addr; 3011 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3012 unsigned long src_addr; 3013 unsigned long flags; 3014 u32 buff_offset; 3015 3016 spin_lock_irqsave(&instance->crashdump_lock, flags); 3017 buff_offset = instance->fw_crash_buffer_offset; 3018 if (!instance->crash_dump_buf && 3019 !((instance->fw_crash_state == AVAILABLE) || 3020 (instance->fw_crash_state == COPYING))) { 3021 dev_err(&instance->pdev->dev, 3022 "Firmware crash dump is not available\n"); 3023 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3024 return -EINVAL; 3025 } 3026 3027 buff_addr = (unsigned long) buf; 3028 3029 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3030 dev_err(&instance->pdev->dev, 3031 "Firmware crash dump offset is out of range\n"); 3032 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3033 return 0; 3034 } 3035 3036 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3037 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 3038 3039 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3040 (buff_offset % dmachunk); 3041 memcpy(buf, (void *)src_addr, size); 3042 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3043 3044 return size; 3045 } 3046 3047 static ssize_t 3048 megasas_fw_crash_buffer_size_show(struct device *cdev, 3049 struct device_attribute *attr, char *buf) 3050 { 3051 struct Scsi_Host *shost = class_to_shost(cdev); 3052 struct megasas_instance *instance = 3053 (struct megasas_instance *) shost->hostdata; 3054 3055 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3056 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3057 } 3058 3059 static ssize_t 3060 megasas_fw_crash_state_store(struct device *cdev, 3061 struct device_attribute *attr, const char *buf, size_t count) 3062 { 3063 struct Scsi_Host *shost = class_to_shost(cdev); 3064 struct megasas_instance *instance = 3065 (struct megasas_instance *) shost->hostdata; 3066 int val = 0; 3067 unsigned long flags; 3068 3069 if (kstrtoint(buf, 0, &val) != 0) 3070 return -EINVAL; 3071 3072 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3073 dev_err(&instance->pdev->dev, "application updates invalid " 3074 "firmware crash state\n"); 3075 return -EINVAL; 3076 } 3077 3078 instance->fw_crash_state = val; 3079 3080 if ((val == COPIED) || (val == COPY_ERROR)) { 3081 spin_lock_irqsave(&instance->crashdump_lock, flags); 3082 megasas_free_host_crash_buffer(instance); 3083 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3084 if (val == COPY_ERROR) 3085 dev_info(&instance->pdev->dev, "application failed to " 3086 "copy Firmware crash dump\n"); 3087 else 3088 dev_info(&instance->pdev->dev, "Firmware crash dump " 3089 "copied successfully\n"); 3090 } 3091 return strlen(buf); 3092 } 3093 3094 static ssize_t 3095 megasas_fw_crash_state_show(struct device *cdev, 3096 struct device_attribute *attr, char *buf) 3097 { 3098 struct Scsi_Host *shost = class_to_shost(cdev); 3099 struct megasas_instance *instance = 3100 (struct megasas_instance *) shost->hostdata; 3101 3102 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3103 } 3104 3105 static ssize_t 3106 megasas_page_size_show(struct device *cdev, 3107 struct device_attribute *attr, char *buf) 3108 { 3109 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3110 } 3111 3112 static ssize_t 3113 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3114 char *buf) 3115 { 3116 struct Scsi_Host *shost = class_to_shost(cdev); 3117 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3118 3119 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3120 } 3121 3122 static ssize_t 3123 megasas_fw_cmds_outstanding_show(struct device *cdev, 3124 struct device_attribute *attr, char *buf) 3125 { 3126 struct Scsi_Host *shost = class_to_shost(cdev); 3127 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3128 3129 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3130 } 3131 3132 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3133 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3134 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3135 megasas_fw_crash_buffer_size_show, NULL); 3136 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 3137 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 3138 static DEVICE_ATTR(page_size, S_IRUGO, 3139 
megasas_page_size_show, NULL); 3140 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3141 megasas_ldio_outstanding_show, NULL); 3142 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO, 3143 megasas_fw_cmds_outstanding_show, NULL); 3144 3145 struct device_attribute *megaraid_host_attrs[] = { 3146 &dev_attr_fw_crash_buffer_size, 3147 &dev_attr_fw_crash_buffer, 3148 &dev_attr_fw_crash_state, 3149 &dev_attr_page_size, 3150 &dev_attr_ldio_outstanding, 3151 &dev_attr_fw_cmds_outstanding, 3152 NULL, 3153 }; 3154 3155 /* 3156 * Scsi host template for megaraid_sas driver 3157 */ 3158 static struct scsi_host_template megasas_template = { 3159 3160 .module = THIS_MODULE, 3161 .name = "Avago SAS based MegaRAID driver", 3162 .proc_name = "megaraid_sas", 3163 .slave_configure = megasas_slave_configure, 3164 .slave_alloc = megasas_slave_alloc, 3165 .slave_destroy = megasas_slave_destroy, 3166 .queuecommand = megasas_queue_command, 3167 .eh_target_reset_handler = megasas_reset_target, 3168 .eh_abort_handler = megasas_task_abort, 3169 .eh_host_reset_handler = megasas_reset_bus_host, 3170 .eh_timed_out = megasas_reset_timer, 3171 .shost_attrs = megaraid_host_attrs, 3172 .bios_param = megasas_bios_param, 3173 .use_clustering = ENABLE_CLUSTERING, 3174 .change_queue_depth = scsi_change_queue_depth, 3175 .no_write_same = 1, 3176 }; 3177 3178 /** 3179 * megasas_complete_int_cmd - Completes an internal command 3180 * @instance: Adapter soft state 3181 * @cmd: Command to be completed 3182 * 3183 * The megasas_issue_blocked_cmd() function waits for a command to complete 3184 * after it issues a command. This function wakes up that waiting routine by 3185 * calling wake_up() on the wait queue. 3186 */ 3187 static void 3188 megasas_complete_int_cmd(struct megasas_instance *instance, 3189 struct megasas_cmd *cmd) 3190 { 3191 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3192 wake_up(&instance->int_cmd_wait_q); 3193 } 3194 3195 /** 3196 * megasas_complete_abort - Completes aborting a command 3197 * @instance: Adapter soft state 3198 * @cmd: Cmd that was issued to abort another cmd 3199 * 3200 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3201 * after it issues an abort on a previously issued command. This function 3202 * wakes up all functions waiting on the same wait queue. 3203 */ 3204 static void 3205 megasas_complete_abort(struct megasas_instance *instance, 3206 struct megasas_cmd *cmd) 3207 { 3208 if (cmd->sync_cmd) { 3209 cmd->sync_cmd = 0; 3210 cmd->cmd_status_drv = 0; 3211 wake_up(&instance->abort_cmd_wait_q); 3212 } 3213 } 3214 3215 /** 3216 * megasas_complete_cmd - Completes a command 3217 * @instance: Adapter soft state 3218 * @cmd: Command to be completed 3219 * @alt_status: If non-zero, use this value as status to 3220 * SCSI mid-layer instead of the value returned 3221 * by the FW. 
This should be used if caller wants 3222 * an alternate status (as in the case of aborted 3223 * commands) 3224 */ 3225 void 3226 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3227 u8 alt_status) 3228 { 3229 int exception = 0; 3230 struct megasas_header *hdr = &cmd->frame->hdr; 3231 unsigned long flags; 3232 struct fusion_context *fusion = instance->ctrl_context; 3233 u32 opcode, status; 3234 3235 /* flag for the retry reset */ 3236 cmd->retry_for_fw_reset = 0; 3237 3238 if (cmd->scmd) 3239 cmd->scmd->SCp.ptr = NULL; 3240 3241 switch (hdr->cmd) { 3242 case MFI_CMD_INVALID: 3243 /* Some older 1068 controller FW may keep a pended 3244 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3245 when booting the kdump kernel. Ignore this command to 3246 prevent a kernel panic on shutdown of the kdump kernel. */ 3247 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3248 "completed\n"); 3249 dev_warn(&instance->pdev->dev, "If you have a controller " 3250 "other than PERC5, please upgrade your firmware\n"); 3251 break; 3252 case MFI_CMD_PD_SCSI_IO: 3253 case MFI_CMD_LD_SCSI_IO: 3254 3255 /* 3256 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3257 * issued either through an IO path or an IOCTL path. If it 3258 * was via IOCTL, we will send it to internal completion. 3259 */ 3260 if (cmd->sync_cmd) { 3261 cmd->sync_cmd = 0; 3262 megasas_complete_int_cmd(instance, cmd); 3263 break; 3264 } 3265 3266 case MFI_CMD_LD_READ: 3267 case MFI_CMD_LD_WRITE: 3268 3269 if (alt_status) { 3270 cmd->scmd->result = alt_status << 16; 3271 exception = 1; 3272 } 3273 3274 if (exception) { 3275 3276 atomic_dec(&instance->fw_outstanding); 3277 3278 scsi_dma_unmap(cmd->scmd); 3279 cmd->scmd->scsi_done(cmd->scmd); 3280 megasas_return_cmd(instance, cmd); 3281 3282 break; 3283 } 3284 3285 switch (hdr->cmd_status) { 3286 3287 case MFI_STAT_OK: 3288 cmd->scmd->result = DID_OK << 16; 3289 break; 3290 3291 case MFI_STAT_SCSI_IO_FAILED: 3292 case MFI_STAT_LD_INIT_IN_PROGRESS: 3293 cmd->scmd->result = 3294 (DID_ERROR << 16) | hdr->scsi_status; 3295 break; 3296 3297 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3298 3299 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3300 3301 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3302 memset(cmd->scmd->sense_buffer, 0, 3303 SCSI_SENSE_BUFFERSIZE); 3304 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3305 hdr->sense_len); 3306 3307 cmd->scmd->result |= DRIVER_SENSE << 24; 3308 } 3309 3310 break; 3311 3312 case MFI_STAT_LD_OFFLINE: 3313 case MFI_STAT_DEVICE_NOT_FOUND: 3314 cmd->scmd->result = DID_BAD_TARGET << 16; 3315 break; 3316 3317 default: 3318 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3319 hdr->cmd_status); 3320 cmd->scmd->result = DID_ERROR << 16; 3321 break; 3322 } 3323 3324 atomic_dec(&instance->fw_outstanding); 3325 3326 scsi_dma_unmap(cmd->scmd); 3327 cmd->scmd->scsi_done(cmd->scmd); 3328 megasas_return_cmd(instance, cmd); 3329 3330 break; 3331 3332 case MFI_CMD_SMP: 3333 case MFI_CMD_STP: 3334 case MFI_CMD_NVME: 3335 megasas_complete_int_cmd(instance, cmd); 3336 break; 3337 3338 case MFI_CMD_DCMD: 3339 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3340 /* Check for LD map update */ 3341 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3342 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3343 fusion->fast_path_io = 0; 3344 spin_lock_irqsave(instance->host->host_lock, flags); 3345 status = cmd->frame->hdr.cmd_status; 3346 instance->map_update_cmd = NULL; 3347 if (status != MFI_STAT_OK) { 3348 if (status != 
MFI_STAT_NOT_FOUND) 3349 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3350 cmd->frame->hdr.cmd_status); 3351 else { 3352 megasas_return_cmd(instance, cmd); 3353 spin_unlock_irqrestore( 3354 instance->host->host_lock, 3355 flags); 3356 break; 3357 } 3358 } 3359 3360 megasas_return_cmd(instance, cmd); 3361 3362 /* 3363 * Set fast path IO to ZERO. 3364 * Validate Map will set proper value. 3365 * Meanwhile all IOs will go as LD IO. 3366 */ 3367 if (status == MFI_STAT_OK && 3368 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3369 instance->map_id++; 3370 fusion->fast_path_io = 1; 3371 } else { 3372 fusion->fast_path_io = 0; 3373 } 3374 3375 megasas_sync_map_info(instance); 3376 spin_unlock_irqrestore(instance->host->host_lock, 3377 flags); 3378 break; 3379 } 3380 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3381 opcode == MR_DCMD_CTRL_EVENT_GET) { 3382 spin_lock_irqsave(&poll_aen_lock, flags); 3383 megasas_poll_wait_aen = 0; 3384 spin_unlock_irqrestore(&poll_aen_lock, flags); 3385 } 3386 3387 /* FW has an updated PD sequence */ 3388 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3389 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3390 3391 spin_lock_irqsave(instance->host->host_lock, flags); 3392 status = cmd->frame->hdr.cmd_status; 3393 instance->jbod_seq_cmd = NULL; 3394 megasas_return_cmd(instance, cmd); 3395 3396 if (status == MFI_STAT_OK) { 3397 instance->pd_seq_map_id++; 3398 /* Re-register a pd sync seq num cmd */ 3399 if (megasas_sync_pd_seq_num(instance, true)) 3400 instance->use_seqnum_jbod_fp = false; 3401 } else 3402 instance->use_seqnum_jbod_fp = false; 3403 3404 spin_unlock_irqrestore(instance->host->host_lock, flags); 3405 break; 3406 } 3407 3408 /* 3409 * See if got an event notification 3410 */ 3411 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3412 megasas_service_aen(instance, cmd); 3413 else 3414 megasas_complete_int_cmd(instance, cmd); 3415 3416 break; 3417 3418 case MFI_CMD_ABORT: 3419 /* 3420 * Cmd issued to abort another cmd returned 3421 */ 3422 megasas_complete_abort(instance, cmd); 3423 break; 3424 3425 default: 3426 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3427 hdr->cmd); 3428 megasas_complete_int_cmd(instance, cmd); 3429 break; 3430 } 3431 } 3432 3433 /** 3434 * megasas_issue_pending_cmds_again - issue all pending cmds 3435 * in FW again because of the fw reset 3436 * @instance: Adapter soft state 3437 */ 3438 static inline void 3439 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3440 { 3441 struct megasas_cmd *cmd; 3442 struct list_head clist_local; 3443 union megasas_evt_class_locale class_locale; 3444 unsigned long flags; 3445 u32 seq_num; 3446 3447 INIT_LIST_HEAD(&clist_local); 3448 spin_lock_irqsave(&instance->hba_lock, flags); 3449 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3450 spin_unlock_irqrestore(&instance->hba_lock, flags); 3451 3452 while (!list_empty(&clist_local)) { 3453 cmd = list_entry((&clist_local)->next, 3454 struct megasas_cmd, list); 3455 list_del_init(&cmd->list); 3456 3457 if (cmd->sync_cmd || cmd->scmd) { 3458 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3459 "detected to be pending while HBA reset\n", 3460 cmd, cmd->scmd, cmd->sync_cmd); 3461 3462 cmd->retry_for_fw_reset++; 3463 3464 if (cmd->retry_for_fw_reset == 3) { 3465 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3466 "was tried multiple times during reset." 
3467 "Shutting down the HBA\n", 3468 cmd, cmd->scmd, cmd->sync_cmd); 3469 instance->instancet->disable_intr(instance); 3470 atomic_set(&instance->fw_reset_no_pci_access, 1); 3471 megaraid_sas_kill_hba(instance); 3472 return; 3473 } 3474 } 3475 3476 if (cmd->sync_cmd == 1) { 3477 if (cmd->scmd) { 3478 dev_notice(&instance->pdev->dev, "unexpected" 3479 "cmd attached to internal command!\n"); 3480 } 3481 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3482 "on the internal reset queue," 3483 "issue it again.\n", cmd); 3484 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3485 instance->instancet->fire_cmd(instance, 3486 cmd->frame_phys_addr, 3487 0, instance->reg_set); 3488 } else if (cmd->scmd) { 3489 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3490 "detected on the internal queue, issue again.\n", 3491 cmd, cmd->scmd->cmnd[0]); 3492 3493 atomic_inc(&instance->fw_outstanding); 3494 instance->instancet->fire_cmd(instance, 3495 cmd->frame_phys_addr, 3496 cmd->frame_count-1, instance->reg_set); 3497 } else { 3498 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3499 "internal reset defer list while re-issue!!\n", 3500 cmd); 3501 } 3502 } 3503 3504 if (instance->aen_cmd) { 3505 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3506 megasas_return_cmd(instance, instance->aen_cmd); 3507 3508 instance->aen_cmd = NULL; 3509 } 3510 3511 /* 3512 * Initiate AEN (Asynchronous Event Notification) 3513 */ 3514 seq_num = instance->last_seq_num; 3515 class_locale.members.reserved = 0; 3516 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3517 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3518 3519 megasas_register_aen(instance, seq_num, class_locale.word); 3520 } 3521 3522 /** 3523 * Move the internal reset pending commands to a deferred queue. 3524 * 3525 * We move the commands pending at internal reset time to a 3526 * pending queue. This queue would be flushed after successful 3527 * completion of the internal reset sequence. if the internal reset 3528 * did not complete in time, the kernel reset handler would flush 3529 * these commands. 
3530 **/ 3531 static void 3532 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3533 { 3534 struct megasas_cmd *cmd; 3535 int i; 3536 u16 max_cmd = instance->max_fw_cmds; 3537 u32 defer_index; 3538 unsigned long flags; 3539 3540 defer_index = 0; 3541 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3542 for (i = 0; i < max_cmd; i++) { 3543 cmd = instance->cmd_list[i]; 3544 if (cmd->sync_cmd == 1 || cmd->scmd) { 3545 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3546 "on the defer queue as internal\n", 3547 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3548 3549 if (!list_empty(&cmd->list)) { 3550 dev_notice(&instance->pdev->dev, "ERROR while" 3551 " moving this cmd:%p, %d %p, it was" 3552 "discovered on some list?\n", 3553 cmd, cmd->sync_cmd, cmd->scmd); 3554 3555 list_del_init(&cmd->list); 3556 } 3557 defer_index++; 3558 list_add_tail(&cmd->list, 3559 &instance->internal_reset_pending_q); 3560 } 3561 } 3562 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3563 } 3564 3565 3566 static void 3567 process_fw_state_change_wq(struct work_struct *work) 3568 { 3569 struct megasas_instance *instance = 3570 container_of(work, struct megasas_instance, work_init); 3571 u32 wait; 3572 unsigned long flags; 3573 3574 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3575 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3576 atomic_read(&instance->adprecovery)); 3577 return ; 3578 } 3579 3580 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3581 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3582 "state, restarting it...\n"); 3583 3584 instance->instancet->disable_intr(instance); 3585 atomic_set(&instance->fw_outstanding, 0); 3586 3587 atomic_set(&instance->fw_reset_no_pci_access, 1); 3588 instance->instancet->adp_reset(instance, instance->reg_set); 3589 atomic_set(&instance->fw_reset_no_pci_access, 0); 3590 3591 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3592 "initiating next stage...\n"); 3593 3594 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3595 "state 2 starting...\n"); 3596 3597 /* waiting for about 20 second before start the second init */ 3598 for (wait = 0; wait < 30; wait++) { 3599 msleep(1000); 3600 } 3601 3602 if (megasas_transition_to_ready(instance, 1)) { 3603 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3604 3605 atomic_set(&instance->fw_reset_no_pci_access, 1); 3606 megaraid_sas_kill_hba(instance); 3607 return ; 3608 } 3609 3610 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3611 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3612 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3613 ) { 3614 *instance->consumer = *instance->producer; 3615 } else { 3616 *instance->consumer = 0; 3617 *instance->producer = 0; 3618 } 3619 3620 megasas_issue_init_mfi(instance); 3621 3622 spin_lock_irqsave(&instance->hba_lock, flags); 3623 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3624 spin_unlock_irqrestore(&instance->hba_lock, flags); 3625 instance->instancet->enable_intr(instance); 3626 3627 megasas_issue_pending_cmds_again(instance); 3628 instance->issuepend_done = 1; 3629 } 3630 } 3631 3632 /** 3633 * megasas_deplete_reply_queue - Processes all completed commands 3634 * @instance: Adapter soft state 3635 * @alt_status: Alternate status to be returned to 3636 * SCSI mid-layer instead of the status 3637 * returned by the FW 3638 * Note: this must be called with hba lock held 3639 */ 3640 static 
int 3641 megasas_deplete_reply_queue(struct megasas_instance *instance, 3642 u8 alt_status) 3643 { 3644 u32 mfiStatus; 3645 u32 fw_state; 3646 3647 if ((mfiStatus = instance->instancet->check_reset(instance, 3648 instance->reg_set)) == 1) { 3649 return IRQ_HANDLED; 3650 } 3651 3652 if ((mfiStatus = instance->instancet->clear_intr( 3653 instance->reg_set) 3654 ) == 0) { 3655 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3656 if (!instance->msix_vectors) 3657 return IRQ_NONE; 3658 } 3659 3660 instance->mfiStatus = mfiStatus; 3661 3662 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3663 fw_state = instance->instancet->read_fw_status_reg( 3664 instance->reg_set) & MFI_STATE_MASK; 3665 3666 if (fw_state != MFI_STATE_FAULT) { 3667 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3668 fw_state); 3669 } 3670 3671 if ((fw_state == MFI_STATE_FAULT) && 3672 (instance->disableOnlineCtrlReset == 0)) { 3673 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3674 3675 if ((instance->pdev->device == 3676 PCI_DEVICE_ID_LSI_SAS1064R) || 3677 (instance->pdev->device == 3678 PCI_DEVICE_ID_DELL_PERC5) || 3679 (instance->pdev->device == 3680 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3681 3682 *instance->consumer = 3683 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3684 } 3685 3686 3687 instance->instancet->disable_intr(instance); 3688 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3689 instance->issuepend_done = 0; 3690 3691 atomic_set(&instance->fw_outstanding, 0); 3692 megasas_internal_reset_defer_cmds(instance); 3693 3694 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3695 fw_state, atomic_read(&instance->adprecovery)); 3696 3697 schedule_work(&instance->work_init); 3698 return IRQ_HANDLED; 3699 3700 } else { 3701 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3702 fw_state, instance->disableOnlineCtrlReset); 3703 } 3704 } 3705 3706 tasklet_schedule(&instance->isr_tasklet); 3707 return IRQ_HANDLED; 3708 } 3709 /** 3710 * megasas_isr - isr entry point 3711 */ 3712 static irqreturn_t megasas_isr(int irq, void *devp) 3713 { 3714 struct megasas_irq_context *irq_context = devp; 3715 struct megasas_instance *instance = irq_context->instance; 3716 unsigned long flags; 3717 irqreturn_t rc; 3718 3719 if (atomic_read(&instance->fw_reset_no_pci_access)) 3720 return IRQ_HANDLED; 3721 3722 spin_lock_irqsave(&instance->hba_lock, flags); 3723 rc = megasas_deplete_reply_queue(instance, DID_OK); 3724 spin_unlock_irqrestore(&instance->hba_lock, flags); 3725 3726 return rc; 3727 } 3728 3729 /** 3730 * megasas_transition_to_ready - Move the FW to READY state 3731 * @instance: Adapter soft state 3732 * 3733 * During the initialization, FW passes can potentially be in any one of 3734 * several possible states. If the FW in operational, waiting-for-handshake 3735 * states, driver must take steps to bring it to ready state. Otherwise, it 3736 * has to wait for the ready state. 
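 *
 * The loop below handles one state per pass; each intermediate state
 * (handshake, boot message pending, operational, init, device scan, flush
 * cache, ...) is allowed up to MEGASAS_RESET_WAIT_TIME seconds to make
 * progress, and a FAULT state is only tolerated when an online controller
 * reset (ocr != 0) was requested.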
3737 */ 3738 int 3739 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3740 { 3741 int i; 3742 u8 max_wait; 3743 u32 fw_state; 3744 u32 cur_state; 3745 u32 abs_state, curr_abs_state; 3746 3747 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); 3748 fw_state = abs_state & MFI_STATE_MASK; 3749 3750 if (fw_state != MFI_STATE_READY) 3751 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3752 " state\n"); 3753 3754 while (fw_state != MFI_STATE_READY) { 3755 3756 switch (fw_state) { 3757 3758 case MFI_STATE_FAULT: 3759 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3760 if (ocr) { 3761 max_wait = MEGASAS_RESET_WAIT_TIME; 3762 cur_state = MFI_STATE_FAULT; 3763 break; 3764 } else 3765 return -ENODEV; 3766 3767 case MFI_STATE_WAIT_HANDSHAKE: 3768 /* 3769 * Set the CLR bit in inbound doorbell 3770 */ 3771 if ((instance->pdev->device == 3772 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3773 (instance->pdev->device == 3774 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3775 (instance->adapter_type != MFI_SERIES)) 3776 writel( 3777 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3778 &instance->reg_set->doorbell); 3779 else 3780 writel( 3781 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3782 &instance->reg_set->inbound_doorbell); 3783 3784 max_wait = MEGASAS_RESET_WAIT_TIME; 3785 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3786 break; 3787 3788 case MFI_STATE_BOOT_MESSAGE_PENDING: 3789 if ((instance->pdev->device == 3790 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3791 (instance->pdev->device == 3792 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3793 (instance->adapter_type != MFI_SERIES)) 3794 writel(MFI_INIT_HOTPLUG, 3795 &instance->reg_set->doorbell); 3796 else 3797 writel(MFI_INIT_HOTPLUG, 3798 &instance->reg_set->inbound_doorbell); 3799 3800 max_wait = MEGASAS_RESET_WAIT_TIME; 3801 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3802 break; 3803 3804 case MFI_STATE_OPERATIONAL: 3805 /* 3806 * Bring it to READY state; assuming max wait 10 secs 3807 */ 3808 instance->instancet->disable_intr(instance); 3809 if ((instance->pdev->device == 3810 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3811 (instance->pdev->device == 3812 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3813 (instance->adapter_type != MFI_SERIES)) { 3814 writel(MFI_RESET_FLAGS, 3815 &instance->reg_set->doorbell); 3816 3817 if (instance->adapter_type != MFI_SERIES) { 3818 for (i = 0; i < (10 * 1000); i += 20) { 3819 if (readl( 3820 &instance-> 3821 reg_set-> 3822 doorbell) & 1) 3823 msleep(20); 3824 else 3825 break; 3826 } 3827 } 3828 } else 3829 writel(MFI_RESET_FLAGS, 3830 &instance->reg_set->inbound_doorbell); 3831 3832 max_wait = MEGASAS_RESET_WAIT_TIME; 3833 cur_state = MFI_STATE_OPERATIONAL; 3834 break; 3835 3836 case MFI_STATE_UNDEFINED: 3837 /* 3838 * This state should not last for more than 2 seconds 3839 */ 3840 max_wait = MEGASAS_RESET_WAIT_TIME; 3841 cur_state = MFI_STATE_UNDEFINED; 3842 break; 3843 3844 case MFI_STATE_BB_INIT: 3845 max_wait = MEGASAS_RESET_WAIT_TIME; 3846 cur_state = MFI_STATE_BB_INIT; 3847 break; 3848 3849 case MFI_STATE_FW_INIT: 3850 max_wait = MEGASAS_RESET_WAIT_TIME; 3851 cur_state = MFI_STATE_FW_INIT; 3852 break; 3853 3854 case MFI_STATE_FW_INIT_2: 3855 max_wait = MEGASAS_RESET_WAIT_TIME; 3856 cur_state = MFI_STATE_FW_INIT_2; 3857 break; 3858 3859 case MFI_STATE_DEVICE_SCAN: 3860 max_wait = MEGASAS_RESET_WAIT_TIME; 3861 cur_state = MFI_STATE_DEVICE_SCAN; 3862 break; 3863 3864 case MFI_STATE_FLUSH_CACHE: 3865 max_wait = MEGASAS_RESET_WAIT_TIME; 3866 cur_state = MFI_STATE_FLUSH_CACHE; 3867 break; 
3868 3869 default: 3870 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 3871 fw_state); 3872 return -ENODEV; 3873 } 3874 3875 /* 3876 * The cur_state should not last for more than max_wait secs 3877 */ 3878 for (i = 0; i < (max_wait * 1000); i++) { 3879 curr_abs_state = instance->instancet-> 3880 read_fw_status_reg(instance->reg_set); 3881 3882 if (abs_state == curr_abs_state) { 3883 msleep(1); 3884 } else 3885 break; 3886 } 3887 3888 /* 3889 * Return error if fw_state hasn't changed after max_wait 3890 */ 3891 if (curr_abs_state == abs_state) { 3892 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3893 "in %d secs\n", fw_state, max_wait); 3894 return -ENODEV; 3895 } 3896 3897 abs_state = curr_abs_state; 3898 fw_state = curr_abs_state & MFI_STATE_MASK; 3899 } 3900 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3901 3902 return 0; 3903 } 3904 3905 /** 3906 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3907 * @instance: Adapter soft state 3908 */ 3909 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3910 { 3911 int i; 3912 u16 max_cmd = instance->max_mfi_cmds; 3913 struct megasas_cmd *cmd; 3914 3915 if (!instance->frame_dma_pool) 3916 return; 3917 3918 /* 3919 * Return all frames to pool 3920 */ 3921 for (i = 0; i < max_cmd; i++) { 3922 3923 cmd = instance->cmd_list[i]; 3924 3925 if (cmd->frame) 3926 dma_pool_free(instance->frame_dma_pool, cmd->frame, 3927 cmd->frame_phys_addr); 3928 3929 if (cmd->sense) 3930 dma_pool_free(instance->sense_dma_pool, cmd->sense, 3931 cmd->sense_phys_addr); 3932 } 3933 3934 /* 3935 * Now destroy the pool itself 3936 */ 3937 dma_pool_destroy(instance->frame_dma_pool); 3938 dma_pool_destroy(instance->sense_dma_pool); 3939 3940 instance->frame_dma_pool = NULL; 3941 instance->sense_dma_pool = NULL; 3942 } 3943 3944 /** 3945 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3946 * @instance: Adapter soft state 3947 * 3948 * Each command packet has an embedded DMA memory buffer that is used for 3949 * filling MFI frame and the SG list that immediately follows the frame. This 3950 * function creates those DMA memory buffers for each command packet by using 3951 * PCI pool facility. 3952 */ 3953 static int megasas_create_frame_pool(struct megasas_instance *instance) 3954 { 3955 int i; 3956 u16 max_cmd; 3957 u32 sge_sz; 3958 u32 frame_count; 3959 struct megasas_cmd *cmd; 3960 3961 max_cmd = instance->max_mfi_cmds; 3962 3963 /* 3964 * Size of our frame is 64 bytes for MFI frame, followed by max SG 3965 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 3966 */ 3967 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 3968 sizeof(struct megasas_sge32); 3969 3970 if (instance->flag_ieee) 3971 sge_sz = sizeof(struct megasas_sge_skinny); 3972 3973 /* 3974 * For MFI controllers. 3975 * max_num_sge = 60 3976 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 3977 * Total 960 byte (15 MFI frame of 64 byte) 3978 * 3979 * Fusion adapter require only 3 extra frame. 3980 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 3981 * max_sge_sz = 12 byte (sizeof megasas_sge64) 3982 * Total 192 byte (3 MFI frame of 64 byte) 3983 */ 3984 frame_count = (instance->adapter_type == MFI_SERIES) ? 
3985 (15 + 1) : (3 + 1); 3986 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 3987 /* 3988 * Use DMA pool facility provided by PCI layer 3989 */ 3990 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 3991 &instance->pdev->dev, 3992 instance->mfi_frame_size, 256, 0); 3993 3994 if (!instance->frame_dma_pool) { 3995 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 3996 return -ENOMEM; 3997 } 3998 3999 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4000 &instance->pdev->dev, 128, 4001 4, 0); 4002 4003 if (!instance->sense_dma_pool) { 4004 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4005 4006 dma_pool_destroy(instance->frame_dma_pool); 4007 instance->frame_dma_pool = NULL; 4008 4009 return -ENOMEM; 4010 } 4011 4012 /* 4013 * Allocate and attach a frame to each of the commands in cmd_list. 4014 * By making cmd->index as the context instead of the &cmd, we can 4015 * always use 32bit context regardless of the architecture 4016 */ 4017 for (i = 0; i < max_cmd; i++) { 4018 4019 cmd = instance->cmd_list[i]; 4020 4021 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4022 GFP_KERNEL, &cmd->frame_phys_addr); 4023 4024 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4025 GFP_KERNEL, &cmd->sense_phys_addr); 4026 4027 /* 4028 * megasas_teardown_frame_pool() takes care of freeing 4029 * whatever has been allocated 4030 */ 4031 if (!cmd->frame || !cmd->sense) { 4032 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4033 megasas_teardown_frame_pool(instance); 4034 return -ENOMEM; 4035 } 4036 4037 cmd->frame->io.context = cpu_to_le32(cmd->index); 4038 cmd->frame->io.pad_0 = 0; 4039 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4040 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4041 } 4042 4043 return 0; 4044 } 4045 4046 /** 4047 * megasas_free_cmds - Free all the cmds in the free cmd pool 4048 * @instance: Adapter soft state 4049 */ 4050 void megasas_free_cmds(struct megasas_instance *instance) 4051 { 4052 int i; 4053 4054 /* First free the MFI frame pool */ 4055 megasas_teardown_frame_pool(instance); 4056 4057 /* Free all the commands in the cmd_list */ 4058 for (i = 0; i < instance->max_mfi_cmds; i++) 4059 4060 kfree(instance->cmd_list[i]); 4061 4062 /* Free the cmd_list buffer itself */ 4063 kfree(instance->cmd_list); 4064 instance->cmd_list = NULL; 4065 4066 INIT_LIST_HEAD(&instance->cmd_pool); 4067 } 4068 4069 /** 4070 * megasas_alloc_cmds - Allocates the command packets 4071 * @instance: Adapter soft state 4072 * 4073 * Each command that is issued to the FW, whether IO commands from the OS or 4074 * internal commands like IOCTLs, are wrapped in local data structure called 4075 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4076 * the FW. 4077 * 4078 * Each frame has a 32-bit field called context (tag). This context is used 4079 * to get back the megasas_cmd from the frame when a frame gets completed in 4080 * the ISR. Typically the address of the megasas_cmd itself would be used as 4081 * the context. But we wanted to keep the differences between 32 and 64 bit 4082 * systems to the mininum. We always use 32 bit integers for the context. In 4083 * this driver, the 32 bit values are the indices into an array cmd_list. 4084 * This array is used only to look up the megasas_cmd given the context. The 4085 * free commands themselves are maintained in a linked list called cmd_pool. 
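 *
 * As a rough illustration (not a verbatim excerpt of the completion path,
 * and "frame" here simply stands for the completed MFI frame), turning a
 * context back into its command is just an array lookup:
 *
 *	u32 ctx = le32_to_cpu(frame->io.context);
 *	struct megasas_cmd *cmd = instance->cmd_list[ctx];
 *
 * which is why a 32-bit tag works identically on 32-bit and 64-bit kernels.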
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u16 max_cmd;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}

/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:			Adapter soft state
 *
 * Returns INITIATE_OCR only for Fusion adapters, and only when the driver
 * is not being unloaded and a reset is not already in progress; MFI-series
 * adapters get KILL_ADAPTER, and an in-flight unload/reset gets
 * IGNORE_TIMEOUT.
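 *
 * A typical caller pattern on DCMD_TIMEOUT (mirroring the DCMD handlers
 * later in this file) looks like:
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}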
4153 */ 4154 inline int 4155 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4156 4157 if (instance->adapter_type == MFI_SERIES) 4158 return KILL_ADAPTER; 4159 else if (instance->unload || 4160 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 4161 return IGNORE_TIMEOUT; 4162 else 4163 return INITIATE_OCR; 4164 } 4165 4166 static void 4167 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4168 { 4169 int ret; 4170 struct megasas_cmd *cmd; 4171 struct megasas_dcmd_frame *dcmd; 4172 4173 struct MR_PRIV_DEVICE *mr_device_priv_data; 4174 u16 device_id = 0; 4175 4176 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4177 cmd = megasas_get_cmd(instance); 4178 4179 if (!cmd) { 4180 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4181 return; 4182 } 4183 4184 dcmd = &cmd->frame->dcmd; 4185 4186 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4187 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4188 4189 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4190 dcmd->cmd = MFI_CMD_DCMD; 4191 dcmd->cmd_status = 0xFF; 4192 dcmd->sge_count = 1; 4193 dcmd->flags = MFI_FRAME_DIR_READ; 4194 dcmd->timeout = 0; 4195 dcmd->pad_0 = 0; 4196 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4197 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4198 4199 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4200 sizeof(struct MR_PD_INFO)); 4201 4202 if ((instance->adapter_type != MFI_SERIES) && 4203 !instance->mask_interrupts) 4204 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4205 else 4206 ret = megasas_issue_polled(instance, cmd); 4207 4208 switch (ret) { 4209 case DCMD_SUCCESS: 4210 mr_device_priv_data = sdev->hostdata; 4211 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4212 mr_device_priv_data->interface_type = 4213 instance->pd_info->state.ddf.pdType.intf; 4214 break; 4215 4216 case DCMD_TIMEOUT: 4217 4218 switch (dcmd_timeout_ocr_possible(instance)) { 4219 case INITIATE_OCR: 4220 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4221 megasas_reset_fusion(instance->host, 4222 MFI_IO_TIMEOUT_OCR); 4223 break; 4224 case KILL_ADAPTER: 4225 megaraid_sas_kill_hba(instance); 4226 break; 4227 case IGNORE_TIMEOUT: 4228 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4229 __func__, __LINE__); 4230 break; 4231 } 4232 4233 break; 4234 } 4235 4236 if (ret != DCMD_TIMEOUT) 4237 megasas_return_cmd(instance, cmd); 4238 4239 return; 4240 } 4241 /* 4242 * megasas_get_pd_list_info - Returns FW's pd_list structure 4243 * @instance: Adapter soft state 4244 * @pd_list: pd_list structure 4245 * 4246 * Issues an internal command (DCMD) to get the FW's controller PD 4247 * list structure. This information is mainly used to find out SYSTEM 4248 * supported by the FW. 
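 * (i.e. which drives the FW currently exposes to the host as system PDs).
 * On success each reported device is recorded in instance->local_pd_list,
 * indexed by its device ID and marked MR_PD_STATE_SYSTEM, and the result
 * is then copied into instance->pd_list.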
4249 */ 4250 static int 4251 megasas_get_pd_list(struct megasas_instance *instance) 4252 { 4253 int ret = 0, pd_index = 0; 4254 struct megasas_cmd *cmd; 4255 struct megasas_dcmd_frame *dcmd; 4256 struct MR_PD_LIST *ci; 4257 struct MR_PD_ADDRESS *pd_addr; 4258 dma_addr_t ci_h = 0; 4259 4260 if (instance->pd_list_not_supported) { 4261 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4262 "not supported by firmware\n"); 4263 return ret; 4264 } 4265 4266 ci = instance->pd_list_buf; 4267 ci_h = instance->pd_list_buf_h; 4268 4269 cmd = megasas_get_cmd(instance); 4270 4271 if (!cmd) { 4272 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4273 return -ENOMEM; 4274 } 4275 4276 dcmd = &cmd->frame->dcmd; 4277 4278 memset(ci, 0, sizeof(*ci)); 4279 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4280 4281 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4282 dcmd->mbox.b[1] = 0; 4283 dcmd->cmd = MFI_CMD_DCMD; 4284 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4285 dcmd->sge_count = 1; 4286 dcmd->flags = MFI_FRAME_DIR_READ; 4287 dcmd->timeout = 0; 4288 dcmd->pad_0 = 0; 4289 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4290 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4291 4292 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4293 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4294 4295 if ((instance->adapter_type != MFI_SERIES) && 4296 !instance->mask_interrupts) 4297 ret = megasas_issue_blocked_cmd(instance, cmd, 4298 MFI_IO_TIMEOUT_SECS); 4299 else 4300 ret = megasas_issue_polled(instance, cmd); 4301 4302 switch (ret) { 4303 case DCMD_FAILED: 4304 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4305 "failed/not supported by firmware\n"); 4306 4307 if (instance->adapter_type != MFI_SERIES) 4308 megaraid_sas_kill_hba(instance); 4309 else 4310 instance->pd_list_not_supported = 1; 4311 break; 4312 case DCMD_TIMEOUT: 4313 4314 switch (dcmd_timeout_ocr_possible(instance)) { 4315 case INITIATE_OCR: 4316 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4317 /* 4318 * DCMD failed from AEN path. 4319 * AEN path already hold reset_mutex to avoid PCI access 4320 * while OCR is in progress. 
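			 * The mutex is therefore dropped only around
			 * megasas_reset_fusion() and re-acquired right after
			 * it returns, so this function still exits with
			 * reset_mutex held.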
4321 */ 4322 mutex_unlock(&instance->reset_mutex); 4323 megasas_reset_fusion(instance->host, 4324 MFI_IO_TIMEOUT_OCR); 4325 mutex_lock(&instance->reset_mutex); 4326 break; 4327 case KILL_ADAPTER: 4328 megaraid_sas_kill_hba(instance); 4329 break; 4330 case IGNORE_TIMEOUT: 4331 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4332 __func__, __LINE__); 4333 break; 4334 } 4335 4336 break; 4337 4338 case DCMD_SUCCESS: 4339 pd_addr = ci->addr; 4340 4341 if ((le32_to_cpu(ci->count) > 4342 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4343 break; 4344 4345 memset(instance->local_pd_list, 0, 4346 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4347 4348 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4349 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4350 le16_to_cpu(pd_addr->deviceId); 4351 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4352 pd_addr->scsiDevType; 4353 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4354 MR_PD_STATE_SYSTEM; 4355 pd_addr++; 4356 } 4357 4358 memcpy(instance->pd_list, instance->local_pd_list, 4359 sizeof(instance->pd_list)); 4360 break; 4361 4362 } 4363 4364 if (ret != DCMD_TIMEOUT) 4365 megasas_return_cmd(instance, cmd); 4366 4367 return ret; 4368 } 4369 4370 /* 4371 * megasas_get_ld_list_info - Returns FW's ld_list structure 4372 * @instance: Adapter soft state 4373 * @ld_list: ld_list structure 4374 * 4375 * Issues an internal command (DCMD) to get the FW's controller PD 4376 * list structure. This information is mainly used to find out SYSTEM 4377 * supported by the FW. 4378 */ 4379 static int 4380 megasas_get_ld_list(struct megasas_instance *instance) 4381 { 4382 int ret = 0, ld_index = 0, ids = 0; 4383 struct megasas_cmd *cmd; 4384 struct megasas_dcmd_frame *dcmd; 4385 struct MR_LD_LIST *ci; 4386 dma_addr_t ci_h = 0; 4387 u32 ld_count; 4388 4389 ci = instance->ld_list_buf; 4390 ci_h = instance->ld_list_buf_h; 4391 4392 cmd = megasas_get_cmd(instance); 4393 4394 if (!cmd) { 4395 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4396 return -ENOMEM; 4397 } 4398 4399 dcmd = &cmd->frame->dcmd; 4400 4401 memset(ci, 0, sizeof(*ci)); 4402 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4403 4404 if (instance->supportmax256vd) 4405 dcmd->mbox.b[0] = 1; 4406 dcmd->cmd = MFI_CMD_DCMD; 4407 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4408 dcmd->sge_count = 1; 4409 dcmd->flags = MFI_FRAME_DIR_READ; 4410 dcmd->timeout = 0; 4411 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4412 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4413 dcmd->pad_0 = 0; 4414 4415 megasas_set_dma_settings(instance, dcmd, ci_h, 4416 sizeof(struct MR_LD_LIST)); 4417 4418 if ((instance->adapter_type != MFI_SERIES) && 4419 !instance->mask_interrupts) 4420 ret = megasas_issue_blocked_cmd(instance, cmd, 4421 MFI_IO_TIMEOUT_SECS); 4422 else 4423 ret = megasas_issue_polled(instance, cmd); 4424 4425 ld_count = le32_to_cpu(ci->ldCount); 4426 4427 switch (ret) { 4428 case DCMD_FAILED: 4429 megaraid_sas_kill_hba(instance); 4430 break; 4431 case DCMD_TIMEOUT: 4432 4433 switch (dcmd_timeout_ocr_possible(instance)) { 4434 case INITIATE_OCR: 4435 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4436 /* 4437 * DCMD failed from AEN path. 4438 * AEN path already hold reset_mutex to avoid PCI access 4439 * while OCR is in progress. 
4440 */ 4441 mutex_unlock(&instance->reset_mutex); 4442 megasas_reset_fusion(instance->host, 4443 MFI_IO_TIMEOUT_OCR); 4444 mutex_lock(&instance->reset_mutex); 4445 break; 4446 case KILL_ADAPTER: 4447 megaraid_sas_kill_hba(instance); 4448 break; 4449 case IGNORE_TIMEOUT: 4450 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4451 __func__, __LINE__); 4452 break; 4453 } 4454 4455 break; 4456 4457 case DCMD_SUCCESS: 4458 if (ld_count > instance->fw_supported_vd_count) 4459 break; 4460 4461 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4462 4463 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4464 if (ci->ldList[ld_index].state != 0) { 4465 ids = ci->ldList[ld_index].ref.targetId; 4466 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4467 } 4468 } 4469 4470 break; 4471 } 4472 4473 if (ret != DCMD_TIMEOUT) 4474 megasas_return_cmd(instance, cmd); 4475 4476 return ret; 4477 } 4478 4479 /** 4480 * megasas_ld_list_query - Returns FW's ld_list structure 4481 * @instance: Adapter soft state 4482 * @ld_list: ld_list structure 4483 * 4484 * Issues an internal command (DCMD) to get the FW's controller PD 4485 * list structure. This information is mainly used to find out SYSTEM 4486 * supported by the FW. 4487 */ 4488 static int 4489 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4490 { 4491 int ret = 0, ld_index = 0, ids = 0; 4492 struct megasas_cmd *cmd; 4493 struct megasas_dcmd_frame *dcmd; 4494 struct MR_LD_TARGETID_LIST *ci; 4495 dma_addr_t ci_h = 0; 4496 u32 tgtid_count; 4497 4498 ci = instance->ld_targetid_list_buf; 4499 ci_h = instance->ld_targetid_list_buf_h; 4500 4501 cmd = megasas_get_cmd(instance); 4502 4503 if (!cmd) { 4504 dev_warn(&instance->pdev->dev, 4505 "megasas_ld_list_query: Failed to get cmd\n"); 4506 return -ENOMEM; 4507 } 4508 4509 dcmd = &cmd->frame->dcmd; 4510 4511 memset(ci, 0, sizeof(*ci)); 4512 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4513 4514 dcmd->mbox.b[0] = query_type; 4515 if (instance->supportmax256vd) 4516 dcmd->mbox.b[2] = 1; 4517 4518 dcmd->cmd = MFI_CMD_DCMD; 4519 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4520 dcmd->sge_count = 1; 4521 dcmd->flags = MFI_FRAME_DIR_READ; 4522 dcmd->timeout = 0; 4523 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4524 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4525 dcmd->pad_0 = 0; 4526 4527 megasas_set_dma_settings(instance, dcmd, ci_h, 4528 sizeof(struct MR_LD_TARGETID_LIST)); 4529 4530 if ((instance->adapter_type != MFI_SERIES) && 4531 !instance->mask_interrupts) 4532 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4533 else 4534 ret = megasas_issue_polled(instance, cmd); 4535 4536 switch (ret) { 4537 case DCMD_FAILED: 4538 dev_info(&instance->pdev->dev, 4539 "DCMD not supported by firmware - %s %d\n", 4540 __func__, __LINE__); 4541 ret = megasas_get_ld_list(instance); 4542 break; 4543 case DCMD_TIMEOUT: 4544 switch (dcmd_timeout_ocr_possible(instance)) { 4545 case INITIATE_OCR: 4546 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4547 /* 4548 * DCMD failed from AEN path. 4549 * AEN path already hold reset_mutex to avoid PCI access 4550 * while OCR is in progress. 
4551 */ 4552 mutex_unlock(&instance->reset_mutex); 4553 megasas_reset_fusion(instance->host, 4554 MFI_IO_TIMEOUT_OCR); 4555 mutex_lock(&instance->reset_mutex); 4556 break; 4557 case KILL_ADAPTER: 4558 megaraid_sas_kill_hba(instance); 4559 break; 4560 case IGNORE_TIMEOUT: 4561 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4562 __func__, __LINE__); 4563 break; 4564 } 4565 4566 break; 4567 case DCMD_SUCCESS: 4568 tgtid_count = le32_to_cpu(ci->count); 4569 4570 if ((tgtid_count > (instance->fw_supported_vd_count))) 4571 break; 4572 4573 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4574 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4575 ids = ci->targetId[ld_index]; 4576 instance->ld_ids[ids] = ci->targetId[ld_index]; 4577 } 4578 4579 break; 4580 } 4581 4582 if (ret != DCMD_TIMEOUT) 4583 megasas_return_cmd(instance, cmd); 4584 4585 return ret; 4586 } 4587 4588 /* 4589 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4590 * instance : Controller's instance 4591 */ 4592 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4593 { 4594 struct fusion_context *fusion; 4595 u32 ventura_map_sz = 0; 4596 4597 fusion = instance->ctrl_context; 4598 /* For MFI based controllers return dummy success */ 4599 if (!fusion) 4600 return; 4601 4602 instance->supportmax256vd = 4603 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 4604 /* Below is additional check to address future FW enhancement */ 4605 if (instance->ctrl_info_buf->max_lds > 64) 4606 instance->supportmax256vd = 1; 4607 4608 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4609 * MEGASAS_MAX_DEV_PER_CHANNEL; 4610 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4611 * MEGASAS_MAX_DEV_PER_CHANNEL; 4612 if (instance->supportmax256vd) { 4613 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4614 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4615 } else { 4616 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4617 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4618 } 4619 4620 dev_info(&instance->pdev->dev, 4621 "firmware type\t: %s\n", 4622 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4623 "Legacy(64 VD) firmware"); 4624 4625 if (instance->max_raid_mapsize) { 4626 ventura_map_sz = instance->max_raid_mapsize * 4627 MR_MIN_MAP_SIZE; /* 64k */ 4628 fusion->current_map_sz = ventura_map_sz; 4629 fusion->max_map_sz = ventura_map_sz; 4630 } else { 4631 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4632 (sizeof(struct MR_LD_SPAN_MAP) * 4633 (instance->fw_supported_vd_count - 1)); 4634 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4635 4636 fusion->max_map_sz = 4637 max(fusion->old_map_sz, fusion->new_map_sz); 4638 4639 if (instance->supportmax256vd) 4640 fusion->current_map_sz = fusion->new_map_sz; 4641 else 4642 fusion->current_map_sz = fusion->old_map_sz; 4643 } 4644 /* irrespective of FW raid maps, driver raid map is constant */ 4645 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4646 } 4647 4648 /** 4649 * megasas_get_controller_info - Returns FW's controller structure 4650 * @instance: Adapter soft state 4651 * 4652 * Issues an internal command (DCMD) to get the FW's controller structure. 4653 * This information is mainly used to find out the maximum IO transfer per 4654 * command supported by the FW. 
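 *
 * megasas_init_fw() later combines two of the limits returned here,
 * (1 << stripe_sz_ops.min) * max_strips_per_io and max_request_size,
 * and caps the per-command sector count at the smaller of the two.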
 */
int
megasas_get_ctrl_info(struct megasas_instance *instance)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_ctrl_info *ci;
	dma_addr_t ci_h = 0;

	ci = instance->ctrl_info_buf;
	ci_h = instance->ctrl_info_buf_h;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
	dcmd->mbox.b[0] = 1;

	megasas_set_dma_settings(instance, dcmd, ci_h,
		sizeof(struct megasas_ctrl_info));

	if ((instance->adapter_type != MFI_SERIES) &&
	    !instance->mask_interrupts) {
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	} else {
		ret = megasas_issue_polled(instance, cmd);
		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
	}

	switch (ret) {
	case DCMD_SUCCESS:
		/* Save required controller information in
		 * CPU endianness format.
		 */
		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
		le32_to_cpus((u32 *)&ci->adapterOperations2);
		le32_to_cpus((u32 *)&ci->adapterOperations3);
		le16_to_cpus((u16 *)&ci->adapter_operations4);

		/* Update the latest Ext VD info.
		 * From the init path, store current firmware details.
		 * From the OCR path, detect any firmware property changes,
		 * in case of a firmware upgrade without system reboot.
		 */
		megasas_update_ext_vd_details(instance);
		instance->use_seqnum_jbod_fp =
			ci->adapterOperations3.useSeqNumJbodFP;
		instance->support_morethan256jbod =
			ci->adapter_operations4.support_pd_map_target_id;
		instance->support_nvme_passthru =
			ci->adapter_operations4.support_nvme_passthru;

		/* Check whether controller is iMR or MR */
		instance->is_imr = (ci->memory_size ? 0 : 1);
		dev_info(&instance->pdev->dev,
			"controller type\t: %s(%dMB)\n",
			instance->is_imr ? "iMR" : "MR",
			le16_to_cpu(ci->memory_size));

		instance->disableOnlineCtrlReset =
			ci->properties.OnOffProperties.disableOnlineCtrlReset;
		instance->secure_jbod_support =
			ci->adapterOperations3.supportSecurityonJBOD;
		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
			instance->secure_jbod_support ? "Yes" : "No");
		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
			instance->support_nvme_passthru ? "Yes" : "No");
		break;

	case DCMD_TIMEOUT:
		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
				MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}
		break;
	case DCMD_FAILED:
		megaraid_sas_kill_hba(instance);
		break;

	}

	megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
 *				   to firmware
 * @instance:			Adapter soft state
 * @crash_buf_state:		tell FW to turn the crash dump feature on or off
 *				(MR_CRASH_BUF_TURN_OFF = 0, MR_CRASH_BUF_TURN_ON = 1)
 *
 * Return: 0 on success, non-zero on failure.
 *
 * Issues an internal command (DCMD) to set parameters for the crash dump
 * feature. The driver sends the address of the crash dump DMA buffer and sets
 * the mbox to tell FW that the driver supports crash dump. This DCMD is sent
 * only if the crash dump feature is supported by the FW.
 */
int megasas_set_crash_dump_params(struct megasas_instance *instance,
	u8 crash_buf_state)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->mbox.b[0] = crash_buf_state;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);

	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
		CRASH_DMA_BUF_SIZE);

	if ((instance->adapter_type != MFI_SERIES) &&
	    !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT) {
		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
					MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
			break;
		}
	} else
		megasas_return_cmd(instance, cmd);

	return ret;
}

/**
 * megasas_issue_init_mfi -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * Issues the INIT MFI cmd
 */
static int
megasas_issue_init_mfi(struct megasas_instance *instance)
{
	__le32 context;
	struct megasas_cmd *cmd;
	struct megasas_init_frame *init_frame;
	struct megasas_init_queue_info *initq_info;
	dma_addr_t init_frame_h;
	dma_addr_t initq_info_h;

	/*
	 * Prepare an init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes.
For 4862 * this frame - since we don't need any SGL - we use SGL's space as 4863 * queue info structure 4864 * 4865 * We will not get a NULL command below. We just created the pool. 4866 */ 4867 cmd = megasas_get_cmd(instance); 4868 4869 init_frame = (struct megasas_init_frame *)cmd->frame; 4870 initq_info = (struct megasas_init_queue_info *) 4871 ((unsigned long)init_frame + 64); 4872 4873 init_frame_h = cmd->frame_phys_addr; 4874 initq_info_h = init_frame_h + 64; 4875 4876 context = init_frame->context; 4877 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 4878 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 4879 init_frame->context = context; 4880 4881 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 4882 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 4883 4884 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 4885 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 4886 4887 init_frame->cmd = MFI_CMD_INIT; 4888 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 4889 init_frame->queue_info_new_phys_addr_lo = 4890 cpu_to_le32(lower_32_bits(initq_info_h)); 4891 init_frame->queue_info_new_phys_addr_hi = 4892 cpu_to_le32(upper_32_bits(initq_info_h)); 4893 4894 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 4895 4896 /* 4897 * disable the intr before firing the init frame to FW 4898 */ 4899 instance->instancet->disable_intr(instance); 4900 4901 /* 4902 * Issue the init frame in polled mode 4903 */ 4904 4905 if (megasas_issue_polled(instance, cmd)) { 4906 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 4907 megasas_return_cmd(instance, cmd); 4908 goto fail_fw_init; 4909 } 4910 4911 megasas_return_cmd(instance, cmd); 4912 4913 return 0; 4914 4915 fail_fw_init: 4916 return -EINVAL; 4917 } 4918 4919 static u32 4920 megasas_init_adapter_mfi(struct megasas_instance *instance) 4921 { 4922 struct megasas_register_set __iomem *reg_set; 4923 u32 context_sz; 4924 u32 reply_q_sz; 4925 4926 reg_set = instance->reg_set; 4927 4928 /* 4929 * Get various operational parameters from status register 4930 */ 4931 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 4932 /* 4933 * Reduce the max supported cmds by 1. This is to ensure that the 4934 * reply_q_sz (1 more than the max cmd that driver may send) 4935 * does not exceed max cmds that the FW can support 4936 */ 4937 instance->max_fw_cmds = instance->max_fw_cmds-1; 4938 instance->max_mfi_cmds = instance->max_fw_cmds; 4939 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 4940 0x10; 4941 /* 4942 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 4943 * are reserved for IOCTL + driver's internal DCMDs. 4944 */ 4945 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4946 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 4947 instance->max_scsi_cmds = (instance->max_fw_cmds - 4948 MEGASAS_SKINNY_INT_CMDS); 4949 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 4950 } else { 4951 instance->max_scsi_cmds = (instance->max_fw_cmds - 4952 MEGASAS_INT_CMDS); 4953 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 4954 } 4955 4956 instance->cur_can_queue = instance->max_scsi_cmds; 4957 /* 4958 * Create a pool of commands 4959 */ 4960 if (megasas_alloc_cmds(instance)) 4961 goto fail_alloc_cmds; 4962 4963 /* 4964 * Allocate memory for reply queue. 
Length of reply queue should 4965 * be _one_ more than the maximum commands handled by the firmware. 4966 * 4967 * Note: When FW completes commands, it places corresponding contex 4968 * values in this circular reply queue. This circular queue is a fairly 4969 * typical producer-consumer queue. FW is the producer (of completed 4970 * commands) and the driver is the consumer. 4971 */ 4972 context_sz = sizeof(u32); 4973 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 4974 4975 instance->reply_queue = pci_alloc_consistent(instance->pdev, 4976 reply_q_sz, 4977 &instance->reply_queue_h); 4978 4979 if (!instance->reply_queue) { 4980 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 4981 goto fail_reply_queue; 4982 } 4983 4984 if (megasas_issue_init_mfi(instance)) 4985 goto fail_fw_init; 4986 4987 if (megasas_get_ctrl_info(instance)) { 4988 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4989 "Fail from %s %d\n", instance->unique_id, 4990 __func__, __LINE__); 4991 goto fail_fw_init; 4992 } 4993 4994 instance->fw_support_ieee = 0; 4995 instance->fw_support_ieee = 4996 (instance->instancet->read_fw_status_reg(reg_set) & 4997 0x04000000); 4998 4999 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5000 instance->fw_support_ieee); 5001 5002 if (instance->fw_support_ieee) 5003 instance->flag_ieee = 1; 5004 5005 return 0; 5006 5007 fail_fw_init: 5008 5009 pci_free_consistent(instance->pdev, reply_q_sz, 5010 instance->reply_queue, instance->reply_queue_h); 5011 fail_reply_queue: 5012 megasas_free_cmds(instance); 5013 5014 fail_alloc_cmds: 5015 return 1; 5016 } 5017 5018 /* 5019 * megasas_setup_irqs_ioapic - register legacy interrupts. 5020 * @instance: Adapter soft state 5021 * 5022 * Do not enable interrupt, only setup ISRs. 5023 * 5024 * Return 0 on success. 5025 */ 5026 static int 5027 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5028 { 5029 struct pci_dev *pdev; 5030 5031 pdev = instance->pdev; 5032 instance->irq_context[0].instance = instance; 5033 instance->irq_context[0].MSIxIndex = 0; 5034 if (request_irq(pci_irq_vector(pdev, 0), 5035 instance->instancet->service_isr, IRQF_SHARED, 5036 "megasas", &instance->irq_context[0])) { 5037 dev_err(&instance->pdev->dev, 5038 "Failed to register IRQ from %s %d\n", 5039 __func__, __LINE__); 5040 return -1; 5041 } 5042 return 0; 5043 } 5044 5045 /** 5046 * megasas_setup_irqs_msix - register MSI-x interrupts. 5047 * @instance: Adapter soft state 5048 * @is_probe: Driver probe check 5049 * 5050 * Do not enable interrupt, only setup ISRs. 5051 * 5052 * Return 0 on success. 
5053 */ 5054 static int 5055 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5056 { 5057 int i, j; 5058 struct pci_dev *pdev; 5059 5060 pdev = instance->pdev; 5061 5062 /* Try MSI-x */ 5063 for (i = 0; i < instance->msix_vectors; i++) { 5064 instance->irq_context[i].instance = instance; 5065 instance->irq_context[i].MSIxIndex = i; 5066 if (request_irq(pci_irq_vector(pdev, i), 5067 instance->instancet->service_isr, 0, "megasas", 5068 &instance->irq_context[i])) { 5069 dev_err(&instance->pdev->dev, 5070 "Failed to register IRQ for vector %d.\n", i); 5071 for (j = 0; j < i; j++) 5072 free_irq(pci_irq_vector(pdev, j), 5073 &instance->irq_context[j]); 5074 /* Retry irq register for IO_APIC*/ 5075 instance->msix_vectors = 0; 5076 if (is_probe) { 5077 pci_free_irq_vectors(instance->pdev); 5078 return megasas_setup_irqs_ioapic(instance); 5079 } else { 5080 return -1; 5081 } 5082 } 5083 } 5084 return 0; 5085 } 5086 5087 /* 5088 * megasas_destroy_irqs- unregister interrupts. 5089 * @instance: Adapter soft state 5090 * return: void 5091 */ 5092 static void 5093 megasas_destroy_irqs(struct megasas_instance *instance) { 5094 5095 int i; 5096 5097 if (instance->msix_vectors) 5098 for (i = 0; i < instance->msix_vectors; i++) { 5099 free_irq(pci_irq_vector(instance->pdev, i), 5100 &instance->irq_context[i]); 5101 } 5102 else 5103 free_irq(pci_irq_vector(instance->pdev, 0), 5104 &instance->irq_context[0]); 5105 } 5106 5107 /** 5108 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5109 * @instance: Adapter soft state 5110 * @is_probe: Driver probe check 5111 * 5112 * Return 0 on success. 5113 */ 5114 void 5115 megasas_setup_jbod_map(struct megasas_instance *instance) 5116 { 5117 int i; 5118 struct fusion_context *fusion = instance->ctrl_context; 5119 u32 pd_seq_map_sz; 5120 5121 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5122 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5123 5124 if (reset_devices || !fusion || 5125 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) { 5126 dev_info(&instance->pdev->dev, 5127 "Jbod map is not supported %s %d\n", 5128 __func__, __LINE__); 5129 instance->use_seqnum_jbod_fp = false; 5130 return; 5131 } 5132 5133 if (fusion->pd_seq_sync[0]) 5134 goto skip_alloc; 5135 5136 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5137 fusion->pd_seq_sync[i] = dma_alloc_coherent 5138 (&instance->pdev->dev, pd_seq_map_sz, 5139 &fusion->pd_seq_phys[i], GFP_KERNEL); 5140 if (!fusion->pd_seq_sync[i]) { 5141 dev_err(&instance->pdev->dev, 5142 "Failed to allocate memory from %s %d\n", 5143 __func__, __LINE__); 5144 if (i == 1) { 5145 dma_free_coherent(&instance->pdev->dev, 5146 pd_seq_map_sz, fusion->pd_seq_sync[0], 5147 fusion->pd_seq_phys[0]); 5148 fusion->pd_seq_sync[0] = NULL; 5149 } 5150 instance->use_seqnum_jbod_fp = false; 5151 return; 5152 } 5153 } 5154 5155 skip_alloc: 5156 if (!megasas_sync_pd_seq_num(instance, false) && 5157 !megasas_sync_pd_seq_num(instance, true)) 5158 instance->use_seqnum_jbod_fp = true; 5159 else 5160 instance->use_seqnum_jbod_fp = false; 5161 } 5162 5163 static void megasas_setup_reply_map(struct megasas_instance *instance) 5164 { 5165 const struct cpumask *mask; 5166 unsigned int queue, cpu; 5167 5168 for (queue = 0; queue < instance->msix_vectors; queue++) { 5169 mask = pci_irq_get_affinity(instance->pdev, queue); 5170 if (!mask) 5171 goto fallback; 5172 5173 for_each_cpu(cpu, mask) 5174 instance->reply_map[cpu] = queue; 5175 } 5176 return; 5177 5178 fallback: 5179 for_each_possible_cpu(cpu) 
5180 instance->reply_map[cpu] = cpu % instance->msix_vectors; 5181 } 5182 5183 /** 5184 * megasas_init_fw - Initializes the FW 5185 * @instance: Adapter soft state 5186 * 5187 * This is the main function for initializing firmware 5188 */ 5189 5190 static int megasas_init_fw(struct megasas_instance *instance) 5191 { 5192 u32 max_sectors_1; 5193 u32 max_sectors_2, tmp_sectors, msix_enable; 5194 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; 5195 resource_size_t base_addr; 5196 struct megasas_register_set __iomem *reg_set; 5197 struct megasas_ctrl_info *ctrl_info = NULL; 5198 unsigned long bar_list; 5199 int i, j, loop, fw_msix_count = 0; 5200 struct IOV_111 *iovPtr; 5201 struct fusion_context *fusion; 5202 5203 fusion = instance->ctrl_context; 5204 5205 /* Find first memory bar */ 5206 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5207 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5208 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5209 "megasas: LSI")) { 5210 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5211 return -EBUSY; 5212 } 5213 5214 base_addr = pci_resource_start(instance->pdev, instance->bar); 5215 instance->reg_set = ioremap_nocache(base_addr, 8192); 5216 5217 if (!instance->reg_set) { 5218 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5219 goto fail_ioremap; 5220 } 5221 5222 reg_set = instance->reg_set; 5223 5224 if (instance->adapter_type != MFI_SERIES) 5225 instance->instancet = &megasas_instance_template_fusion; 5226 else { 5227 switch (instance->pdev->device) { 5228 case PCI_DEVICE_ID_LSI_SAS1078R: 5229 case PCI_DEVICE_ID_LSI_SAS1078DE: 5230 instance->instancet = &megasas_instance_template_ppc; 5231 break; 5232 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5233 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5234 instance->instancet = &megasas_instance_template_gen2; 5235 break; 5236 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5237 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5238 instance->instancet = &megasas_instance_template_skinny; 5239 break; 5240 case PCI_DEVICE_ID_LSI_SAS1064R: 5241 case PCI_DEVICE_ID_DELL_PERC5: 5242 default: 5243 instance->instancet = &megasas_instance_template_xscale; 5244 instance->pd_list_not_supported = 1; 5245 break; 5246 } 5247 } 5248 5249 if (megasas_transition_to_ready(instance, 0)) { 5250 atomic_set(&instance->fw_reset_no_pci_access, 1); 5251 instance->instancet->adp_reset 5252 (instance, instance->reg_set); 5253 atomic_set(&instance->fw_reset_no_pci_access, 0); 5254 dev_info(&instance->pdev->dev, 5255 "FW restarted successfully from %s!\n", 5256 __func__); 5257 5258 /*waitting for about 30 second before retry*/ 5259 ssleep(30); 5260 5261 if (megasas_transition_to_ready(instance, 0)) 5262 goto fail_ready_state; 5263 } 5264 5265 megasas_init_ctrl_params(instance); 5266 5267 if (megasas_set_dma_mask(instance)) 5268 goto fail_ready_state; 5269 5270 if (megasas_alloc_ctrl_mem(instance)) 5271 goto fail_alloc_dma_buf; 5272 5273 if (megasas_alloc_ctrl_dma_buffers(instance)) 5274 goto fail_alloc_dma_buf; 5275 5276 fusion = instance->ctrl_context; 5277 5278 if (instance->adapter_type == VENTURA_SERIES) { 5279 scratch_pad_3 = 5280 readl(&instance->reg_set->outbound_scratch_pad_3); 5281 instance->max_raid_mapsize = ((scratch_pad_3 >> 5282 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5283 MR_MAX_RAID_MAP_SIZE_MASK); 5284 } 5285 5286 /* Check if MSI-X is supported while in ready state */ 5287 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5288 0x4000000) >> 0x1a; 5289 if 
(msix_enable && !msix_disable) { 5290 int irq_flags = PCI_IRQ_MSIX; 5291 5292 scratch_pad_2 = readl 5293 (&instance->reg_set->outbound_scratch_pad_2); 5294 /* Check max MSI-X vectors */ 5295 if (fusion) { 5296 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5297 /* Thunderbolt Series*/ 5298 instance->msix_vectors = (scratch_pad_2 5299 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5300 fw_msix_count = instance->msix_vectors; 5301 } else { /* Invader series supports more than 8 MSI-x vectors*/ 5302 instance->msix_vectors = ((scratch_pad_2 5303 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5304 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5305 if (instance->msix_vectors > 16) 5306 instance->msix_combined = true; 5307 5308 if (rdpq_enable) 5309 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 5310 1 : 0; 5311 fw_msix_count = instance->msix_vectors; 5312 /* Save 1-15 reply post index address to local memory 5313 * Index 0 is already saved from reg offset 5314 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5315 */ 5316 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5317 instance->reply_post_host_index_addr[loop] = 5318 (u32 __iomem *) 5319 ((u8 __iomem *)instance->reg_set + 5320 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5321 + (loop * 0x10)); 5322 } 5323 } 5324 if (msix_vectors) 5325 instance->msix_vectors = min(msix_vectors, 5326 instance->msix_vectors); 5327 } else /* MFI adapters */ 5328 instance->msix_vectors = 1; 5329 /* Don't bother allocating more MSI-X vectors than cpus */ 5330 instance->msix_vectors = min(instance->msix_vectors, 5331 (unsigned int)num_online_cpus()); 5332 if (smp_affinity_enable) 5333 irq_flags |= PCI_IRQ_AFFINITY; 5334 i = pci_alloc_irq_vectors(instance->pdev, 1, 5335 instance->msix_vectors, irq_flags); 5336 if (i > 0) 5337 instance->msix_vectors = i; 5338 else 5339 instance->msix_vectors = 0; 5340 } 5341 /* 5342 * MSI-X host index 0 is common for all adapter. 5343 * It is used for all MPT based Adapters. 5344 */ 5345 if (instance->msix_combined) { 5346 instance->reply_post_host_index_addr[0] = 5347 (u32 *)((u8 *)instance->reg_set + 5348 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 5349 } else { 5350 instance->reply_post_host_index_addr[0] = 5351 (u32 *)((u8 *)instance->reg_set + 5352 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5353 } 5354 5355 if (!instance->msix_vectors) { 5356 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5357 if (i < 0) 5358 goto fail_setup_irqs; 5359 } 5360 5361 megasas_setup_reply_map(instance); 5362 5363 dev_info(&instance->pdev->dev, 5364 "firmware supports msix\t: (%d)", fw_msix_count); 5365 dev_info(&instance->pdev->dev, 5366 "current msix/online cpus\t: (%d/%d)\n", 5367 instance->msix_vectors, (unsigned int)num_online_cpus()); 5368 dev_info(&instance->pdev->dev, 5369 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5370 5371 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5372 (unsigned long)instance); 5373 5374 /* 5375 * Below are default value for legacy Firmware. 
5376 * non-fusion based controllers 5377 */ 5378 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5379 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5380 /* Get operational params, sge flags, send init cmd to controller */ 5381 if (instance->instancet->init_adapter(instance)) 5382 goto fail_init_adapter; 5383 5384 if (instance->adapter_type == VENTURA_SERIES) { 5385 scratch_pad_4 = 5386 readl(&instance->reg_set->outbound_scratch_pad_4); 5387 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= 5388 MR_DEFAULT_NVME_PAGE_SHIFT) 5389 instance->nvme_page_size = 5390 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK)); 5391 5392 dev_info(&instance->pdev->dev, 5393 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5394 } 5395 5396 if (instance->msix_vectors ? 5397 megasas_setup_irqs_msix(instance, 1) : 5398 megasas_setup_irqs_ioapic(instance)) 5399 goto fail_init_adapter; 5400 5401 instance->instancet->enable_intr(instance); 5402 5403 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5404 5405 megasas_setup_jbod_map(instance); 5406 5407 /** for passthrough 5408 * the following function will get the PD LIST. 5409 */ 5410 memset(instance->pd_list, 0, 5411 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5412 if (megasas_get_pd_list(instance) < 0) { 5413 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5414 goto fail_get_ld_pd_list; 5415 } 5416 5417 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5418 5419 /* stream detection initialization */ 5420 if (instance->adapter_type == VENTURA_SERIES) { 5421 fusion->stream_detect_by_ld = 5422 kcalloc(MAX_LOGICAL_DRIVES_EXT, 5423 sizeof(struct LD_STREAM_DETECT *), 5424 GFP_KERNEL); 5425 if (!fusion->stream_detect_by_ld) { 5426 dev_err(&instance->pdev->dev, 5427 "unable to allocate stream detection for pool of LDs\n"); 5428 goto fail_get_ld_pd_list; 5429 } 5430 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5431 fusion->stream_detect_by_ld[i] = 5432 kzalloc(sizeof(struct LD_STREAM_DETECT), 5433 GFP_KERNEL); 5434 if (!fusion->stream_detect_by_ld[i]) { 5435 dev_err(&instance->pdev->dev, 5436 "unable to allocate stream detect by LD\n "); 5437 for (j = 0; j < i; ++j) 5438 kfree(fusion->stream_detect_by_ld[j]); 5439 kfree(fusion->stream_detect_by_ld); 5440 fusion->stream_detect_by_ld = NULL; 5441 goto fail_get_ld_pd_list; 5442 } 5443 fusion->stream_detect_by_ld[i]->mru_bit_map 5444 = MR_STREAM_BITMAP; 5445 } 5446 } 5447 5448 if (megasas_ld_list_query(instance, 5449 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5450 goto fail_get_ld_pd_list; 5451 5452 /* 5453 * Compute the max allowed sectors per IO: The controller info has two 5454 * limits on max sectors. Driver should use the minimum of these two. 5455 * 5456 * 1 << stripe_sz_ops.min = max sectors per strip 5457 * 5458 * Note that older firmwares ( < FW ver 30) didn't report information 5459 * to calculate max_sectors_1. So the number ended up as zero always. 
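	 *
	 * As a purely illustrative example (the numbers are hypothetical):
	 * with stripe_sz_ops.min = 7 (64 KB strips) and max_strips_per_io = 42,
	 * max_sectors_1 = (1 << 7) * 42 = 5376 sectors, and the smaller of
	 * that and max_request_size is what gets enforced.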
5460 */ 5461 tmp_sectors = 0; 5462 ctrl_info = instance->ctrl_info_buf; 5463 5464 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5465 le16_to_cpu(ctrl_info->max_strips_per_io); 5466 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5467 5468 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5469 5470 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5471 instance->passive = ctrl_info->cluster.passive; 5472 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5473 instance->UnevenSpanSupport = 5474 ctrl_info->adapterOperations2.supportUnevenSpans; 5475 if (instance->UnevenSpanSupport) { 5476 struct fusion_context *fusion = instance->ctrl_context; 5477 if (MR_ValidateMapInfo(instance, instance->map_id)) 5478 fusion->fast_path_io = 1; 5479 else 5480 fusion->fast_path_io = 0; 5481 5482 } 5483 if (ctrl_info->host_interface.SRIOV) { 5484 instance->requestorId = ctrl_info->iov.requestorId; 5485 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5486 if (!ctrl_info->adapterOperations2.activePassive) 5487 instance->PlasmaFW111 = 1; 5488 5489 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5490 instance->PlasmaFW111 ? "1.11" : "new"); 5491 5492 if (instance->PlasmaFW111) { 5493 iovPtr = (struct IOV_111 *) 5494 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5495 instance->requestorId = iovPtr->requestorId; 5496 } 5497 } 5498 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5499 instance->requestorId); 5500 } 5501 5502 instance->crash_dump_fw_support = 5503 ctrl_info->adapterOperations3.supportCrashDump; 5504 instance->crash_dump_drv_support = 5505 (instance->crash_dump_fw_support && 5506 instance->crash_dump_buf); 5507 if (instance->crash_dump_drv_support) 5508 megasas_set_crash_dump_params(instance, 5509 MR_CRASH_BUF_TURN_OFF); 5510 5511 else { 5512 if (instance->crash_dump_buf) 5513 pci_free_consistent(instance->pdev, 5514 CRASH_DMA_BUF_SIZE, 5515 instance->crash_dump_buf, 5516 instance->crash_dump_h); 5517 instance->crash_dump_buf = NULL; 5518 } 5519 5520 5521 dev_info(&instance->pdev->dev, 5522 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5523 le16_to_cpu(ctrl_info->pci.vendor_id), 5524 le16_to_cpu(ctrl_info->pci.device_id), 5525 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5526 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5527 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5528 instance->UnevenSpanSupport ? "yes" : "no"); 5529 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5530 instance->crash_dump_drv_support ? "yes" : "no"); 5531 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5532 instance->use_seqnum_jbod_fp ? 
"yes" : "no"); 5533 5534 5535 instance->max_sectors_per_req = instance->max_num_sge * 5536 SGE_BUFFER_SIZE / 512; 5537 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5538 instance->max_sectors_per_req = tmp_sectors; 5539 5540 /* Check for valid throttlequeuedepth module parameter */ 5541 if (throttlequeuedepth && 5542 throttlequeuedepth <= instance->max_scsi_cmds) 5543 instance->throttlequeuedepth = throttlequeuedepth; 5544 else 5545 instance->throttlequeuedepth = 5546 MEGASAS_THROTTLE_QUEUE_DEPTH; 5547 5548 if ((resetwaittime < 1) || 5549 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 5550 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5551 5552 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5553 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5554 5555 /* Launch SR-IOV heartbeat timer */ 5556 if (instance->requestorId) { 5557 if (!megasas_sriov_start_heartbeat(instance, 1)) 5558 megasas_start_timer(instance); 5559 else 5560 instance->skip_heartbeat_timer_del = 1; 5561 } 5562 5563 return 0; 5564 5565 fail_get_ld_pd_list: 5566 instance->instancet->disable_intr(instance); 5567 fail_init_adapter: 5568 megasas_destroy_irqs(instance); 5569 fail_setup_irqs: 5570 if (instance->msix_vectors) 5571 pci_free_irq_vectors(instance->pdev); 5572 instance->msix_vectors = 0; 5573 fail_alloc_dma_buf: 5574 megasas_free_ctrl_dma_buffers(instance); 5575 megasas_free_ctrl_mem(instance); 5576 fail_ready_state: 5577 iounmap(instance->reg_set); 5578 5579 fail_ioremap: 5580 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5581 5582 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5583 __func__, __LINE__); 5584 return -EINVAL; 5585 } 5586 5587 /** 5588 * megasas_release_mfi - Reverses the FW initialization 5589 * @instance: Adapter soft state 5590 */ 5591 static void megasas_release_mfi(struct megasas_instance *instance) 5592 { 5593 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5594 5595 if (instance->reply_queue) 5596 pci_free_consistent(instance->pdev, reply_q_sz, 5597 instance->reply_queue, instance->reply_queue_h); 5598 5599 megasas_free_cmds(instance); 5600 5601 iounmap(instance->reg_set); 5602 5603 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5604 } 5605 5606 /** 5607 * megasas_get_seq_num - Gets latest event sequence numbers 5608 * @instance: Adapter soft state 5609 * @eli: FW event log sequence numbers information 5610 * 5611 * FW maintains a log of all events in a non-volatile area. Upper layers would 5612 * usually find out the latest sequence number of the events, the seq number at 5613 * the boot etc. They would "read" all the events below the latest seq number 5614 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5615 * number), they would subsribe to AEN (asynchronous event notification) and 5616 * wait for the events to happen. 
5617 */ 5618 static int 5619 megasas_get_seq_num(struct megasas_instance *instance, 5620 struct megasas_evt_log_info *eli) 5621 { 5622 struct megasas_cmd *cmd; 5623 struct megasas_dcmd_frame *dcmd; 5624 struct megasas_evt_log_info *el_info; 5625 dma_addr_t el_info_h = 0; 5626 int ret; 5627 5628 cmd = megasas_get_cmd(instance); 5629 5630 if (!cmd) { 5631 return -ENOMEM; 5632 } 5633 5634 dcmd = &cmd->frame->dcmd; 5635 el_info = pci_zalloc_consistent(instance->pdev, 5636 sizeof(struct megasas_evt_log_info), 5637 &el_info_h); 5638 5639 if (!el_info) { 5640 megasas_return_cmd(instance, cmd); 5641 return -ENOMEM; 5642 } 5643 5644 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5645 5646 dcmd->cmd = MFI_CMD_DCMD; 5647 dcmd->cmd_status = 0x0; 5648 dcmd->sge_count = 1; 5649 dcmd->flags = MFI_FRAME_DIR_READ; 5650 dcmd->timeout = 0; 5651 dcmd->pad_0 = 0; 5652 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5653 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5654 5655 megasas_set_dma_settings(instance, dcmd, el_info_h, 5656 sizeof(struct megasas_evt_log_info)); 5657 5658 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5659 if (ret != DCMD_SUCCESS) { 5660 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5661 __func__, __LINE__); 5662 goto dcmd_failed; 5663 } 5664 5665 /* 5666 * Copy the data back into callers buffer 5667 */ 5668 eli->newest_seq_num = el_info->newest_seq_num; 5669 eli->oldest_seq_num = el_info->oldest_seq_num; 5670 eli->clear_seq_num = el_info->clear_seq_num; 5671 eli->shutdown_seq_num = el_info->shutdown_seq_num; 5672 eli->boot_seq_num = el_info->boot_seq_num; 5673 5674 dcmd_failed: 5675 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 5676 el_info, el_info_h); 5677 5678 megasas_return_cmd(instance, cmd); 5679 5680 return ret; 5681 } 5682 5683 /** 5684 * megasas_register_aen - Registers for asynchronous event notification 5685 * @instance: Adapter soft state 5686 * @seq_num: The starting sequence number 5687 * @class_locale: Class of the event 5688 * 5689 * This function subscribes for AEN for events beyond the @seq_num. It requests 5690 * to be notified if and only if the event is of type @class_locale 5691 */ 5692 static int 5693 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 5694 u32 class_locale_word) 5695 { 5696 int ret_val; 5697 struct megasas_cmd *cmd; 5698 struct megasas_dcmd_frame *dcmd; 5699 union megasas_evt_class_locale curr_aen; 5700 union megasas_evt_class_locale prev_aen; 5701 5702 /* 5703 * If there an AEN pending already (aen_cmd), check if the 5704 * class_locale of that pending AEN is inclusive of the new 5705 * AEN request we currently have. If it is, then we don't have 5706 * to do anything. In other words, whichever events the current 5707 * AEN request is subscribing to, have already been subscribed 5708 * to. 
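 *
 * For example, using the class/locale rules described below: if the
 * pending AEN was registered with the PROGRESS class and locale
 * MR_EVT_LOCALE_ALL, a new request for a higher-valued class whose
 * locale bits are all contained in the pending locale is already
 * covered, and no new registration DCMD needs to be issued.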
5709 * 5710 * If the old_cmd is _not_ inclusive, then we have to abort 5711 * that command, form a class_locale that is superset of both 5712 * old and current and re-issue to the FW 5713 */ 5714 5715 curr_aen.word = class_locale_word; 5716 5717 if (instance->aen_cmd) { 5718 5719 prev_aen.word = 5720 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 5721 5722 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 5723 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 5724 dev_info(&instance->pdev->dev, 5725 "%s %d out of range class %d send by application\n", 5726 __func__, __LINE__, curr_aen.members.class); 5727 return 0; 5728 } 5729 5730 /* 5731 * A class whose enum value is smaller is inclusive of all 5732 * higher values. If a PROGRESS (= -1) was previously 5733 * registered, then a new registration requests for higher 5734 * classes need not be sent to FW. They are automatically 5735 * included. 5736 * 5737 * Locale numbers don't have such hierarchy. They are bitmap 5738 * values 5739 */ 5740 if ((prev_aen.members.class <= curr_aen.members.class) && 5741 !((prev_aen.members.locale & curr_aen.members.locale) ^ 5742 curr_aen.members.locale)) { 5743 /* 5744 * Previously issued event registration includes 5745 * current request. Nothing to do. 5746 */ 5747 return 0; 5748 } else { 5749 curr_aen.members.locale |= prev_aen.members.locale; 5750 5751 if (prev_aen.members.class < curr_aen.members.class) 5752 curr_aen.members.class = prev_aen.members.class; 5753 5754 instance->aen_cmd->abort_aen = 1; 5755 ret_val = megasas_issue_blocked_abort_cmd(instance, 5756 instance-> 5757 aen_cmd, 30); 5758 5759 if (ret_val) { 5760 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 5761 "previous AEN command\n"); 5762 return ret_val; 5763 } 5764 } 5765 } 5766 5767 cmd = megasas_get_cmd(instance); 5768 5769 if (!cmd) 5770 return -ENOMEM; 5771 5772 dcmd = &cmd->frame->dcmd; 5773 5774 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 5775 5776 /* 5777 * Prepare DCMD for aen registration 5778 */ 5779 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5780 5781 dcmd->cmd = MFI_CMD_DCMD; 5782 dcmd->cmd_status = 0x0; 5783 dcmd->sge_count = 1; 5784 dcmd->flags = MFI_FRAME_DIR_READ; 5785 dcmd->timeout = 0; 5786 dcmd->pad_0 = 0; 5787 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5788 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 5789 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5790 instance->last_seq_num = seq_num; 5791 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5792 5793 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 5794 sizeof(struct megasas_evt_detail)); 5795 5796 if (instance->aen_cmd != NULL) { 5797 megasas_return_cmd(instance, cmd); 5798 return 0; 5799 } 5800 5801 /* 5802 * Store reference to the cmd used to register for AEN. When an 5803 * application wants us to register for AEN, we have to abort this 5804 * cmd and re-register with a new EVENT LOCALE supplied by that app 5805 */ 5806 instance->aen_cmd = cmd; 5807 5808 /* 5809 * Issue the aen registration frame 5810 */ 5811 instance->instancet->issue_dcmd(instance, cmd); 5812 5813 return 0; 5814 } 5815 5816 /* megasas_get_target_prop - Send DCMD with below details to firmware. 5817 * 5818 * This DCMD will fetch few properties of LD/system PD defined 5819 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 5820 * 5821 * DCMD send by drivers whenever new target is added to the OS. 
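 * The DCMD is issued as a blocking command when interrupts are enabled
 * on non-MFI adapters and is polled otherwise (see the adapter_type /
 * mask_interrupts check below).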
5822 * 5823 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 5824 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 5825 * 0 = system PD, 1 = LD. 5826 * dcmd.mbox.s[1] - TargetID for LD/system PD. 5827 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 5828 * 5829 * @instance: Adapter soft state 5830 * @sdev: OS provided scsi device 5831 * 5832 * Returns 0 on success non-zero on failure. 5833 */ 5834 static int 5835 megasas_get_target_prop(struct megasas_instance *instance, 5836 struct scsi_device *sdev) 5837 { 5838 int ret; 5839 struct megasas_cmd *cmd; 5840 struct megasas_dcmd_frame *dcmd; 5841 u16 targetId = (sdev->channel % 2) + sdev->id; 5842 5843 cmd = megasas_get_cmd(instance); 5844 5845 if (!cmd) { 5846 dev_err(&instance->pdev->dev, 5847 "Failed to get cmd %s\n", __func__); 5848 return -ENOMEM; 5849 } 5850 5851 dcmd = &cmd->frame->dcmd; 5852 5853 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 5854 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5855 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 5856 5857 dcmd->mbox.s[1] = cpu_to_le16(targetId); 5858 dcmd->cmd = MFI_CMD_DCMD; 5859 dcmd->cmd_status = 0xFF; 5860 dcmd->sge_count = 1; 5861 dcmd->flags = MFI_FRAME_DIR_READ; 5862 dcmd->timeout = 0; 5863 dcmd->pad_0 = 0; 5864 dcmd->data_xfer_len = 5865 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 5866 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 5867 5868 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 5869 sizeof(struct MR_TARGET_PROPERTIES)); 5870 5871 if ((instance->adapter_type != MFI_SERIES) && 5872 !instance->mask_interrupts) 5873 ret = megasas_issue_blocked_cmd(instance, 5874 cmd, MFI_IO_TIMEOUT_SECS); 5875 else 5876 ret = megasas_issue_polled(instance, cmd); 5877 5878 switch (ret) { 5879 case DCMD_TIMEOUT: 5880 switch (dcmd_timeout_ocr_possible(instance)) { 5881 case INITIATE_OCR: 5882 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5883 megasas_reset_fusion(instance->host, 5884 MFI_IO_TIMEOUT_OCR); 5885 break; 5886 case KILL_ADAPTER: 5887 megaraid_sas_kill_hba(instance); 5888 break; 5889 case IGNORE_TIMEOUT: 5890 dev_info(&instance->pdev->dev, 5891 "Ignore DCMD timeout: %s %d\n", 5892 __func__, __LINE__); 5893 break; 5894 } 5895 break; 5896 5897 default: 5898 megasas_return_cmd(instance, cmd); 5899 } 5900 if (ret != DCMD_SUCCESS) 5901 dev_err(&instance->pdev->dev, 5902 "return from %s %d return value %d\n", 5903 __func__, __LINE__, ret); 5904 5905 return ret; 5906 } 5907 5908 /** 5909 * megasas_start_aen - Subscribes to AEN during driver load time 5910 * @instance: Adapter soft state 5911 */ 5912 static int megasas_start_aen(struct megasas_instance *instance) 5913 { 5914 struct megasas_evt_log_info eli; 5915 union megasas_evt_class_locale class_locale; 5916 5917 /* 5918 * Get the latest sequence number from FW 5919 */ 5920 memset(&eli, 0, sizeof(eli)); 5921 5922 if (megasas_get_seq_num(instance, &eli)) 5923 return -1; 5924 5925 /* 5926 * Register AEN with FW for latest sequence number plus 1 5927 */ 5928 class_locale.members.reserved = 0; 5929 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5930 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5931 5932 return megasas_register_aen(instance, 5933 le32_to_cpu(eli.newest_seq_num) + 1, 5934 class_locale.word); 5935 } 5936 5937 /** 5938 * megasas_io_attach - Attaches this driver to SCSI mid-layer 5939 * @instance: Adapter soft state 5940 */ 5941 static int megasas_io_attach(struct megasas_instance *instance) 5942 { 5943 struct Scsi_Host *host = instance->host; 5944 5945 /* 5946 * Export parameters 
required by SCSI mid-layer 5947 */ 5948 host->unique_id = instance->unique_id; 5949 host->can_queue = instance->max_scsi_cmds; 5950 host->this_id = instance->init_id; 5951 host->sg_tablesize = instance->max_num_sge; 5952 5953 if (instance->fw_support_ieee) 5954 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 5955 5956 /* 5957 * Check if the module parameter value for max_sectors can be used 5958 */ 5959 if (max_sectors && max_sectors < instance->max_sectors_per_req) 5960 instance->max_sectors_per_req = max_sectors; 5961 else { 5962 if (max_sectors) { 5963 if (((instance->pdev->device == 5964 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 5965 (instance->pdev->device == 5966 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 5967 (max_sectors <= MEGASAS_MAX_SECTORS)) { 5968 instance->max_sectors_per_req = max_sectors; 5969 } else { 5970 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 5971 "and <= %d (or < 1MB for GEN2 controller)\n", 5972 instance->max_sectors_per_req); 5973 } 5974 } 5975 } 5976 5977 host->max_sectors = instance->max_sectors_per_req; 5978 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 5979 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 5980 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 5981 host->max_lun = MEGASAS_MAX_LUN; 5982 host->max_cmd_len = 16; 5983 5984 /* 5985 * Notify the mid-layer about the new controller 5986 */ 5987 if (scsi_add_host(host, &instance->pdev->dev)) { 5988 dev_err(&instance->pdev->dev, 5989 "Failed to add host from %s %d\n", 5990 __func__, __LINE__); 5991 return -ENODEV; 5992 } 5993 5994 return 0; 5995 } 5996 5997 /** 5998 * megasas_set_dma_mask - Set DMA mask for supported controllers 5999 * 6000 * @instance: Adapter soft state 6001 * Description: 6002 * 6003 * For Ventura, driver/FW will operate in 64bit DMA addresses. 6004 * 6005 * For invader- 6006 * By default, driver/FW will operate in 32bit DMA addresses 6007 * for consistent DMA mapping but if 32 bit consistent 6008 * DMA mask fails, driver will try with 64 bit consistent 6009 * mask provided FW is true 64bit DMA capable 6010 * 6011 * For older controllers(Thunderbolt and MFI based adapters)- 6012 * driver/FW will operate in 32 bit consistent DMA addresses. 6013 */ 6014 static int 6015 megasas_set_dma_mask(struct megasas_instance *instance) 6016 { 6017 u64 consistent_mask; 6018 struct pci_dev *pdev; 6019 u32 scratch_pad_2; 6020 6021 pdev = instance->pdev; 6022 consistent_mask = (instance->adapter_type == VENTURA_SERIES) ? 6023 DMA_BIT_MASK(64) : DMA_BIT_MASK(32); 6024 6025 if (IS_DMA64) { 6026 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 6027 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6028 goto fail_set_dma_mask; 6029 6030 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) && 6031 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 6032 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 6033 /* 6034 * If 32 bit DMA mask fails, then try for 64 bit mask 6035 * for FW capable of handling 64 bit DMA. 
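			 * Whether the firmware can actually handle 64 bit
			 * DMA is advertised through the
			 * MR_CAN_HANDLE_64_BIT_DMA_OFFSET bit in the
			 * outbound_scratch_pad_2 register, which is checked
			 * below before the 64 bit coherent mask is retried.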
6036 */ 6037 scratch_pad_2 = readl 6038 (&instance->reg_set->outbound_scratch_pad_2); 6039 6040 if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6041 goto fail_set_dma_mask; 6042 else if (dma_set_mask_and_coherent(&pdev->dev, 6043 DMA_BIT_MASK(64))) 6044 goto fail_set_dma_mask; 6045 } 6046 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6047 goto fail_set_dma_mask; 6048 6049 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6050 instance->consistent_mask_64bit = false; 6051 else 6052 instance->consistent_mask_64bit = true; 6053 6054 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6055 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"), 6056 (instance->consistent_mask_64bit ? "64" : "32")); 6057 6058 return 0; 6059 6060 fail_set_dma_mask: 6061 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6062 return -1; 6063 6064 } 6065 6066 /* 6067 * megasas_set_adapter_type - Set adapter type. 6068 * Supported controllers can be divided in 6069 * 4 categories- enum MR_ADAPTER_TYPE { 6070 * MFI_SERIES = 1, 6071 * THUNDERBOLT_SERIES = 2, 6072 * INVADER_SERIES = 3, 6073 * VENTURA_SERIES = 4, 6074 * }; 6075 * @instance: Adapter soft state 6076 * return: void 6077 */ 6078 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 6079 { 6080 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 6081 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 6082 instance->adapter_type = MFI_SERIES; 6083 } else { 6084 switch (instance->pdev->device) { 6085 case PCI_DEVICE_ID_LSI_VENTURA: 6086 case PCI_DEVICE_ID_LSI_CRUSADER: 6087 case PCI_DEVICE_ID_LSI_HARPOON: 6088 case PCI_DEVICE_ID_LSI_TOMCAT: 6089 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6090 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6091 instance->adapter_type = VENTURA_SERIES; 6092 break; 6093 case PCI_DEVICE_ID_LSI_FUSION: 6094 case PCI_DEVICE_ID_LSI_PLASMA: 6095 instance->adapter_type = THUNDERBOLT_SERIES; 6096 break; 6097 case PCI_DEVICE_ID_LSI_INVADER: 6098 case PCI_DEVICE_ID_LSI_INTRUDER: 6099 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6100 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6101 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6102 case PCI_DEVICE_ID_LSI_FURY: 6103 instance->adapter_type = INVADER_SERIES; 6104 break; 6105 default: /* For all other supported controllers */ 6106 instance->adapter_type = MFI_SERIES; 6107 break; 6108 } 6109 } 6110 } 6111 6112 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6113 { 6114 instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32), 6115 &instance->producer_h); 6116 instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32), 6117 &instance->consumer_h); 6118 6119 if (!instance->producer || !instance->consumer) { 6120 dev_err(&instance->pdev->dev, 6121 "Failed to allocate memory for producer, consumer\n"); 6122 return -1; 6123 } 6124 6125 *instance->producer = 0; 6126 *instance->consumer = 0; 6127 return 0; 6128 } 6129 6130 /** 6131 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 6132 * structures which are not common across MFI 6133 * adapters and fusion adapters. 6134 * For MFI based adapters, allocate producer and 6135 * consumer buffers. For fusion adapters, allocate 6136 * memory for fusion context. 
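 *
 * A reply_map array with one entry per possible CPU is also allocated
 * here; megasas_setup_reply_map() fills in the actual CPU to reply
 * queue mapping later.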
 * @instance: Adapter soft state
 * return: 0 for SUCCESS
 */
static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
{
	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
				      GFP_KERNEL);
	if (!instance->reply_map)
		return -ENOMEM;

	switch (instance->adapter_type) {
	case MFI_SERIES:
		if (megasas_alloc_mfi_ctrl_mem(instance))
			goto fail;
		break;
	case VENTURA_SERIES:
	case THUNDERBOLT_SERIES:
	case INVADER_SERIES:
		if (megasas_alloc_fusion_context(instance))
			goto fail;
		break;
	}

	return 0;
 fail:
	kfree(instance->reply_map);
	instance->reply_map = NULL;
	return -ENOMEM;
}

/*
 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
 *			   producer, consumer buffers for MFI adapters
 *
 * @instance - Adapter soft instance
 *
 */
static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
{
	kfree(instance->reply_map);
	if (instance->adapter_type == MFI_SERIES) {
		if (instance->producer)
			pci_free_consistent(instance->pdev, sizeof(u32),
					    instance->producer,
					    instance->producer_h);
		if (instance->consumer)
			pci_free_consistent(instance->pdev, sizeof(u32),
					    instance->consumer,
					    instance->consumer_h);
	} else {
		megasas_free_fusion_context(instance);
	}
}

/**
 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
 *				    driver load time
 *
 * @instance- Adapter soft instance
 * @return- 0 for SUCCESS
 */
static inline
int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
{
	struct pci_dev *pdev = instance->pdev;
	struct fusion_context *fusion = instance->ctrl_context;

	instance->evt_detail =
		pci_alloc_consistent(pdev,
				     sizeof(struct megasas_evt_detail),
				     &instance->evt_detail_h);

	if (!instance->evt_detail) {
		dev_err(&instance->pdev->dev,
			"Failed to allocate event detail buffer\n");
		return -ENOMEM;
	}

	if (fusion) {
		fusion->ioc_init_request =
			dma_alloc_coherent(&pdev->dev,
					   sizeof(struct MPI2_IOC_INIT_REQUEST),
					   &fusion->ioc_init_request_phys,
					   GFP_KERNEL);

		if (!fusion->ioc_init_request) {
			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
			return -ENOMEM;
		}
	}

	instance->pd_list_buf =
		pci_alloc_consistent(pdev,
				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
				     &instance->pd_list_buf_h);

	if (!instance->pd_list_buf) {
		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
		return -ENOMEM;
	}

	instance->ctrl_info_buf =
		pci_alloc_consistent(pdev,
				     sizeof(struct megasas_ctrl_info),
				     &instance->ctrl_info_buf_h);

	if (!instance->ctrl_info_buf) {
		dev_err(&pdev->dev,
			"Failed to allocate controller info buffer\n");
		return -ENOMEM;
	}

	instance->ld_list_buf =
		pci_alloc_consistent(pdev,
				     sizeof(struct MR_LD_LIST),
				     &instance->ld_list_buf_h);

	if (!instance->ld_list_buf) {
		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
		return -ENOMEM;
	}

	instance->ld_targetid_list_buf =
		pci_alloc_consistent(pdev,
				     sizeof(struct MR_LD_TARGETID_LIST),
				     &instance->ld_targetid_list_buf_h);

	if (!instance->ld_targetid_list_buf) {
		dev_err(&pdev->dev,
			"Failed to
allocate LD targetid list buffer\n"); 6268 return -ENOMEM; 6269 } 6270 6271 if (!reset_devices) { 6272 instance->system_info_buf = 6273 pci_alloc_consistent(pdev, 6274 sizeof(struct MR_DRV_SYSTEM_INFO), 6275 &instance->system_info_h); 6276 instance->pd_info = 6277 pci_alloc_consistent(pdev, 6278 sizeof(struct MR_PD_INFO), 6279 &instance->pd_info_h); 6280 instance->tgt_prop = 6281 pci_alloc_consistent(pdev, 6282 sizeof(struct MR_TARGET_PROPERTIES), 6283 &instance->tgt_prop_h); 6284 instance->crash_dump_buf = 6285 pci_alloc_consistent(pdev, 6286 CRASH_DMA_BUF_SIZE, 6287 &instance->crash_dump_h); 6288 6289 if (!instance->system_info_buf) 6290 dev_err(&instance->pdev->dev, 6291 "Failed to allocate system info buffer\n"); 6292 6293 if (!instance->pd_info) 6294 dev_err(&instance->pdev->dev, 6295 "Failed to allocate pd_info buffer\n"); 6296 6297 if (!instance->tgt_prop) 6298 dev_err(&instance->pdev->dev, 6299 "Failed to allocate tgt_prop buffer\n"); 6300 6301 if (!instance->crash_dump_buf) 6302 dev_err(&instance->pdev->dev, 6303 "Failed to allocate crash dump buffer\n"); 6304 } 6305 6306 return 0; 6307 } 6308 6309 /* 6310 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 6311 * during driver load time 6312 * 6313 * @instance- Adapter soft instance 6314 * 6315 */ 6316 static inline 6317 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 6318 { 6319 struct pci_dev *pdev = instance->pdev; 6320 struct fusion_context *fusion = instance->ctrl_context; 6321 6322 if (instance->evt_detail) 6323 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6324 instance->evt_detail, 6325 instance->evt_detail_h); 6326 6327 if (fusion && fusion->ioc_init_request) 6328 dma_free_coherent(&pdev->dev, 6329 sizeof(struct MPI2_IOC_INIT_REQUEST), 6330 fusion->ioc_init_request, 6331 fusion->ioc_init_request_phys); 6332 6333 if (instance->pd_list_buf) 6334 pci_free_consistent(pdev, 6335 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6336 instance->pd_list_buf, 6337 instance->pd_list_buf_h); 6338 6339 if (instance->ld_list_buf) 6340 pci_free_consistent(pdev, sizeof(struct MR_LD_LIST), 6341 instance->ld_list_buf, 6342 instance->ld_list_buf_h); 6343 6344 if (instance->ld_targetid_list_buf) 6345 pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST), 6346 instance->ld_targetid_list_buf, 6347 instance->ld_targetid_list_buf_h); 6348 6349 if (instance->ctrl_info_buf) 6350 pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info), 6351 instance->ctrl_info_buf, 6352 instance->ctrl_info_buf_h); 6353 6354 if (instance->system_info_buf) 6355 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), 6356 instance->system_info_buf, 6357 instance->system_info_h); 6358 6359 if (instance->pd_info) 6360 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6361 instance->pd_info, instance->pd_info_h); 6362 6363 if (instance->tgt_prop) 6364 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), 6365 instance->tgt_prop, instance->tgt_prop_h); 6366 6367 if (instance->crash_dump_buf) 6368 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 6369 instance->crash_dump_buf, 6370 instance->crash_dump_h); 6371 } 6372 6373 /* 6374 * megasas_init_ctrl_params - Initialize controller's instance 6375 * parameters before FW init 6376 * @instance - Adapter soft instance 6377 * @return - void 6378 */ 6379 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 6380 { 6381 instance->fw_crash_state = UNAVAILABLE; 6382 6383 megasas_poll_wait_aen = 0; 6384 instance->issuepend_done = 1; 
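	/*
	 * The adapter starts out operational; adprecovery is updated by the
	 * reset/OCR paths and is polled by
	 * megasas_wait_for_adapter_operational() before DCMDs are fired
	 * during detach, shutdown and ioctl handling.
	 */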
6385 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 6386 6387 /* 6388 * Initialize locks and queues 6389 */ 6390 INIT_LIST_HEAD(&instance->cmd_pool); 6391 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 6392 6393 atomic_set(&instance->fw_outstanding, 0); 6394 6395 init_waitqueue_head(&instance->int_cmd_wait_q); 6396 init_waitqueue_head(&instance->abort_cmd_wait_q); 6397 6398 spin_lock_init(&instance->crashdump_lock); 6399 spin_lock_init(&instance->mfi_pool_lock); 6400 spin_lock_init(&instance->hba_lock); 6401 spin_lock_init(&instance->stream_lock); 6402 spin_lock_init(&instance->completion_lock); 6403 6404 mutex_init(&instance->reset_mutex); 6405 6406 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6407 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 6408 instance->flag_ieee = 1; 6409 6410 megasas_dbg_lvl = 0; 6411 instance->flag = 0; 6412 instance->unload = 1; 6413 instance->last_time = 0; 6414 instance->disableOnlineCtrlReset = 1; 6415 instance->UnevenSpanSupport = 0; 6416 6417 if (instance->adapter_type != MFI_SERIES) { 6418 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6419 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); 6420 } else { 6421 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6422 } 6423 } 6424 6425 /** 6426 * megasas_probe_one - PCI hotplug entry point 6427 * @pdev: PCI device structure 6428 * @id: PCI ids of supported hotplugged adapter 6429 */ 6430 static int megasas_probe_one(struct pci_dev *pdev, 6431 const struct pci_device_id *id) 6432 { 6433 int rval, pos; 6434 struct Scsi_Host *host; 6435 struct megasas_instance *instance; 6436 u16 control = 0; 6437 6438 /* Reset MSI-X in the kdump kernel */ 6439 if (reset_devices) { 6440 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6441 if (pos) { 6442 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 6443 &control); 6444 if (control & PCI_MSIX_FLAGS_ENABLE) { 6445 dev_info(&pdev->dev, "resetting MSI-X\n"); 6446 pci_write_config_word(pdev, 6447 pos + PCI_MSIX_FLAGS, 6448 control & 6449 ~PCI_MSIX_FLAGS_ENABLE); 6450 } 6451 } 6452 } 6453 6454 /* 6455 * PCI prepping: enable device set bus mastering and dma mask 6456 */ 6457 rval = pci_enable_device_mem(pdev); 6458 6459 if (rval) { 6460 return rval; 6461 } 6462 6463 pci_set_master(pdev); 6464 6465 host = scsi_host_alloc(&megasas_template, 6466 sizeof(struct megasas_instance)); 6467 6468 if (!host) { 6469 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6470 goto fail_alloc_instance; 6471 } 6472 6473 instance = (struct megasas_instance *)host->hostdata; 6474 memset(instance, 0, sizeof(*instance)); 6475 atomic_set(&instance->fw_reset_no_pci_access, 0); 6476 6477 /* 6478 * Initialize PCI related and misc parameters 6479 */ 6480 instance->pdev = pdev; 6481 instance->host = host; 6482 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 6483 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 6484 6485 megasas_set_adapter_type(instance); 6486 6487 /* 6488 * Initialize MFI Firmware 6489 */ 6490 if (megasas_init_fw(instance)) 6491 goto fail_init_mfi; 6492 6493 if (instance->requestorId) { 6494 if (instance->PlasmaFW111) { 6495 instance->vf_affiliation_111 = 6496 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 6497 &instance->vf_affiliation_111_h); 6498 if (!instance->vf_affiliation_111) 6499 dev_warn(&pdev->dev, "Can't allocate " 6500 "memory for VF affiliation buffer\n"); 6501 } else { 6502 instance->vf_affiliation = 6503 pci_alloc_consistent(pdev, 6504 
(MAX_LOGICAL_DRIVES + 1) * 6505 sizeof(struct MR_LD_VF_AFFILIATION), 6506 &instance->vf_affiliation_h); 6507 if (!instance->vf_affiliation) 6508 dev_warn(&pdev->dev, "Can't allocate " 6509 "memory for VF affiliation buffer\n"); 6510 } 6511 } 6512 6513 /* 6514 * Store instance in PCI softstate 6515 */ 6516 pci_set_drvdata(pdev, instance); 6517 6518 /* 6519 * Add this controller to megasas_mgmt_info structure so that it 6520 * can be exported to management applications 6521 */ 6522 megasas_mgmt_info.count++; 6523 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 6524 megasas_mgmt_info.max_index++; 6525 6526 /* 6527 * Register with SCSI mid-layer 6528 */ 6529 if (megasas_io_attach(instance)) 6530 goto fail_io_attach; 6531 6532 instance->unload = 0; 6533 /* 6534 * Trigger SCSI to scan our drives 6535 */ 6536 scsi_scan_host(host); 6537 6538 /* 6539 * Initiate AEN (Asynchronous Event Notification) 6540 */ 6541 if (megasas_start_aen(instance)) { 6542 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 6543 goto fail_start_aen; 6544 } 6545 6546 /* Get current SR-IOV LD/VF affiliation */ 6547 if (instance->requestorId) 6548 megasas_get_ld_vf_affiliation(instance, 1); 6549 6550 return 0; 6551 6552 fail_start_aen: 6553 fail_io_attach: 6554 megasas_mgmt_info.count--; 6555 megasas_mgmt_info.max_index--; 6556 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6557 6558 instance->instancet->disable_intr(instance); 6559 megasas_destroy_irqs(instance); 6560 6561 if (instance->adapter_type != MFI_SERIES) 6562 megasas_release_fusion(instance); 6563 else 6564 megasas_release_mfi(instance); 6565 if (instance->msix_vectors) 6566 pci_free_irq_vectors(instance->pdev); 6567 fail_init_mfi: 6568 scsi_host_put(host); 6569 fail_alloc_instance: 6570 pci_disable_device(pdev); 6571 6572 return -ENODEV; 6573 } 6574 6575 /** 6576 * megasas_flush_cache - Requests FW to flush all its caches 6577 * @instance: Adapter soft state 6578 */ 6579 static void megasas_flush_cache(struct megasas_instance *instance) 6580 { 6581 struct megasas_cmd *cmd; 6582 struct megasas_dcmd_frame *dcmd; 6583 6584 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6585 return; 6586 6587 cmd = megasas_get_cmd(instance); 6588 6589 if (!cmd) 6590 return; 6591 6592 dcmd = &cmd->frame->dcmd; 6593 6594 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6595 6596 dcmd->cmd = MFI_CMD_DCMD; 6597 dcmd->cmd_status = 0x0; 6598 dcmd->sge_count = 0; 6599 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6600 dcmd->timeout = 0; 6601 dcmd->pad_0 = 0; 6602 dcmd->data_xfer_len = 0; 6603 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6604 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6605 6606 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6607 != DCMD_SUCCESS) { 6608 dev_err(&instance->pdev->dev, 6609 "return from %s %d\n", __func__, __LINE__); 6610 return; 6611 } 6612 6613 megasas_return_cmd(instance, cmd); 6614 } 6615 6616 /** 6617 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6618 * @instance: Adapter soft state 6619 * @opcode: Shutdown/Hibernate 6620 */ 6621 static void megasas_shutdown_controller(struct megasas_instance *instance, 6622 u32 opcode) 6623 { 6624 struct megasas_cmd *cmd; 6625 struct megasas_dcmd_frame *dcmd; 6626 6627 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6628 return; 6629 6630 cmd = megasas_get_cmd(instance); 6631 6632 if (!cmd) 6633 return; 6634 6635 if (instance->aen_cmd) 6636 
megasas_issue_blocked_abort_cmd(instance, 6637 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 6638 if (instance->map_update_cmd) 6639 megasas_issue_blocked_abort_cmd(instance, 6640 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 6641 if (instance->jbod_seq_cmd) 6642 megasas_issue_blocked_abort_cmd(instance, 6643 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 6644 6645 dcmd = &cmd->frame->dcmd; 6646 6647 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6648 6649 dcmd->cmd = MFI_CMD_DCMD; 6650 dcmd->cmd_status = 0x0; 6651 dcmd->sge_count = 0; 6652 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6653 dcmd->timeout = 0; 6654 dcmd->pad_0 = 0; 6655 dcmd->data_xfer_len = 0; 6656 dcmd->opcode = cpu_to_le32(opcode); 6657 6658 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6659 != DCMD_SUCCESS) { 6660 dev_err(&instance->pdev->dev, 6661 "return from %s %d\n", __func__, __LINE__); 6662 return; 6663 } 6664 6665 megasas_return_cmd(instance, cmd); 6666 } 6667 6668 #ifdef CONFIG_PM 6669 /** 6670 * megasas_suspend - driver suspend entry point 6671 * @pdev: PCI device structure 6672 * @state: PCI power state to suspend routine 6673 */ 6674 static int 6675 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 6676 { 6677 struct Scsi_Host *host; 6678 struct megasas_instance *instance; 6679 6680 instance = pci_get_drvdata(pdev); 6681 host = instance->host; 6682 instance->unload = 1; 6683 6684 /* Shutdown SR-IOV heartbeat timer */ 6685 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6686 del_timer_sync(&instance->sriov_heartbeat_timer); 6687 6688 megasas_flush_cache(instance); 6689 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6690 6691 /* cancel the delayed work if this work still in queue */ 6692 if (instance->ev != NULL) { 6693 struct megasas_aen_event *ev = instance->ev; 6694 cancel_delayed_work_sync(&ev->hotplug_work); 6695 instance->ev = NULL; 6696 } 6697 6698 tasklet_kill(&instance->isr_tasklet); 6699 6700 pci_set_drvdata(instance->pdev, instance); 6701 instance->instancet->disable_intr(instance); 6702 6703 megasas_destroy_irqs(instance); 6704 6705 if (instance->msix_vectors) 6706 pci_free_irq_vectors(instance->pdev); 6707 6708 pci_save_state(pdev); 6709 pci_disable_device(pdev); 6710 6711 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 6712 6713 return 0; 6714 } 6715 6716 /** 6717 * megasas_resume- driver resume entry point 6718 * @pdev: PCI device structure 6719 */ 6720 static int 6721 megasas_resume(struct pci_dev *pdev) 6722 { 6723 int rval; 6724 struct Scsi_Host *host; 6725 struct megasas_instance *instance; 6726 int irq_flags = PCI_IRQ_LEGACY; 6727 6728 instance = pci_get_drvdata(pdev); 6729 host = instance->host; 6730 pci_set_power_state(pdev, PCI_D0); 6731 pci_enable_wake(pdev, PCI_D0, 0); 6732 pci_restore_state(pdev); 6733 6734 /* 6735 * PCI prepping: enable device set bus mastering and dma mask 6736 */ 6737 rval = pci_enable_device_mem(pdev); 6738 6739 if (rval) { 6740 dev_err(&pdev->dev, "Enable device failed\n"); 6741 return rval; 6742 } 6743 6744 pci_set_master(pdev); 6745 6746 /* 6747 * We expect the FW state to be READY 6748 */ 6749 if (megasas_transition_to_ready(instance, 0)) 6750 goto fail_ready_state; 6751 6752 if (megasas_set_dma_mask(instance)) 6753 goto fail_set_dma_mask; 6754 6755 /* 6756 * Initialize MFI Firmware 6757 */ 6758 6759 atomic_set(&instance->fw_outstanding, 0); 6760 atomic_set(&instance->ldio_outstanding, 0); 6761 6762 /* Now re-enable MSI-X */ 6763 if (instance->msix_vectors) { 6764 irq_flags = PCI_IRQ_MSIX; 6765 
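		/*
		 * When smp_affinity_enable is set, PCI_IRQ_AFFINITY lets
		 * pci_alloc_irq_vectors() spread the MSI-X vectors across
		 * CPUs.
		 */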
if (smp_affinity_enable) 6766 irq_flags |= PCI_IRQ_AFFINITY; 6767 } 6768 rval = pci_alloc_irq_vectors(instance->pdev, 1, 6769 instance->msix_vectors ? 6770 instance->msix_vectors : 1, irq_flags); 6771 if (rval < 0) 6772 goto fail_reenable_msix; 6773 6774 megasas_setup_reply_map(instance); 6775 6776 if (instance->adapter_type != MFI_SERIES) { 6777 megasas_reset_reply_desc(instance); 6778 if (megasas_ioc_init_fusion(instance)) { 6779 megasas_free_cmds(instance); 6780 megasas_free_cmds_fusion(instance); 6781 goto fail_init_mfi; 6782 } 6783 if (!megasas_get_map_info(instance)) 6784 megasas_sync_map_info(instance); 6785 } else { 6786 *instance->producer = 0; 6787 *instance->consumer = 0; 6788 if (megasas_issue_init_mfi(instance)) 6789 goto fail_init_mfi; 6790 } 6791 6792 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6793 (unsigned long)instance); 6794 6795 if (instance->msix_vectors ? 6796 megasas_setup_irqs_msix(instance, 0) : 6797 megasas_setup_irqs_ioapic(instance)) 6798 goto fail_init_mfi; 6799 6800 /* Re-launch SR-IOV heartbeat timer */ 6801 if (instance->requestorId) { 6802 if (!megasas_sriov_start_heartbeat(instance, 0)) 6803 megasas_start_timer(instance); 6804 else { 6805 instance->skip_heartbeat_timer_del = 1; 6806 goto fail_init_mfi; 6807 } 6808 } 6809 6810 instance->instancet->enable_intr(instance); 6811 megasas_setup_jbod_map(instance); 6812 instance->unload = 0; 6813 6814 /* 6815 * Initiate AEN (Asynchronous Event Notification) 6816 */ 6817 if (megasas_start_aen(instance)) 6818 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 6819 6820 return 0; 6821 6822 fail_init_mfi: 6823 megasas_free_ctrl_dma_buffers(instance); 6824 megasas_free_ctrl_mem(instance); 6825 scsi_host_put(host); 6826 6827 fail_reenable_msix: 6828 fail_set_dma_mask: 6829 fail_ready_state: 6830 6831 pci_disable_device(pdev); 6832 6833 return -ENODEV; 6834 } 6835 #else 6836 #define megasas_suspend NULL 6837 #define megasas_resume NULL 6838 #endif 6839 6840 static inline int 6841 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 6842 { 6843 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 6844 int i; 6845 6846 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6847 return 1; 6848 6849 for (i = 0; i < wait_time; i++) { 6850 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 6851 break; 6852 6853 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 6854 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 6855 6856 msleep(1000); 6857 } 6858 6859 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6860 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n", 6861 __func__); 6862 return 1; 6863 } 6864 6865 return 0; 6866 } 6867 6868 /** 6869 * megasas_detach_one - PCI hot"un"plug entry point 6870 * @pdev: PCI device structure 6871 */ 6872 static void megasas_detach_one(struct pci_dev *pdev) 6873 { 6874 int i; 6875 struct Scsi_Host *host; 6876 struct megasas_instance *instance; 6877 struct fusion_context *fusion; 6878 u32 pd_seq_map_sz; 6879 6880 instance = pci_get_drvdata(pdev); 6881 host = instance->host; 6882 fusion = instance->ctrl_context; 6883 6884 /* Shutdown SR-IOV heartbeat timer */ 6885 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6886 del_timer_sync(&instance->sriov_heartbeat_timer); 6887 6888 if (instance->fw_crash_state != UNAVAILABLE) 6889 megasas_free_host_crash_buffer(instance); 6890 scsi_remove_host(instance->host); 6891 instance->unload = 1; 6892 6893 
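	/*
	 * Fire the cache flush and shutdown DCMDs only if the adapter is
	 * (or becomes) operational; otherwise skip straight to teardown.
	 */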
if (megasas_wait_for_adapter_operational(instance)) 6894 goto skip_firing_dcmds; 6895 6896 megasas_flush_cache(instance); 6897 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6898 6899 skip_firing_dcmds: 6900 /* cancel the delayed work if this work still in queue*/ 6901 if (instance->ev != NULL) { 6902 struct megasas_aen_event *ev = instance->ev; 6903 cancel_delayed_work_sync(&ev->hotplug_work); 6904 instance->ev = NULL; 6905 } 6906 6907 /* cancel all wait events */ 6908 wake_up_all(&instance->int_cmd_wait_q); 6909 6910 tasklet_kill(&instance->isr_tasklet); 6911 6912 /* 6913 * Take the instance off the instance array. Note that we will not 6914 * decrement the max_index. We let this array be sparse array 6915 */ 6916 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6917 if (megasas_mgmt_info.instance[i] == instance) { 6918 megasas_mgmt_info.count--; 6919 megasas_mgmt_info.instance[i] = NULL; 6920 6921 break; 6922 } 6923 } 6924 6925 instance->instancet->disable_intr(instance); 6926 6927 megasas_destroy_irqs(instance); 6928 6929 if (instance->msix_vectors) 6930 pci_free_irq_vectors(instance->pdev); 6931 6932 if (instance->adapter_type == VENTURA_SERIES) { 6933 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 6934 kfree(fusion->stream_detect_by_ld[i]); 6935 kfree(fusion->stream_detect_by_ld); 6936 fusion->stream_detect_by_ld = NULL; 6937 } 6938 6939 6940 if (instance->adapter_type != MFI_SERIES) { 6941 megasas_release_fusion(instance); 6942 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6943 (sizeof(struct MR_PD_CFG_SEQ) * 6944 (MAX_PHYSICAL_DEVICES - 1)); 6945 for (i = 0; i < 2 ; i++) { 6946 if (fusion->ld_map[i]) 6947 dma_free_coherent(&instance->pdev->dev, 6948 fusion->max_map_sz, 6949 fusion->ld_map[i], 6950 fusion->ld_map_phys[i]); 6951 if (fusion->ld_drv_map[i]) { 6952 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 6953 vfree(fusion->ld_drv_map[i]); 6954 else 6955 free_pages((ulong)fusion->ld_drv_map[i], 6956 fusion->drv_map_pages); 6957 } 6958 6959 if (fusion->pd_seq_sync[i]) 6960 dma_free_coherent(&instance->pdev->dev, 6961 pd_seq_map_sz, 6962 fusion->pd_seq_sync[i], 6963 fusion->pd_seq_phys[i]); 6964 } 6965 } else { 6966 megasas_release_mfi(instance); 6967 } 6968 6969 if (instance->vf_affiliation) 6970 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6971 sizeof(struct MR_LD_VF_AFFILIATION), 6972 instance->vf_affiliation, 6973 instance->vf_affiliation_h); 6974 6975 if (instance->vf_affiliation_111) 6976 pci_free_consistent(pdev, 6977 sizeof(struct MR_LD_VF_AFFILIATION_111), 6978 instance->vf_affiliation_111, 6979 instance->vf_affiliation_111_h); 6980 6981 if (instance->hb_host_mem) 6982 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), 6983 instance->hb_host_mem, 6984 instance->hb_host_mem_h); 6985 6986 megasas_free_ctrl_dma_buffers(instance); 6987 6988 megasas_free_ctrl_mem(instance); 6989 6990 scsi_host_put(host); 6991 6992 pci_disable_device(pdev); 6993 } 6994 6995 /** 6996 * megasas_shutdown - Shutdown entry point 6997 * @device: Generic device structure 6998 */ 6999 static void megasas_shutdown(struct pci_dev *pdev) 7000 { 7001 struct megasas_instance *instance = pci_get_drvdata(pdev); 7002 7003 instance->unload = 1; 7004 7005 if (megasas_wait_for_adapter_operational(instance)) 7006 goto skip_firing_dcmds; 7007 7008 megasas_flush_cache(instance); 7009 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7010 7011 skip_firing_dcmds: 7012 instance->instancet->disable_intr(instance); 7013 megasas_destroy_irqs(instance); 7014 7015 if 
(instance->msix_vectors) 7016 pci_free_irq_vectors(instance->pdev); 7017 } 7018 7019 /** 7020 * megasas_mgmt_open - char node "open" entry point 7021 */ 7022 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 7023 { 7024 /* 7025 * Allow only those users with admin rights 7026 */ 7027 if (!capable(CAP_SYS_ADMIN)) 7028 return -EACCES; 7029 7030 return 0; 7031 } 7032 7033 /** 7034 * megasas_mgmt_fasync - Async notifier registration from applications 7035 * 7036 * This function adds the calling process to a driver global queue. When an 7037 * event occurs, SIGIO will be sent to all processes in this queue. 7038 */ 7039 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 7040 { 7041 int rc; 7042 7043 mutex_lock(&megasas_async_queue_mutex); 7044 7045 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 7046 7047 mutex_unlock(&megasas_async_queue_mutex); 7048 7049 if (rc >= 0) { 7050 /* For sanity check when we get ioctl */ 7051 filep->private_data = filep; 7052 return 0; 7053 } 7054 7055 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 7056 7057 return rc; 7058 } 7059 7060 /** 7061 * megasas_mgmt_poll - char node "poll" entry point 7062 * */ 7063 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 7064 { 7065 __poll_t mask; 7066 unsigned long flags; 7067 7068 poll_wait(file, &megasas_poll_wait, wait); 7069 spin_lock_irqsave(&poll_aen_lock, flags); 7070 if (megasas_poll_wait_aen) 7071 mask = (EPOLLIN | EPOLLRDNORM); 7072 else 7073 mask = 0; 7074 megasas_poll_wait_aen = 0; 7075 spin_unlock_irqrestore(&poll_aen_lock, flags); 7076 return mask; 7077 } 7078 7079 /* 7080 * megasas_set_crash_dump_params_ioctl: 7081 * Send CRASH_DUMP_MODE DCMD to all controllers 7082 * @cmd: MFI command frame 7083 */ 7084 7085 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 7086 { 7087 struct megasas_instance *local_instance; 7088 int i, error = 0; 7089 int crash_support; 7090 7091 crash_support = cmd->frame->dcmd.mbox.w[0]; 7092 7093 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7094 local_instance = megasas_mgmt_info.instance[i]; 7095 if (local_instance && local_instance->crash_dump_drv_support) { 7096 if ((atomic_read(&local_instance->adprecovery) == 7097 MEGASAS_HBA_OPERATIONAL) && 7098 !megasas_set_crash_dump_params(local_instance, 7099 crash_support)) { 7100 local_instance->crash_dump_app_support = 7101 crash_support; 7102 dev_info(&local_instance->pdev->dev, 7103 "Application firmware crash " 7104 "dump mode set success\n"); 7105 error = 0; 7106 } else { 7107 dev_info(&local_instance->pdev->dev, 7108 "Application firmware crash " 7109 "dump mode set failed\n"); 7110 error = -1; 7111 } 7112 } 7113 } 7114 return error; 7115 } 7116 7117 /** 7118 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 7119 * @instance: Adapter soft state 7120 * @argp: User's ioctl packet 7121 */ 7122 static int 7123 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 7124 struct megasas_iocpacket __user * user_ioc, 7125 struct megasas_iocpacket *ioc) 7126 { 7127 struct megasas_sge64 *kern_sge64 = NULL; 7128 struct megasas_sge32 *kern_sge32 = NULL; 7129 struct megasas_cmd *cmd; 7130 void *kbuff_arr[MAX_IOCTL_SGE]; 7131 dma_addr_t buf_handle = 0; 7132 int error = 0, i; 7133 void *sense = NULL; 7134 dma_addr_t sense_handle; 7135 unsigned long *sense_ptr; 7136 u32 opcode = 0; 7137 7138 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 7139 7140 if (ioc->sge_count > MAX_IOCTL_SGE) { 7141 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE 
count [%d] > max limit [%d]\n", 7142 ioc->sge_count, MAX_IOCTL_SGE); 7143 return -EINVAL; 7144 } 7145 7146 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 7147 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 7148 !instance->support_nvme_passthru)) { 7149 dev_err(&instance->pdev->dev, 7150 "Received invalid ioctl command 0x%x\n", 7151 ioc->frame.hdr.cmd); 7152 return -ENOTSUPP; 7153 } 7154 7155 cmd = megasas_get_cmd(instance); 7156 if (!cmd) { 7157 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 7158 return -ENOMEM; 7159 } 7160 7161 /* 7162 * User's IOCTL packet has 2 frames (maximum). Copy those two 7163 * frames into our cmd's frames. cmd->frame's context will get 7164 * overwritten when we copy from user's frames. So set that value 7165 * alone separately 7166 */ 7167 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 7168 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 7169 cmd->frame->hdr.pad_0 = 0; 7170 7171 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 7172 7173 if (instance->consistent_mask_64bit) 7174 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 7175 MFI_FRAME_SENSE64)); 7176 else 7177 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 7178 MFI_FRAME_SENSE64)); 7179 7180 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 7181 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 7182 7183 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 7184 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 7185 megasas_return_cmd(instance, cmd); 7186 return -1; 7187 } 7188 } 7189 7190 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 7191 error = megasas_set_crash_dump_params_ioctl(cmd); 7192 megasas_return_cmd(instance, cmd); 7193 return error; 7194 } 7195 7196 /* 7197 * The management interface between applications and the fw uses 7198 * MFI frames. E.g, RAID configuration changes, LD property changes 7199 * etc are accomplishes through different kinds of MFI frames. The 7200 * driver needs to care only about substituting user buffers with 7201 * kernel buffers in SGLs. The location of SGL is embedded in the 7202 * struct iocpacket itself. 7203 */ 7204 if (instance->consistent_mask_64bit) 7205 kern_sge64 = (struct megasas_sge64 *) 7206 ((unsigned long)cmd->frame + ioc->sgl_off); 7207 else 7208 kern_sge32 = (struct megasas_sge32 *) 7209 ((unsigned long)cmd->frame + ioc->sgl_off); 7210 7211 /* 7212 * For each user buffer, create a mirror buffer and copy in 7213 */ 7214 for (i = 0; i < ioc->sge_count; i++) { 7215 if (!ioc->sgl[i].iov_len) 7216 continue; 7217 7218 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 7219 ioc->sgl[i].iov_len, 7220 &buf_handle, GFP_KERNEL); 7221 if (!kbuff_arr[i]) { 7222 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 7223 "kernel SGL buffer for IOCTL\n"); 7224 error = -ENOMEM; 7225 goto out; 7226 } 7227 7228 /* 7229 * We don't change the dma_coherent_mask, so 7230 * pci_alloc_consistent only returns 32bit addresses 7231 */ 7232 if (instance->consistent_mask_64bit) { 7233 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 7234 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7235 } else { 7236 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 7237 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7238 } 7239 7240 /* 7241 * We created a kernel buffer corresponding to the 7242 * user buffer. 
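		 * Its DMA address has already been patched into the frame's
		 * SGL above (a 64-bit or 32-bit SGE depending on
		 * consistent_mask_64bit), so the firmware sees the kernel
		 * copy rather than the user pages.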
Now copy in from the user buffer 7243 */ 7244 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 7245 (u32) (ioc->sgl[i].iov_len))) { 7246 error = -EFAULT; 7247 goto out; 7248 } 7249 } 7250 7251 if (ioc->sense_len) { 7252 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 7253 &sense_handle, GFP_KERNEL); 7254 if (!sense) { 7255 error = -ENOMEM; 7256 goto out; 7257 } 7258 7259 sense_ptr = 7260 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 7261 if (instance->consistent_mask_64bit) 7262 *sense_ptr = cpu_to_le64(sense_handle); 7263 else 7264 *sense_ptr = cpu_to_le32(sense_handle); 7265 } 7266 7267 /* 7268 * Set the sync_cmd flag so that the ISR knows not to complete this 7269 * cmd to the SCSI mid-layer 7270 */ 7271 cmd->sync_cmd = 1; 7272 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 7273 cmd->sync_cmd = 0; 7274 dev_err(&instance->pdev->dev, 7275 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 7276 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 7277 cmd->cmd_status_drv); 7278 return -EBUSY; 7279 } 7280 7281 cmd->sync_cmd = 0; 7282 7283 if (instance->unload == 1) { 7284 dev_info(&instance->pdev->dev, "Driver unload is in progress " 7285 "don't submit data to application\n"); 7286 goto out; 7287 } 7288 /* 7289 * copy out the kernel buffers to user buffers 7290 */ 7291 for (i = 0; i < ioc->sge_count; i++) { 7292 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 7293 ioc->sgl[i].iov_len)) { 7294 error = -EFAULT; 7295 goto out; 7296 } 7297 } 7298 7299 /* 7300 * copy out the sense 7301 */ 7302 if (ioc->sense_len) { 7303 /* 7304 * sense_ptr points to the location that has the user 7305 * sense buffer address 7306 */ 7307 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 7308 ioc->sense_off); 7309 7310 if (copy_to_user((void __user *)((unsigned long) 7311 get_unaligned((unsigned long *)sense_ptr)), 7312 sense, ioc->sense_len)) { 7313 dev_err(&instance->pdev->dev, "Failed to copy out to user " 7314 "sense data\n"); 7315 error = -EFAULT; 7316 goto out; 7317 } 7318 } 7319 7320 /* 7321 * copy the status codes returned by the fw 7322 */ 7323 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 7324 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 7325 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 7326 error = -EFAULT; 7327 } 7328 7329 out: 7330 if (sense) { 7331 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 7332 sense, sense_handle); 7333 } 7334 7335 for (i = 0; i < ioc->sge_count; i++) { 7336 if (kbuff_arr[i]) { 7337 if (instance->consistent_mask_64bit) 7338 dma_free_coherent(&instance->pdev->dev, 7339 le32_to_cpu(kern_sge64[i].length), 7340 kbuff_arr[i], 7341 le64_to_cpu(kern_sge64[i].phys_addr)); 7342 else 7343 dma_free_coherent(&instance->pdev->dev, 7344 le32_to_cpu(kern_sge32[i].length), 7345 kbuff_arr[i], 7346 le32_to_cpu(kern_sge32[i].phys_addr)); 7347 kbuff_arr[i] = NULL; 7348 } 7349 } 7350 7351 megasas_return_cmd(instance, cmd); 7352 return error; 7353 } 7354 7355 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 7356 { 7357 struct megasas_iocpacket __user *user_ioc = 7358 (struct megasas_iocpacket __user *)arg; 7359 struct megasas_iocpacket *ioc; 7360 struct megasas_instance *instance; 7361 int error; 7362 7363 ioc = memdup_user(user_ioc, sizeof(*ioc)); 7364 if (IS_ERR(ioc)) 7365 return PTR_ERR(ioc); 7366 7367 instance = megasas_lookup_instance(ioc->host_no); 7368 if (!instance) { 7369 error = -ENODEV; 7370 goto out_kfree_ioc; 7371 } 
7372 7373 /* Block ioctls in VF mode */ 7374 if (instance->requestorId && !allow_vf_ioctls) { 7375 error = -ENODEV; 7376 goto out_kfree_ioc; 7377 } 7378 7379 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7380 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 7381 error = -ENODEV; 7382 goto out_kfree_ioc; 7383 } 7384 7385 if (instance->unload == 1) { 7386 error = -ENODEV; 7387 goto out_kfree_ioc; 7388 } 7389 7390 if (down_interruptible(&instance->ioctl_sem)) { 7391 error = -ERESTARTSYS; 7392 goto out_kfree_ioc; 7393 } 7394 7395 if (megasas_wait_for_adapter_operational(instance)) { 7396 error = -ENODEV; 7397 goto out_up; 7398 } 7399 7400 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 7401 out_up: 7402 up(&instance->ioctl_sem); 7403 7404 out_kfree_ioc: 7405 kfree(ioc); 7406 return error; 7407 } 7408 7409 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 7410 { 7411 struct megasas_instance *instance; 7412 struct megasas_aen aen; 7413 int error; 7414 7415 if (file->private_data != file) { 7416 printk(KERN_DEBUG "megasas: fasync_helper was not " 7417 "called first\n"); 7418 return -EINVAL; 7419 } 7420 7421 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 7422 return -EFAULT; 7423 7424 instance = megasas_lookup_instance(aen.host_no); 7425 7426 if (!instance) 7427 return -ENODEV; 7428 7429 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7430 return -ENODEV; 7431 } 7432 7433 if (instance->unload == 1) { 7434 return -ENODEV; 7435 } 7436 7437 if (megasas_wait_for_adapter_operational(instance)) 7438 return -ENODEV; 7439 7440 mutex_lock(&instance->reset_mutex); 7441 error = megasas_register_aen(instance, aen.seq_num, 7442 aen.class_locale_word); 7443 mutex_unlock(&instance->reset_mutex); 7444 return error; 7445 } 7446 7447 /** 7448 * megasas_mgmt_ioctl - char node ioctl entry point 7449 */ 7450 static long 7451 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 7452 { 7453 switch (cmd) { 7454 case MEGASAS_IOC_FIRMWARE: 7455 return megasas_mgmt_ioctl_fw(file, arg); 7456 7457 case MEGASAS_IOC_GET_AEN: 7458 return megasas_mgmt_ioctl_aen(file, arg); 7459 } 7460 7461 return -ENOTTY; 7462 } 7463 7464 #ifdef CONFIG_COMPAT 7465 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 7466 { 7467 struct compat_megasas_iocpacket __user *cioc = 7468 (struct compat_megasas_iocpacket __user *)arg; 7469 struct megasas_iocpacket __user *ioc = 7470 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 7471 int i; 7472 int error = 0; 7473 compat_uptr_t ptr; 7474 u32 local_sense_off; 7475 u32 local_sense_len; 7476 u32 user_sense_off; 7477 7478 if (clear_user(ioc, sizeof(*ioc))) 7479 return -EFAULT; 7480 7481 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 7482 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 7483 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 7484 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 7485 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 7486 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 7487 return -EFAULT; 7488 7489 /* 7490 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 7491 * sense_len is not null, so prepare the 64bit value under 7492 * the same condition. 
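	 * The 32-bit application stores that pointer as a compat_uptr_t, so
	 * it is widened with compat_ptr() below before being written into
	 * the native megasas_iocpacket copy.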
7493 */ 7494 if (get_user(local_sense_off, &ioc->sense_off) || 7495 get_user(local_sense_len, &ioc->sense_len) || 7496 get_user(user_sense_off, &cioc->sense_off)) 7497 return -EFAULT; 7498 7499 if (local_sense_len) { 7500 void __user **sense_ioc_ptr = 7501 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 7502 compat_uptr_t *sense_cioc_ptr = 7503 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 7504 if (get_user(ptr, sense_cioc_ptr) || 7505 put_user(compat_ptr(ptr), sense_ioc_ptr)) 7506 return -EFAULT; 7507 } 7508 7509 for (i = 0; i < MAX_IOCTL_SGE; i++) { 7510 if (get_user(ptr, &cioc->sgl[i].iov_base) || 7511 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 7512 copy_in_user(&ioc->sgl[i].iov_len, 7513 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 7514 return -EFAULT; 7515 } 7516 7517 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 7518 7519 if (copy_in_user(&cioc->frame.hdr.cmd_status, 7520 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 7521 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 7522 return -EFAULT; 7523 } 7524 return error; 7525 } 7526 7527 static long 7528 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 7529 unsigned long arg) 7530 { 7531 switch (cmd) { 7532 case MEGASAS_IOC_FIRMWARE32: 7533 return megasas_mgmt_compat_ioctl_fw(file, arg); 7534 case MEGASAS_IOC_GET_AEN: 7535 return megasas_mgmt_ioctl_aen(file, arg); 7536 } 7537 7538 return -ENOTTY; 7539 } 7540 #endif 7541 7542 /* 7543 * File operations structure for management interface 7544 */ 7545 static const struct file_operations megasas_mgmt_fops = { 7546 .owner = THIS_MODULE, 7547 .open = megasas_mgmt_open, 7548 .fasync = megasas_mgmt_fasync, 7549 .unlocked_ioctl = megasas_mgmt_ioctl, 7550 .poll = megasas_mgmt_poll, 7551 #ifdef CONFIG_COMPAT 7552 .compat_ioctl = megasas_mgmt_compat_ioctl, 7553 #endif 7554 .llseek = noop_llseek, 7555 }; 7556 7557 /* 7558 * PCI hotplug support registration structure 7559 */ 7560 static struct pci_driver megasas_pci_driver = { 7561 7562 .name = "megaraid_sas", 7563 .id_table = megasas_pci_table, 7564 .probe = megasas_probe_one, 7565 .remove = megasas_detach_one, 7566 .suspend = megasas_suspend, 7567 .resume = megasas_resume, 7568 .shutdown = megasas_shutdown, 7569 }; 7570 7571 /* 7572 * Sysfs driver attributes 7573 */ 7574 static ssize_t version_show(struct device_driver *dd, char *buf) 7575 { 7576 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 7577 MEGASAS_VERSION); 7578 } 7579 static DRIVER_ATTR_RO(version); 7580 7581 static ssize_t release_date_show(struct device_driver *dd, char *buf) 7582 { 7583 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 7584 MEGASAS_RELDATE); 7585 } 7586 static DRIVER_ATTR_RO(release_date); 7587 7588 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 7589 { 7590 return sprintf(buf, "%u\n", support_poll_for_event); 7591 } 7592 static DRIVER_ATTR_RO(support_poll_for_event); 7593 7594 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 7595 { 7596 return sprintf(buf, "%u\n", support_device_change); 7597 } 7598 static DRIVER_ATTR_RO(support_device_change); 7599 7600 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 7601 { 7602 return sprintf(buf, "%u\n", megasas_dbg_lvl); 7603 } 7604 7605 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 7606 size_t count) 7607 { 7608 int retval = count; 7609 7610 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 7611 printk(KERN_ERR 
"megasas: could not set dbg_lvl\n"); 7612 retval = -EINVAL; 7613 } 7614 return retval; 7615 } 7616 static DRIVER_ATTR_RW(dbg_lvl); 7617 7618 static ssize_t 7619 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 7620 { 7621 return sprintf(buf, "%u\n", support_nvme_encapsulation); 7622 } 7623 7624 static DRIVER_ATTR_RO(support_nvme_encapsulation); 7625 7626 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 7627 { 7628 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 7629 scsi_remove_device(sdev); 7630 scsi_device_put(sdev); 7631 } 7632 7633 static void 7634 megasas_aen_polling(struct work_struct *work) 7635 { 7636 struct megasas_aen_event *ev = 7637 container_of(work, struct megasas_aen_event, hotplug_work.work); 7638 struct megasas_instance *instance = ev->instance; 7639 union megasas_evt_class_locale class_locale; 7640 struct Scsi_Host *host; 7641 struct scsi_device *sdev1; 7642 u16 pd_index = 0; 7643 u16 ld_index = 0; 7644 int i, j, doscan = 0; 7645 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 7646 int error; 7647 u8 dcmd_ret = DCMD_SUCCESS; 7648 7649 if (!instance) { 7650 printk(KERN_ERR "invalid instance!\n"); 7651 kfree(ev); 7652 return; 7653 } 7654 7655 /* Adjust event workqueue thread wait time for VF mode */ 7656 if (instance->requestorId) 7657 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 7658 7659 /* Don't run the event workqueue thread if OCR is running */ 7660 mutex_lock(&instance->reset_mutex); 7661 7662 instance->ev = NULL; 7663 host = instance->host; 7664 if (instance->evt_detail) { 7665 megasas_decode_evt(instance); 7666 7667 switch (le32_to_cpu(instance->evt_detail->code)) { 7668 7669 case MR_EVT_PD_INSERTED: 7670 case MR_EVT_PD_REMOVED: 7671 dcmd_ret = megasas_get_pd_list(instance); 7672 if (dcmd_ret == DCMD_SUCCESS) 7673 doscan = SCAN_PD_CHANNEL; 7674 break; 7675 7676 case MR_EVT_LD_OFFLINE: 7677 case MR_EVT_CFG_CLEARED: 7678 case MR_EVT_LD_DELETED: 7679 case MR_EVT_LD_CREATED: 7680 if (!instance->requestorId || 7681 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7682 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7683 7684 if (dcmd_ret == DCMD_SUCCESS) 7685 doscan = SCAN_VD_CHANNEL; 7686 7687 break; 7688 7689 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 7690 case MR_EVT_FOREIGN_CFG_IMPORTED: 7691 case MR_EVT_LD_STATE_CHANGE: 7692 dcmd_ret = megasas_get_pd_list(instance); 7693 7694 if (dcmd_ret != DCMD_SUCCESS) 7695 break; 7696 7697 if (!instance->requestorId || 7698 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7699 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7700 7701 if (dcmd_ret != DCMD_SUCCESS) 7702 break; 7703 7704 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; 7705 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 7706 instance->host->host_no); 7707 break; 7708 7709 case MR_EVT_CTRL_PROP_CHANGED: 7710 dcmd_ret = megasas_get_ctrl_info(instance); 7711 break; 7712 default: 7713 doscan = 0; 7714 break; 7715 } 7716 } else { 7717 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 7718 mutex_unlock(&instance->reset_mutex); 7719 kfree(ev); 7720 return; 7721 } 7722 7723 mutex_unlock(&instance->reset_mutex); 7724 7725 if (doscan & SCAN_PD_CHANNEL) { 7726 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 7727 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7728 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 7729 sdev1 = scsi_device_lookup(host, i, j, 0); 7730 if (instance->pd_list[pd_index].driveState == 
	if (doscan & SCAN_PD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
				sdev1 = scsi_device_lookup(host, i, j, 0);
				if (instance->pd_list[pd_index].driveState ==
				    MR_PD_STATE_SYSTEM) {
					if (!sdev1)
						scsi_add_device(host, i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

	if (doscan & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
				     class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	return rval;

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
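/*
 * Module load/unload sketch (illustration only): the driver normally
 * binds automatically through its PCI ID table, but it can also be
 * driven by hand; the parameter values below are placeholders:
 *
 *	# modprobe megaraid_sas max_sectors=128 msix_disable=0
 *	# rmmod megaraid_sas
 */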