/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
        "Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, S_IRUGO);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
        "Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
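/*
 * Illustrative usage note (example only, not functional code): the entries
 * above and below are plain module parameters, so on a typical system they
 * can be set at module load time or on the kernel command line, e.g.
 *
 *      modprobe megaraid_sas max_sectors=128 resetwaittime=60
 *
 * The exact module name and default values depend on the build
 * configuration; the values shown here are just an example.
 */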
Default: 180s"); 89 90 int smp_affinity_enable = 1; 91 module_param(smp_affinity_enable, int, S_IRUGO); 92 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); 93 94 int rdpq_enable = 1; 95 module_param(rdpq_enable, int, S_IRUGO); 96 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); 97 98 unsigned int dual_qdepth_disable; 99 module_param(dual_qdepth_disable, int, S_IRUGO); 100 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); 101 102 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 103 module_param(scmd_timeout, int, S_IRUGO); 104 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer."); 105 106 MODULE_LICENSE("GPL"); 107 MODULE_VERSION(MEGASAS_VERSION); 108 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); 109 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); 110 111 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 112 static int megasas_get_pd_list(struct megasas_instance *instance); 113 static int megasas_ld_list_query(struct megasas_instance *instance, 114 u8 query_type); 115 static int megasas_issue_init_mfi(struct megasas_instance *instance); 116 static int megasas_register_aen(struct megasas_instance *instance, 117 u32 seq_num, u32 class_locale_word); 118 static void megasas_get_pd_info(struct megasas_instance *instance, 119 struct scsi_device *sdev); 120 121 /* 122 * PCI ID table for all supported controllers 123 */ 124 static struct pci_device_id megasas_pci_table[] = { 125 126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, 127 /* xscale IOP */ 128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, 129 /* ppc IOP */ 130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 131 /* ppc IOP */ 132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 133 /* gen2*/ 134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 135 /* gen2*/ 136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, 137 /* skinny*/ 138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, 139 /* skinny*/ 140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 141 /* xscale IOP, vega */ 142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 143 /* xscale IOP */ 144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 145 /* Fusion */ 146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, 147 /* Plasma */ 148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, 149 /* Invader */ 150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 151 /* Fury */ 152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 153 /* Intruder */ 154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 155 /* Intruder 24 port*/ 156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 158 /* VENTURA */ 159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
        {}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
                     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
                       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
                            struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
              int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
                                         int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
                  const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;
        /*
         * Due to a HW errata in Aero controllers, reads to certain
         * Fusion registers could intermittently return all zeroes.
         * This behavior is transient in nature and subsequent reads will
         * return valid value. As a workaround in driver, retry readl for
         * up to three times until a non-zero value is read.
         */
        if (instance->adapter_type == AERO_SERIES) {
                do {
                        ret_val = readl(addr);
                        i++;
                } while (ret_val == 0 && i < 3);
                return ret_val;
        } else {
                return readl(addr);
        }
}

/**
 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
 * @instance: Adapter soft state
 * @dcmd: DCMD frame inside MFI command
 * @dma_addr: DMA address of buffer to be passed to FW
 * @dma_len: Length of DMA buffer to be passed to FW
 * @return: void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
                              struct megasas_dcmd_frame *dcmd,
                              dma_addr_t dma_addr, u32 dma_len)
{
        if (instance->consistent_mask_64bit) {
                dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
                dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
                dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

        } else {
                dcmd->sgl.sge32[0].phys_addr =
                        cpu_to_le32(lower_32_bits(dma_addr));
                dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
                dcmd->flags = cpu_to_le16(dcmd->flags);
        }
}

void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        instance->instancet->fire_cmd(instance,
                cmd->frame_phys_addr, 0, instance->reg_set);
        return;
}

/**
 * megasas_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
                                                  *instance)
{
        unsigned long flags;
        struct megasas_cmd *cmd = NULL;

        spin_lock_irqsave(&instance->mfi_pool_lock, flags);

        if (!list_empty(&instance->cmd_pool)) {
                cmd = list_entry((&instance->cmd_pool)->next,
                                 struct megasas_cmd, list);
                list_del_init(&cmd->list);
        } else {
                dev_err(&instance->pdev->dev, "Command pool empty!\n");
        }

        spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
        return cmd;
}
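/*
 * Illustrative note (sketch only, not additional driver logic): internal
 * MFI commands are used in a get/issue/return pattern, e.g.
 *
 *      cmd = megasas_get_cmd(instance);
 *      if (cmd) {
 *              ... fill cmd->frame and issue it ...
 *              megasas_return_cmd(instance, cmd);
 *      }
 *
 * megasas_return_cmd() below clears the frame before putting the command
 * back on the free pool, so callers must not touch the frame afterwards.
 */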
/**
 * megasas_return_cmd - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        unsigned long flags;
        u32 blk_tags;
        struct megasas_cmd_fusion *cmd_fusion;
        struct fusion_context *fusion = instance->ctrl_context;

        /* This flag is used only for fusion adapter.
         * Wait for Interrupt for Polled mode DCMD
         */
        if (cmd->flags & DRV_DCMD_POLLED_MODE)
                return;

        spin_lock_irqsave(&instance->mfi_pool_lock, flags);

        if (fusion) {
                blk_tags = instance->max_scsi_cmds + cmd->index;
                cmd_fusion = fusion->cmd_list[blk_tags];
                megasas_return_cmd_fusion(instance, cmd_fusion);
        }
        cmd->scmd = NULL;
        cmd->frame_count = 0;
        cmd->flags = 0;
        memset(cmd->frame, 0, instance->mfi_frame_size);
        cmd->frame->io.context = cpu_to_le32(cmd->index);
        if (!fusion && reset_devices)
                cmd->frame->hdr.cmd = MFI_CMD_INVALID;
        list_add(&cmd->list, (&instance->cmd_pool)->next);

        spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
        static char buffer[32];

        if ((timestamp & 0xff000000) == 0xff000000)
                snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
                0x00ffffff);
        else
                snprintf(buffer, sizeof(buffer), "%us", timestamp);
        return buffer;
}

static const char *
format_class(int8_t class)
{
        static char buffer[6];

        switch (class) {
        case MFI_EVT_CLASS_DEBUG:
                return "debug";
        case MFI_EVT_CLASS_PROGRESS:
                return "progress";
        case MFI_EVT_CLASS_INFO:
                return "info";
        case MFI_EVT_CLASS_WARNING:
                return "WARN";
        case MFI_EVT_CLASS_CRITICAL:
                return "CRIT";
        case MFI_EVT_CLASS_FATAL:
                return "FATAL";
        case MFI_EVT_CLASS_DEAD:
                return "DEAD";
        default:
                snprintf(buffer, sizeof(buffer), "%d", class);
                return buffer;
        }
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance: Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
        struct megasas_evt_detail *evt_detail = instance->evt_detail;
        union megasas_evt_class_locale class_locale;
        class_locale.word = le32_to_cpu(evt_detail->cl.word);

        if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
                dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
                        le32_to_cpu(evt_detail->seq_num),
                        format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
                        (class_locale.members.locale),
                        format_class(class_locale.members.class),
                        evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0x1f;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_interrupt_xscale - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_OB_INTR_STATUS_MASK)
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
        if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        if (mfiStatus)
                writel(status, &regs->outbound_intr_status);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
                        dma_addr_t frame_phys_addr,
                        u32 frame_count,
                        struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr >> 3)|(frame_count),
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
                         struct megasas_register_set __iomem *regs)
{
        u32 i;
        u32 pcidata;

        writel(MFI_ADP_RESET, &regs->inbound_doorbell);

        for (i = 0; i < 3; i++)
                msleep(1000); /* sleep for 3 secs */
        pcidata = 0;
        pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
        dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
        if (pcidata & 0x2) {
                dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
                pcidata &= ~0x2;
                pci_write_config_dword(instance->pdev,
                                       MFI_1068_PCSR_OFFSET, pcidata);

                for (i = 0; i < 2; i++)
                        msleep(1000); /* need to wait 2 secs again */

                pcidata = 0;
                pci_read_config_dword(instance->pdev,
                                      MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
                dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
                if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
                        dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
                        pcidata = 0;
                        pci_write_config_dword(instance->pdev,
                                               MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
                }
        }
        return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
                           struct megasas_register_set __iomem *regs)
{
        if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
            (le32_to_cpu(*instance->consumer) ==
             MEGASAS_ADPRESET_INPROG_SIGN))
                return 1;
        return 0;
}
static struct megasas_instance_template megasas_instance_template_xscale = {

        .fire_cmd = megasas_fire_cmd_xscale,
        .enable_intr = megasas_enable_intr_xscale,
        .disable_intr = megasas_disable_intr_xscale,
        .clear_intr = megasas_clear_intr_xscale,
        .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
        .adp_reset = megasas_adp_reset_xscale,
        .check_reset = megasas_check_reset_xscale,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

        writel(~0x80000000, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc - Disable interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_ppc - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
        u32 status, mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

        if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        writel(status, &regs->outbound_doorbell_clear);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_doorbell_clear);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
                     dma_addr_t frame_phys_addr,
                     u32 frame_count,
                     struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr | (frame_count<<1))|1,
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}
/**
 * megasas_check_reset_ppc - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
                        struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

        .fire_cmd = megasas_fire_cmd_ppc,
        .enable_intr = megasas_enable_intr_ppc,
        .disable_intr = megasas_disable_intr_ppc,
        .clear_intr = megasas_clear_intr_ppc,
        .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
        .adp_reset = megasas_adp_reset_xscale,
        .check_reset = megasas_check_reset_ppc,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

        writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_skinny - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
                return 0;
        }

        /*
         * Check if it is our interrupt
         */
        if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
            MFI_STATE_FAULT) {
                mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
        } else
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        writel(status, &regs->outbound_intr_status);

        /*
         * dummy read to flush PCI
         */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}
/**
 * megasas_fire_cmd_skinny - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
                        dma_addr_t frame_phys_addr,
                        u32 frame_count,
                        struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel(upper_32_bits(frame_phys_addr),
               &(regs)->inbound_high_queue_port);
        writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
               &(regs)->inbound_low_queue_port);
        mmiowb();
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
                           struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

        .fire_cmd = megasas_fire_cmd_skinny,
        .enable_intr = megasas_enable_intr_skinny,
        .disable_intr = megasas_disable_intr_skinny,
        .clear_intr = megasas_clear_intr_skinny,
        .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
        .adp_reset = megasas_adp_reset_gen2,
        .check_reset = megasas_check_reset_skinny,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

        /* write ~0x00000005 (4 & 1) to the intr mask*/
        writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_gen2 - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
        }
        if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
        }

        /*
         * Clear the interrupt by writing back the same value
         */
        if (mfiStatus)
                writel(status, &regs->outbound_doorbell_clear);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}
/**
 * megasas_fire_cmd_gen2 - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
                      dma_addr_t frame_phys_addr,
                      u32 frame_count,
                      struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr | (frame_count<<1))|1,
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
                       struct megasas_register_set __iomem *reg_set)
{
        u32 retry = 0;
        u32 HostDiag;
        u32 __iomem *seq_offset = &reg_set->seq_offset;
        u32 __iomem *hostdiag_offset = &reg_set->host_diag;

        if (instance->instancet == &megasas_instance_template_skinny) {
                seq_offset = &reg_set->fusion_seq_offset;
                hostdiag_offset = &reg_set->fusion_host_diag;
        }

        writel(0, seq_offset);
        writel(4, seq_offset);
        writel(0xb, seq_offset);
        writel(2, seq_offset);
        writel(7, seq_offset);
        writel(0xd, seq_offset);

        msleep(1000);

        HostDiag = (u32)readl(hostdiag_offset);

        while (!(HostDiag & DIAG_WRITE_ENABLE)) {
                msleep(100);
                HostDiag = (u32)readl(hostdiag_offset);
                dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
                           retry, HostDiag);

                if (retry++ >= 100)
                        return 1;

        }

        dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

        writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

        ssleep(10);

        HostDiag = (u32)readl(hostdiag_offset);
        while (HostDiag & DIAG_RESET_ADAPTER) {
                msleep(100);
                HostDiag = (u32)readl(hostdiag_offset);
                dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
                           retry, HostDiag);

                if (retry++ >= 1000)
                        return 1;

        }
        return 0;
}

/**
 * megasas_check_reset_gen2 - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
                         struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

        .fire_cmd = megasas_fire_cmd_gen2,
        .enable_intr = megasas_enable_intr_gen2,
        .disable_intr = megasas_disable_intr_gen2,
        .clear_intr = megasas_clear_intr_gen2,
        .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
        .adp_reset = megasas_adp_reset_gen2,
        .check_reset = megasas_check_reset_gen2,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;
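/*
 * Illustrative note (sketch only): each controller family above provides a
 * megasas_instance_template, and the rest of the driver talks to hardware
 * through instance->instancet rather than hard-coding a generation, e.g.
 *
 *      instance->instancet->enable_intr(instance);
 *      fw_state = instance->instancet->read_fw_status_reg(instance) &
 *                 MFI_STATE_MASK;
 *
 * Which template gets bound to instance->instancet is decided at probe time
 * from the PCI device ID; the calls shown here are examples of the dispatch
 * pattern, not additional driver logic.
 */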
/**
 * megasas_issue_polled - Issues a polling command
 * @instance: Adapter soft state
 * @cmd: Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to
 * MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        struct megasas_header *frame_hdr = &cmd->frame->hdr;

        frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
        frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        return wait_and_poll(instance, cmd, instance->requestorId ?
                             MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * @instance: Adapter soft state
 * @cmd: Command to be issued
 * @timeout: Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
                          struct megasas_cmd *cmd, int timeout)
{
        int ret = 0;
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        if (timeout) {
                ret = wait_event_timeout(instance->int_cmd_wait_q,
                        cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
                if (!ret) {
                        dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
                                __func__, __LINE__);
                        return DCMD_TIMEOUT;
                }
        } else
                wait_event(instance->int_cmd_wait_q,
                           cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

        return (cmd->cmd_status_drv == MFI_STAT_OK) ?
                DCMD_SUCCESS : DCMD_FAILED;
}
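/*
 * Illustrative note (sketch only, names taken from this file): internal
 * DCMDs are typically issued through the blocked helper above in a pattern
 * such as
 *
 *      cmd = megasas_get_cmd(instance);
 *      dcmd = &cmd->frame->dcmd;
 *      ... set the opcode, sizes and the DMA buffer via
 *      megasas_set_dma_settings() ...
 *      ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *      megasas_return_cmd(instance, cmd);
 *
 * where ret is one of DCMD_SUCCESS, DCMD_FAILED, DCMD_TIMEOUT or
 * DCMD_NOT_FIRED as returned above. This is an example of the calling
 * convention, not additional driver logic.
 */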
/**
 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
 * @instance: Adapter soft state
 * @cmd_to_abort: Previously issued cmd to be aborted
 * @timeout: Timeout in seconds
 *
 * MFI firmware can abort a previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
                                struct megasas_cmd *cmd_to_abort, int timeout)
{
        struct megasas_cmd *cmd;
        struct megasas_abort_frame *abort_fr;
        int ret = 0;

        cmd = megasas_get_cmd(instance);

        if (!cmd)
                return -1;

        abort_fr = &cmd->frame->abort;

        /*
         * Prepare and issue the abort frame
         */
        abort_fr->cmd = MFI_CMD_ABORT;
        abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
        abort_fr->flags = cpu_to_le16(0);
        abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
        abort_fr->abort_mfi_phys_addr_lo =
                cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
        abort_fr->abort_mfi_phys_addr_hi =
                cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

        cmd->sync_cmd = 1;
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        if (timeout) {
                ret = wait_event_timeout(instance->abort_cmd_wait_q,
                        cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
                if (!ret) {
                        dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
                                __func__, __LINE__);
                        return DCMD_TIMEOUT;
                }
        } else
                wait_event(instance->abort_cmd_wait_q,
                           cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

        cmd->sync_cmd = 0;

        megasas_return_cmd(instance, cmd);
        return (cmd->cmd_status_drv == MFI_STAT_OK) ?
                DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 - Prepares 32-bit SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);
        BUG_ON(sge_count < 0);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
}
/**
 * megasas_make_sgl64 - Prepares 64-bit SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);
        BUG_ON(sge_count < 0);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
                        struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge_skinny[i].length =
                                cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge_skinny[i].phys_addr =
                                cpu_to_le64(sg_dma_address(os_sgl));
                        mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
                }
        }
        return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type : type of frame - io or pthru frame
 * @sge_count : number of sg elements
 *
 * Returns the number of frames required for the number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
                                   u8 sge_count, u8 frame_type)
{
        int num_cnt;
        int sge_bytes;
        u32 sge_sz;
        u32 frame_count = 0;

        sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
                sizeof(struct megasas_sge32);

        if (instance->flag_ieee) {
                sge_sz = sizeof(struct megasas_sge_skinny);
        }

        /*
         * Main frame can contain 2 SGEs for 64-bit SGLs and
         * 3 SGEs for 32-bit SGLs for ldio &
         * 1 SGEs for 64-bit SGLs and
         * 2 SGEs for 32-bit SGLs for pthru frame
         */
        if (unlikely(frame_type == PTHRU_FRAME)) {
                if (instance->flag_ieee == 1) {
                        num_cnt = sge_count - 1;
                } else if (IS_DMA64)
                        num_cnt = sge_count - 1;
                else
                        num_cnt = sge_count - 2;
        } else {
                if (instance->flag_ieee == 1) {
                        num_cnt = sge_count - 1;
                } else if (IS_DMA64)
                        num_cnt = sge_count - 2;
                else
                        num_cnt = sge_count - 3;
        }

        if (num_cnt > 0) {
                sge_bytes = sge_sz * num_cnt;

                frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
                        ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
        }
        /* Main frame */
        frame_count += 1;

        if (frame_count > 7)
                frame_count = 8;
        return frame_count;
}
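/*
 * Worked example for megasas_get_frame_count() above (illustration only,
 * assuming the 8-byte struct megasas_sge32 and a 64-byte MEGAMFI_FRAME_SIZE):
 * an LDIO with 19 scatter-gather elements on a 32-bit, non-IEEE SGL keeps
 * 3 SGEs in the main frame, so num_cnt = 19 - 3 = 16 and sge_bytes =
 * 16 * 8 = 128, which needs 128 / 64 = 2 extra frames; together with the
 * main frame the command consumes 3 frames, well under the cap of 8.
 */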
/**
 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
 * @instance: Adapter soft state
 * @scp: SCSI command
 * @cmd: Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   struct megasas_cmd *cmd)
{
        u32 is_logical;
        u32 device_id;
        u16 flags = 0;
        struct megasas_pthru_frame *pthru;

        is_logical = MEGASAS_IS_LOGICAL(scp->device);
        device_id = MEGASAS_DEV_INDEX(scp);
        pthru = (struct megasas_pthru_frame *)cmd->frame;

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
        else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;
        else if (scp->sc_data_direction == DMA_NONE)
                flags = MFI_FRAME_DIR_NONE;

        if (instance->flag_ieee == 1) {
                flags |= MFI_FRAME_IEEE;
        }

        /*
         * Prepare the DCDB frame
         */
        pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
        pthru->cmd_status = 0x0;
        pthru->scsi_status = 0x0;
        pthru->target_id = device_id;
        pthru->lun = scp->device->lun;
        pthru->cdb_len = scp->cmd_len;
        pthru->timeout = 0;
        pthru->pad_0 = 0;
        pthru->flags = cpu_to_le16(flags);
        pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

        memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

        /*
         * If the command is for the tape device, set the
         * pthru timeout to the os layer timeout value.
         */
        if (scp->device->type == TYPE_TAPE) {
                if ((scp->request->timeout / HZ) > 0xFFFF)
                        pthru->timeout = cpu_to_le16(0xFFFF);
                else
                        pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
        }

        /*
         * Construct SGL
         */
        if (instance->flag_ieee == 1) {
                pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
                                                           &pthru->sgl);
        } else if (IS_DMA64) {
                pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl64(instance, scp,
                                                      &pthru->sgl);
        } else
                pthru->sge_count = megasas_make_sgl32(instance, scp,
                                                      &pthru->sgl);

        if (pthru->sge_count > instance->max_num_sge) {
                dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
                        pthru->sge_count);
                return 0;
        }

        /*
         * Sense info specific
         */
        pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
        pthru->sense_buf_phys_addr_hi =
                cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
        pthru->sense_buf_phys_addr_lo =
                cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

        /*
         * Compute the total number of frames this command consumes. FW uses
         * this number to pull sufficient number of frames from host memory.
         */
        cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
                                                   PTHRU_FRAME);

        return cmd->frame_count;
}
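/*
 * Worked example for the CDB decoding in megasas_build_ldio() below
 * (illustration only, CDB bytes are hypothetical): a 10-byte READ(10) such
 * as 28 00 00 12 34 56 00 00 08 00 carries the LBA in bytes 2-5 and the
 * transfer length in bytes 7-8, so start_lba_lo becomes 0x00123456,
 * start_lba_hi stays 0 and lba_count becomes 8 blocks.
 */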
/**
 * megasas_build_ldio - Prepares IOs to logical devices
 * @instance: Adapter soft state
 * @scp: SCSI command
 * @cmd: Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   struct megasas_cmd *cmd)
{
        u32 device_id;
        u8 sc = scp->cmnd[0];
        u16 flags = 0;
        struct megasas_io_frame *ldio;

        device_id = MEGASAS_DEV_INDEX(scp);
        ldio = (struct megasas_io_frame *)cmd->frame;

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
        else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;

        if (instance->flag_ieee == 1) {
                flags |= MFI_FRAME_IEEE;
        }

        /*
         * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
         */
        ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
        ldio->cmd_status = 0x0;
        ldio->scsi_status = 0x0;
        ldio->target_id = device_id;
        ldio->timeout = 0;
        ldio->reserved_0 = 0;
        ldio->pad_0 = 0;
        ldio->flags = cpu_to_le16(flags);
        ldio->start_lba_hi = 0;
        ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

        /*
         * 6-byte READ(0x08) or WRITE(0x0A) cdb
         */
        if (scp->cmd_len == 6) {
                ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
                                                 ((u32) scp->cmnd[2] << 8) |
                                                 (u32) scp->cmnd[3]);

                ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
        }

        /*
         * 10-byte READ(0x28) or WRITE(0x2A) cdb
         */
        else if (scp->cmd_len == 10) {
                ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
                                              ((u32) scp->cmnd[7] << 8));
                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);
        }

        /*
         * 12-byte READ(0xA8) or WRITE(0xAA) cdb
         */
        else if (scp->cmd_len == 12) {
                ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
                                              ((u32) scp->cmnd[7] << 16) |
                                              ((u32) scp->cmnd[8] << 8) |
                                              (u32) scp->cmnd[9]);

                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);
        }

        /*
         * 16-byte READ(0x88) or WRITE(0x8A) cdb
         */
        else if (scp->cmd_len == 16) {
                ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
                                              ((u32) scp->cmnd[11] << 16) |
                                              ((u32) scp->cmnd[12] << 8) |
                                              (u32) scp->cmnd[13]);

                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
                                                 ((u32) scp->cmnd[7] << 16) |
                                                 ((u32) scp->cmnd[8] << 8) |
                                                 (u32) scp->cmnd[9]);

                ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);

        }

        /*
         * Construct SGL
         */
        if (instance->flag_ieee) {
                ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
                                                          &ldio->sgl);
        } else if (IS_DMA64) {
                ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
        } else
                ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

        if (ldio->sge_count > instance->max_num_sge) {
                dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
                        ldio->sge_count);
                return 0;
        }

        /*
         * Sense info specific
         */
        ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
        ldio->sense_buf_phys_addr_hi = 0;
        ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

        /*
         * Compute the total number of frames this command consumes. FW uses
         * this number to pull sufficient number of frames from host memory.
         */
        cmd->frame_count = megasas_get_frame_count(instance,
                                                   ldio->sge_count, IO_FRAME);

        return cmd->frame_count;
}

/**
 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @scmd: SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
        int ret;

        switch (cmd->cmnd[0]) {
        case READ_10:
        case WRITE_10:
        case READ_12:
        case WRITE_12:
        case READ_6:
        case WRITE_6:
        case READ_16:
        case WRITE_16:
                ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
                break;
        default:
                ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
        }
        return ret;
}

/**
 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
 *				 in FW
 * @instance: Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
        struct megasas_cmd *cmd;
        int i, n;
        union megasas_sgl *mfi_sgl;
        struct megasas_io_frame *ldio;
        struct megasas_pthru_frame *pthru;
        u32 sgcount;
        u16 max_cmd = instance->max_fw_cmds;

        dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
        dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
        if (IS_DMA64)
                dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
        else
                dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

        dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
        for (i = 0; i < max_cmd; i++) {
                cmd = instance->cmd_list[i];
                if (!cmd->scmd)
                        continue;
                dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
                if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
                        ldio = (struct megasas_io_frame *)cmd->frame;
                        mfi_sgl = &ldio->sgl;
                        sgcount = ldio->sge_count;
                        dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
                                " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
                                instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
                                le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
                                le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
                } else {
                        pthru = (struct megasas_pthru_frame *) cmd->frame;
                        mfi_sgl = &pthru->sgl;
                        sgcount = pthru->sge_count;
                        dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
                                "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
                                instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
                                pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
                                le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
                }
                if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
                        for (n = 0; n < sgcount; n++) {
                                if (IS_DMA64)
                                        dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
                                                le32_to_cpu(mfi_sgl->sge64[n].length),
                                                le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
                                else
                                        dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
                                                le32_to_cpu(mfi_sgl->sge32[n].length),
                                                le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
                        }
                }
        } /*for max_cmd*/
        dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
        for (i = 0; i < max_cmd; i++) {

                cmd = instance->cmd_list[i];

                if (cmd->sync_cmd == 1)
                        dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
        }
        dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
                            struct scsi_cmnd *scmd)
{
        struct megasas_cmd *cmd;
        u32 frame_count;

        cmd = megasas_get_cmd(instance);
        if (!cmd)
                return SCSI_MLQUEUE_HOST_BUSY;

        /*
         * Logical drive command
         */
        if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
                frame_count = megasas_build_ldio(instance, scmd, cmd);
        else
                frame_count = megasas_build_dcdb(instance, scmd, cmd);

        if (!frame_count)
                goto out_return_cmd;

        cmd->scmd = scmd;
        scmd->SCp.ptr = (char *)cmd;

        /*
         * Issue the command to the FW
         */
        atomic_inc(&instance->fw_outstanding);

        instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
                                      cmd->frame_count-1, instance->reg_set);

        return 0;
out_return_cmd:
        megasas_return_cmd(instance, cmd);
        return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command - Queue entry point
 * @scmd: SCSI command to be queued
 * @done: Callback entry point
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
        struct megasas_instance *instance;
        struct MR_PRIV_DEVICE *mr_device_priv_data;

        instance = (struct megasas_instance *)
                scmd->device->host->hostdata;

        if (instance->unload == 1) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
        }

        if (instance->issuepend_done == 0)
                return SCSI_MLQUEUE_HOST_BUSY;


        /* Check for an mpio path and adjust behavior */
        if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
                if (megasas_check_mpio_paths(instance, scmd) ==
                    (DID_REQUEUE << 16)) {
                        return SCSI_MLQUEUE_HOST_BUSY;
                } else {
                        scmd->result = DID_NO_CONNECT << 16;
                        scmd->scsi_done(scmd);
                        return 0;
                }
        }

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
        }

        mr_device_priv_data = scmd->device->hostdata;
        if (!mr_device_priv_data) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
        }

        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return SCSI_MLQUEUE_HOST_BUSY;

        if (mr_device_priv_data->tm_busy)
                return SCSI_MLQUEUE_DEVICE_BUSY;


        scmd->result = 0;

        if (MEGASAS_IS_LOGICAL(scmd->device) &&
            (scmd->device->id >= instance->fw_supported_vd_count ||
             scmd->device->lun)) {
                scmd->result = DID_BAD_TARGET << 16;
                goto out_done;
        }

        if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
            MEGASAS_IS_LOGICAL(scmd->device) &&
            (!instance->fw_sync_cache_support)) {
                scmd->result = DID_OK << 16;
                goto out_done;
        }

        return instance->instancet->build_and_issue_cmd(instance, scmd);

out_done:
        scmd->scsi_done(scmd);
        return 0;
}

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
        int i;

        for (i = 0; i < megasas_mgmt_info.max_index; i++) {

                if ((megasas_mgmt_info.instance[i]) &&
                    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
                        return megasas_mgmt_info.instance[i];
        }

        return NULL;
}

/*
 * megasas_set_dynamic_target_properties -
 * Device properties set by the driver may not be static, so they need to be
 * updated after OCR.
 *
 * set tm_capable.
 * set dma alignment (only for eedp protection enabled vd).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
                                           bool is_target_prop)
{
        u16 pd_index = 0, ld;
        u32 device_id;
        struct megasas_instance *instance;
        struct fusion_context *fusion;
        struct MR_PRIV_DEVICE *mr_device_priv_data;
        struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
        struct MR_LD_RAID *raid;
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

        instance = megasas_lookup_instance(sdev->host->host_no);
        fusion = instance->ctrl_context;
        mr_device_priv_data = sdev->hostdata;

        if (!fusion || !mr_device_priv_data)
                return;

        if (MEGASAS_IS_LOGICAL(sdev)) {
                device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
                        + sdev->id;
                local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
                ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
                if (ld >= instance->fw_supported_vd_count)
                        return;
                raid = MR_LdRaidGet(ld, local_map_ptr);

                if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
                        blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

                mr_device_priv_data->is_tm_capable =
                        raid->capability.tmCapable;
        } else if (instance->use_seqnum_jbod_fp) {
                pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
                        sdev->id;
                pd_sync = (void *)fusion->pd_seq_sync
                        [(instance->pd_seq_map_id - 1) & 1];
                mr_device_priv_data->is_tm_capable =
                        pd_sync->seq[pd_index].capability.tmCapable;
        }

        if (is_target_prop && instance->tgt_prop->reset_tmo) {
                /*
                 * If FW provides a target reset timeout value, driver will use
                 * it. If not set, fallback to default values.
                 */
                mr_device_priv_data->target_reset_tmo =
                        min_t(u8, instance->max_reset_tmo,
                              instance->tgt_prop->reset_tmo);
                mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
        } else {
                mr_device_priv_data->target_reset_tmo =
                        MEGASAS_DEFAULT_TM_TIMEOUT;
                mr_device_priv_data->task_abort_tmo =
                        MEGASAS_DEFAULT_TM_TIMEOUT;
        }
}

/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
 *
 * MR firmware provides value in KB. Caller of this function converts
 * KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
 * MR firmware provides value 128 as (32 * 4K) = 128K.
1897 * 1898 * @sdev: scsi device 1899 * @max_io_size: maximum io transfer size 1900 * 1901 */ 1902 static inline void 1903 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1904 { 1905 struct megasas_instance *instance; 1906 u32 mr_nvme_pg_size; 1907 1908 instance = (struct megasas_instance *)sdev->host->hostdata; 1909 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1910 MR_DEFAULT_NVME_PAGE_SIZE); 1911 1912 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1913 1914 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1915 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1916 } 1917 1918 1919 /* 1920 * megasas_set_static_target_properties - 1921 * Device property set by driver are static and it is not required to be 1922 * updated after OCR. 1923 * 1924 * set io timeout 1925 * set device queue depth 1926 * set nvme device properties. see - megasas_set_nvme_device_properties 1927 * 1928 * @sdev: scsi device 1929 * @is_target_prop true, if fw provided target properties. 1930 */ 1931 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1932 bool is_target_prop) 1933 { 1934 u16 target_index = 0; 1935 u8 interface_type; 1936 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1937 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1938 u32 tgt_device_qd; 1939 struct megasas_instance *instance; 1940 struct MR_PRIV_DEVICE *mr_device_priv_data; 1941 1942 instance = megasas_lookup_instance(sdev->host->host_no); 1943 mr_device_priv_data = sdev->hostdata; 1944 interface_type = mr_device_priv_data->interface_type; 1945 1946 /* 1947 * The RAID firmware may require extended timeouts. 1948 */ 1949 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1950 1951 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 1952 1953 switch (interface_type) { 1954 case SAS_PD: 1955 device_qd = MEGASAS_SAS_QD; 1956 break; 1957 case SATA_PD: 1958 device_qd = MEGASAS_SATA_QD; 1959 break; 1960 case NVME_PD: 1961 device_qd = MEGASAS_NVME_QD; 1962 break; 1963 } 1964 1965 if (is_target_prop) { 1966 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1967 if (tgt_device_qd && 1968 (tgt_device_qd <= instance->host->can_queue)) 1969 device_qd = tgt_device_qd; 1970 1971 /* max_io_size_kb will be set to non zero for 1972 * nvme based vd and syspd. 1973 */ 1974 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1975 } 1976 1977 if (instance->nvme_page_size && max_io_size_kb) 1978 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 1979 1980 scsi_change_queue_depth(sdev, device_qd); 1981 1982 } 1983 1984 1985 static int megasas_slave_configure(struct scsi_device *sdev) 1986 { 1987 u16 pd_index = 0; 1988 struct megasas_instance *instance; 1989 int ret_target_prop = DCMD_FAILED; 1990 bool is_target_prop = false; 1991 1992 instance = megasas_lookup_instance(sdev->host->host_no); 1993 if (instance->pd_list_not_supported) { 1994 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 1995 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1996 sdev->id; 1997 if (instance->pd_list[pd_index].driveState != 1998 MR_PD_STATE_SYSTEM) 1999 return -ENXIO; 2000 } 2001 } 2002 2003 mutex_lock(&instance->reset_mutex); 2004 /* Send DCMD to Firmware and cache the information */ 2005 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2006 megasas_get_pd_info(instance, sdev); 2007 2008 /* Some ventura firmware may not have instance->nvme_page_size set. 
2009 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2010 */ 2011 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2012 ret_target_prop = megasas_get_target_prop(instance, sdev); 2013 2014 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2015 megasas_set_static_target_properties(sdev, is_target_prop); 2016 2017 /* This sdev property may change post OCR */ 2018 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2019 2020 mutex_unlock(&instance->reset_mutex); 2021 2022 return 0; 2023 } 2024 2025 static int megasas_slave_alloc(struct scsi_device *sdev) 2026 { 2027 u16 pd_index = 0; 2028 struct megasas_instance *instance ; 2029 struct MR_PRIV_DEVICE *mr_device_priv_data; 2030 2031 instance = megasas_lookup_instance(sdev->host->host_no); 2032 if (!MEGASAS_IS_LOGICAL(sdev)) { 2033 /* 2034 * Open the OS scan to the SYSTEM PD 2035 */ 2036 pd_index = 2037 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2038 sdev->id; 2039 if ((instance->pd_list_not_supported || 2040 instance->pd_list[pd_index].driveState == 2041 MR_PD_STATE_SYSTEM)) { 2042 goto scan_target; 2043 } 2044 return -ENXIO; 2045 } 2046 2047 scan_target: 2048 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2049 GFP_KERNEL); 2050 if (!mr_device_priv_data) 2051 return -ENOMEM; 2052 sdev->hostdata = mr_device_priv_data; 2053 2054 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2055 instance->r1_ldio_hint_default); 2056 return 0; 2057 } 2058 2059 static void megasas_slave_destroy(struct scsi_device *sdev) 2060 { 2061 kfree(sdev->hostdata); 2062 sdev->hostdata = NULL; 2063 } 2064 2065 /* 2066 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2067 * kill adapter 2068 * @instance: Adapter soft state 2069 * 2070 */ 2071 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2072 { 2073 int i; 2074 struct megasas_cmd *cmd_mfi; 2075 struct megasas_cmd_fusion *cmd_fusion; 2076 struct fusion_context *fusion = instance->ctrl_context; 2077 2078 /* Find all outstanding ioctls */ 2079 if (fusion) { 2080 for (i = 0; i < instance->max_fw_cmds; i++) { 2081 cmd_fusion = fusion->cmd_list[i]; 2082 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2083 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2084 if (cmd_mfi->sync_cmd && 2085 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2086 cmd_mfi->frame->hdr.cmd_status = 2087 MFI_STAT_WRONG_STATE; 2088 megasas_complete_cmd(instance, 2089 cmd_mfi, DID_OK); 2090 } 2091 } 2092 } 2093 } else { 2094 for (i = 0; i < instance->max_fw_cmds; i++) { 2095 cmd_mfi = instance->cmd_list[i]; 2096 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2097 MFI_CMD_ABORT) 2098 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2099 } 2100 } 2101 } 2102 2103 2104 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2105 { 2106 /* Set critical error to block I/O & ioctls in case caller didn't */ 2107 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2108 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2109 msleep(1000); 2110 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2111 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2112 (instance->adapter_type != MFI_SERIES)) { 2113 if (!instance->requestorId) { 2114 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2115 /* Flush */ 2116 readl(&instance->reg_set->doorbell); 2117 } 2118 if (instance->requestorId && instance->peerIsPresent) 2119 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2120 } else { 2121 
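/* Older MFI-series (non-skinny) adapters take the MFI_STOP_ADP request through the legacy inbound_doorbell register instead */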
writel(MFI_STOP_ADP, 2122 &instance->reg_set->inbound_doorbell); 2123 } 2124 /* Complete outstanding ioctls when adapter is killed */ 2125 megasas_complete_outstanding_ioctls(instance); 2126 } 2127 2128 /** 2129 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2130 * restored to max value 2131 * @instance: Adapter soft state 2132 * 2133 */ 2134 void 2135 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2136 { 2137 unsigned long flags; 2138 2139 if (instance->flag & MEGASAS_FW_BUSY 2140 && time_after(jiffies, instance->last_time + 5 * HZ) 2141 && atomic_read(&instance->fw_outstanding) < 2142 instance->throttlequeuedepth + 1) { 2143 2144 spin_lock_irqsave(instance->host->host_lock, flags); 2145 instance->flag &= ~MEGASAS_FW_BUSY; 2146 2147 instance->host->can_queue = instance->cur_can_queue; 2148 spin_unlock_irqrestore(instance->host->host_lock, flags); 2149 } 2150 } 2151 2152 /** 2153 * megasas_complete_cmd_dpc - Completes outstanding commands from the reply queue 2154 * @instance_addr: Address of adapter soft state 2155 * 2156 * Tasklet to complete cmds 2157 */ 2158 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2159 { 2160 u32 producer; 2161 u32 consumer; 2162 u32 context; 2163 struct megasas_cmd *cmd; 2164 struct megasas_instance *instance = 2165 (struct megasas_instance *)instance_addr; 2166 unsigned long flags; 2167 2168 /* If we have already declared adapter dead, do not complete cmds */ 2169 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2170 return; 2171 2172 spin_lock_irqsave(&instance->completion_lock, flags); 2173 2174 producer = le32_to_cpu(*instance->producer); 2175 consumer = le32_to_cpu(*instance->consumer); 2176 2177 while (consumer != producer) { 2178 context = le32_to_cpu(instance->reply_queue[consumer]); 2179 if (context >= instance->max_fw_cmds) { 2180 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2181 context); 2182 BUG(); 2183 } 2184 2185 cmd = instance->cmd_list[context]; 2186 2187 megasas_complete_cmd(instance, cmd, DID_OK); 2188 2189 consumer++; 2190 if (consumer == (instance->max_fw_cmds + 1)) { 2191 consumer = 0; 2192 } 2193 } 2194 2195 *instance->consumer = cpu_to_le32(producer); 2196 2197 spin_unlock_irqrestore(&instance->completion_lock, flags); 2198 2199 /* 2200 * Check if we can restore can_queue 2201 */ 2202 megasas_check_and_restore_queue_depth(instance); 2203 } 2204 2205 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2206 2207 /** 2208 * megasas_start_timer - Initializes sriov heartbeat timer object 2209 * @instance: Adapter soft state 2210 * 2211 */ 2212 void megasas_start_timer(struct megasas_instance *instance) 2213 { 2214 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2215 2216 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2217 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2218 add_timer(timer); 2219 } 2220 2221 static void 2222 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2223 2224 static void 2225 process_fw_state_change_wq(struct work_struct *work); 2226 2227 void megasas_do_ocr(struct megasas_instance *instance) 2228 { 2229 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2230 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2231 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2232 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2233 } 2234 instance->instancet->disable_intr(instance); 2235 atomic_set(&instance->adprecovery, 
MEGASAS_ADPRESET_SM_INFAULT); 2236 instance->issuepend_done = 0; 2237 2238 atomic_set(&instance->fw_outstanding, 0); 2239 megasas_internal_reset_defer_cmds(instance); 2240 process_fw_state_change_wq(&instance->work_init); 2241 } 2242 2243 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2244 int initial) 2245 { 2246 struct megasas_cmd *cmd; 2247 struct megasas_dcmd_frame *dcmd; 2248 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2249 dma_addr_t new_affiliation_111_h; 2250 int ld, retval = 0; 2251 u8 thisVf; 2252 2253 cmd = megasas_get_cmd(instance); 2254 2255 if (!cmd) { 2256 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2257 "Failed to get cmd for scsi%d\n", 2258 instance->host->host_no); 2259 return -ENOMEM; 2260 } 2261 2262 dcmd = &cmd->frame->dcmd; 2263 2264 if (!instance->vf_affiliation_111) { 2265 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2266 "affiliation for scsi%d\n", instance->host->host_no); 2267 megasas_return_cmd(instance, cmd); 2268 return -ENOMEM; 2269 } 2270 2271 if (initial) 2272 memset(instance->vf_affiliation_111, 0, 2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2274 else { 2275 new_affiliation_111 = 2276 dma_zalloc_coherent(&instance->pdev->dev, 2277 sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 &new_affiliation_111_h, GFP_KERNEL); 2279 if (!new_affiliation_111) { 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2281 "memory for new affiliation for scsi%d\n", 2282 instance->host->host_no); 2283 megasas_return_cmd(instance, cmd); 2284 return -ENOMEM; 2285 } 2286 } 2287 2288 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2289 2290 dcmd->cmd = MFI_CMD_DCMD; 2291 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2292 dcmd->sge_count = 1; 2293 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2294 dcmd->timeout = 0; 2295 dcmd->pad_0 = 0; 2296 dcmd->data_xfer_len = 2297 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2298 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2299 2300 if (initial) 2301 dcmd->sgl.sge32[0].phys_addr = 2302 cpu_to_le32(instance->vf_affiliation_111_h); 2303 else 2304 dcmd->sgl.sge32[0].phys_addr = 2305 cpu_to_le32(new_affiliation_111_h); 2306 2307 dcmd->sgl.sge32[0].length = cpu_to_le32( 2308 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2309 2310 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2311 "scsi%d\n", instance->host->host_no); 2312 2313 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2314 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2315 " failed with status 0x%x for scsi%d\n", 2316 dcmd->cmd_status, instance->host->host_no); 2317 retval = 1; /* Do a scan if we couldn't get affiliation */ 2318 goto out; 2319 } 2320 2321 if (!initial) { 2322 thisVf = new_affiliation_111->thisVf; 2323 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2324 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2325 new_affiliation_111->map[ld].policy[thisVf]) { 2326 dev_warn(&instance->pdev->dev, "SR-IOV: " 2327 "Got new LD/VF affiliation for scsi%d\n", 2328 instance->host->host_no); 2329 memcpy(instance->vf_affiliation_111, 2330 new_affiliation_111, 2331 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2332 retval = 1; 2333 goto out; 2334 } 2335 } 2336 out: 2337 if (new_affiliation_111) { 2338 dma_free_coherent(&instance->pdev->dev, 2339 sizeof(struct MR_LD_VF_AFFILIATION_111), 2340 new_affiliation_111, 2341 new_affiliation_111_h); 2342 } 2343 2344 
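/* In every path the DCMD frame is handed back to the pool below; retval == 1 tells the caller that a rescan is needed */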
megasas_return_cmd(instance, cmd); 2345 2346 return retval; 2347 } 2348 2349 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2350 int initial) 2351 { 2352 struct megasas_cmd *cmd; 2353 struct megasas_dcmd_frame *dcmd; 2354 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2355 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2356 dma_addr_t new_affiliation_h; 2357 int i, j, retval = 0, found = 0, doscan = 0; 2358 u8 thisVf; 2359 2360 cmd = megasas_get_cmd(instance); 2361 2362 if (!cmd) { 2363 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2364 "Failed to get cmd for scsi%d\n", 2365 instance->host->host_no); 2366 return -ENOMEM; 2367 } 2368 2369 dcmd = &cmd->frame->dcmd; 2370 2371 if (!instance->vf_affiliation) { 2372 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2373 "affiliation for scsi%d\n", instance->host->host_no); 2374 megasas_return_cmd(instance, cmd); 2375 return -ENOMEM; 2376 } 2377 2378 if (initial) 2379 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2381 else { 2382 new_affiliation = 2383 dma_zalloc_coherent(&instance->pdev->dev, 2384 (MAX_LOGICAL_DRIVES + 1) * 2385 sizeof(struct MR_LD_VF_AFFILIATION), 2386 &new_affiliation_h, GFP_KERNEL); 2387 if (!new_affiliation) { 2388 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2389 "memory for new affiliation for scsi%d\n", 2390 instance->host->host_no); 2391 megasas_return_cmd(instance, cmd); 2392 return -ENOMEM; 2393 } 2394 } 2395 2396 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2397 2398 dcmd->cmd = MFI_CMD_DCMD; 2399 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2400 dcmd->sge_count = 1; 2401 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2402 dcmd->timeout = 0; 2403 dcmd->pad_0 = 0; 2404 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2405 sizeof(struct MR_LD_VF_AFFILIATION)); 2406 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2407 2408 if (initial) 2409 dcmd->sgl.sge32[0].phys_addr = 2410 cpu_to_le32(instance->vf_affiliation_h); 2411 else 2412 dcmd->sgl.sge32[0].phys_addr = 2413 cpu_to_le32(new_affiliation_h); 2414 2415 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2416 sizeof(struct MR_LD_VF_AFFILIATION)); 2417 2418 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2419 "scsi%d\n", instance->host->host_no); 2420 2421 2422 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2423 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2424 " failed with status 0x%x for scsi%d\n", 2425 dcmd->cmd_status, instance->host->host_no); 2426 retval = 1; /* Do a scan if we couldn't get affiliation */ 2427 goto out; 2428 } 2429 2430 if (!initial) { 2431 if (!new_affiliation->ldCount) { 2432 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2433 "affiliation for passive path for scsi%d\n", 2434 instance->host->host_no); 2435 retval = 1; 2436 goto out; 2437 } 2438 newmap = new_affiliation->map; 2439 savedmap = instance->vf_affiliation->map; 2440 thisVf = new_affiliation->thisVf; 2441 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2442 found = 0; 2443 for (j = 0; j < instance->vf_affiliation->ldCount; 2444 j++) { 2445 if (newmap->ref.targetId == 2446 savedmap->ref.targetId) { 2447 found = 1; 2448 if (newmap->policy[thisVf] != 2449 savedmap->policy[thisVf]) { 2450 doscan = 1; 2451 goto out; 2452 } 2453 } 2454 savedmap = (struct MR_LD_VF_MAP *) 2455 ((unsigned char *)savedmap + 2456 
savedmap->size); 2457 } 2458 if (!found && newmap->policy[thisVf] != 2459 MR_LD_ACCESS_HIDDEN) { 2460 doscan = 1; 2461 goto out; 2462 } 2463 newmap = (struct MR_LD_VF_MAP *) 2464 ((unsigned char *)newmap + newmap->size); 2465 } 2466 2467 newmap = new_affiliation->map; 2468 savedmap = instance->vf_affiliation->map; 2469 2470 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2471 found = 0; 2472 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2473 if (savedmap->ref.targetId == 2474 newmap->ref.targetId) { 2475 found = 1; 2476 if (savedmap->policy[thisVf] != 2477 newmap->policy[thisVf]) { 2478 doscan = 1; 2479 goto out; 2480 } 2481 } 2482 newmap = (struct MR_LD_VF_MAP *) 2483 ((unsigned char *)newmap + 2484 newmap->size); 2485 } 2486 if (!found && savedmap->policy[thisVf] != 2487 MR_LD_ACCESS_HIDDEN) { 2488 doscan = 1; 2489 goto out; 2490 } 2491 savedmap = (struct MR_LD_VF_MAP *) 2492 ((unsigned char *)savedmap + 2493 savedmap->size); 2494 } 2495 } 2496 out: 2497 if (doscan) { 2498 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2499 "affiliation for scsi%d\n", instance->host->host_no); 2500 memcpy(instance->vf_affiliation, new_affiliation, 2501 new_affiliation->size); 2502 retval = 1; 2503 } 2504 2505 if (new_affiliation) 2506 dma_free_coherent(&instance->pdev->dev, 2507 (MAX_LOGICAL_DRIVES + 1) * 2508 sizeof(struct MR_LD_VF_AFFILIATION), 2509 new_affiliation, new_affiliation_h); 2510 megasas_return_cmd(instance, cmd); 2511 2512 return retval; 2513 } 2514 2515 /* This function will get the current SR-IOV LD/VF affiliation */ 2516 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2517 int initial) 2518 { 2519 int retval; 2520 2521 if (instance->PlasmaFW111) 2522 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2523 else 2524 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2525 return retval; 2526 } 2527 2528 /* This function will tell FW to start the SR-IOV heartbeat */ 2529 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2530 int initial) 2531 { 2532 struct megasas_cmd *cmd; 2533 struct megasas_dcmd_frame *dcmd; 2534 int retval = 0; 2535 2536 cmd = megasas_get_cmd(instance); 2537 2538 if (!cmd) { 2539 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2540 "Failed to get cmd for scsi%d\n", 2541 instance->host->host_no); 2542 return -ENOMEM; 2543 } 2544 2545 dcmd = &cmd->frame->dcmd; 2546 2547 if (initial) { 2548 instance->hb_host_mem = 2549 dma_zalloc_coherent(&instance->pdev->dev, 2550 sizeof(struct MR_CTRL_HB_HOST_MEM), 2551 &instance->hb_host_mem_h, GFP_KERNEL); 2552 if (!instance->hb_host_mem) { 2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2554 " memory for heartbeat host memory for scsi%d\n", 2555 instance->host->host_no); 2556 retval = -ENOMEM; 2557 goto out; 2558 } 2559 } 2560 2561 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2562 2563 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2564 dcmd->cmd = MFI_CMD_DCMD; 2565 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2566 dcmd->sge_count = 1; 2567 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2568 dcmd->timeout = 0; 2569 dcmd->pad_0 = 0; 2570 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2571 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2572 2573 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2574 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2575 2576 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for 
scsi%d\n", 2577 instance->host->host_no); 2578 2579 if ((instance->adapter_type != MFI_SERIES) && 2580 !instance->mask_interrupts) 2581 retval = megasas_issue_blocked_cmd(instance, cmd, 2582 MEGASAS_ROUTINE_WAIT_TIME_VF); 2583 else 2584 retval = megasas_issue_polled(instance, cmd); 2585 2586 if (retval) { 2587 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2588 "_MEM_ALLOC DCMD %s for scsi%d\n", 2589 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2590 "timed out" : "failed", instance->host->host_no); 2591 retval = 1; 2592 } 2593 2594 out: 2595 megasas_return_cmd(instance, cmd); 2596 2597 return retval; 2598 } 2599 2600 /* Handler for SR-IOV heartbeat */ 2601 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2602 { 2603 struct megasas_instance *instance = 2604 from_timer(instance, t, sriov_heartbeat_timer); 2605 2606 if (instance->hb_host_mem->HB.fwCounter != 2607 instance->hb_host_mem->HB.driverCounter) { 2608 instance->hb_host_mem->HB.driverCounter = 2609 instance->hb_host_mem->HB.fwCounter; 2610 mod_timer(&instance->sriov_heartbeat_timer, 2611 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2612 } else { 2613 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2614 "completed for scsi%d\n", instance->host->host_no); 2615 schedule_work(&instance->work_init); 2616 } 2617 } 2618 2619 /** 2620 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2621 * @instance: Adapter soft state 2622 * 2623 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2624 * complete all its outstanding commands. Returns error if one or more IOs 2625 * are pending after this time period. It also marks the controller dead. 2626 */ 2627 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2628 { 2629 int i, sl, outstanding; 2630 u32 reset_index; 2631 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2632 unsigned long flags; 2633 struct list_head clist_local; 2634 struct megasas_cmd *reset_cmd; 2635 u32 fw_state; 2636 2637 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2638 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2639 __func__, __LINE__); 2640 return FAILED; 2641 } 2642 2643 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2644 2645 INIT_LIST_HEAD(&clist_local); 2646 spin_lock_irqsave(&instance->hba_lock, flags); 2647 list_splice_init(&instance->internal_reset_pending_q, 2648 &clist_local); 2649 spin_unlock_irqrestore(&instance->hba_lock, flags); 2650 2651 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2652 for (i = 0; i < wait_time; i++) { 2653 msleep(1000); 2654 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2655 break; 2656 } 2657 2658 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2659 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2660 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2661 return FAILED; 2662 } 2663 2664 reset_index = 0; 2665 while (!list_empty(&clist_local)) { 2666 reset_cmd = list_entry((&clist_local)->next, 2667 struct megasas_cmd, list); 2668 list_del_init(&reset_cmd->list); 2669 if (reset_cmd->scmd) { 2670 reset_cmd->scmd->result = DID_REQUEUE << 16; 2671 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2672 reset_index, reset_cmd, 2673 reset_cmd->scmd->cmnd[0]); 2674 2675 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2676 megasas_return_cmd(instance, reset_cmd); 2677 } else if (reset_cmd->sync_cmd) { 2678 dev_notice(&instance->pdev->dev, "%p synch cmds" 
2679 "reset queue\n", 2680 reset_cmd); 2681 2682 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2683 instance->instancet->fire_cmd(instance, 2684 reset_cmd->frame_phys_addr, 2685 0, instance->reg_set); 2686 } else { 2687 dev_notice(&instance->pdev->dev, "%p unexpected" 2688 "cmds lst\n", 2689 reset_cmd); 2690 } 2691 reset_index++; 2692 } 2693 2694 return SUCCESS; 2695 } 2696 2697 for (i = 0; i < resetwaittime; i++) { 2698 outstanding = atomic_read(&instance->fw_outstanding); 2699 2700 if (!outstanding) 2701 break; 2702 2703 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2704 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2705 "commands to complete\n",i,outstanding); 2706 /* 2707 * Call cmd completion routine. Cmd to be 2708 * be completed directly without depending on isr. 2709 */ 2710 megasas_complete_cmd_dpc((unsigned long)instance); 2711 } 2712 2713 msleep(1000); 2714 } 2715 2716 i = 0; 2717 outstanding = atomic_read(&instance->fw_outstanding); 2718 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2719 2720 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2721 goto no_outstanding; 2722 2723 if (instance->disableOnlineCtrlReset) 2724 goto kill_hba_and_failed; 2725 do { 2726 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2727 dev_info(&instance->pdev->dev, 2728 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n", 2729 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2730 if (i == 3) 2731 goto kill_hba_and_failed; 2732 megasas_do_ocr(instance); 2733 2734 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2735 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2736 __func__, __LINE__); 2737 return FAILED; 2738 } 2739 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2740 __func__, __LINE__); 2741 2742 for (sl = 0; sl < 10; sl++) 2743 msleep(500); 2744 2745 outstanding = atomic_read(&instance->fw_outstanding); 2746 2747 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2748 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2749 goto no_outstanding; 2750 } 2751 i++; 2752 } while (i <= 3); 2753 2754 no_outstanding: 2755 2756 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2757 __func__, __LINE__); 2758 return SUCCESS; 2759 2760 kill_hba_and_failed: 2761 2762 /* Reset not supported, kill adapter */ 2763 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2764 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2765 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2766 atomic_read(&instance->fw_outstanding)); 2767 megasas_dump_pending_frames(instance); 2768 megaraid_sas_kill_hba(instance); 2769 2770 return FAILED; 2771 } 2772 2773 /** 2774 * megasas_generic_reset - Generic reset routine 2775 * @scmd: Mid-layer SCSI command 2776 * 2777 * This routine implements a generic reset handler for device, bus and host 2778 * reset requests. Device, bus and host specific reset handlers can use this 2779 * function after they do their specific tasks. 
2780 */ 2781 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2782 { 2783 int ret_val; 2784 struct megasas_instance *instance; 2785 2786 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2787 2788 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2789 scmd->cmnd[0], scmd->retries); 2790 2791 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2792 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2793 return FAILED; 2794 } 2795 2796 ret_val = megasas_wait_for_outstanding(instance); 2797 if (ret_val == SUCCESS) 2798 dev_notice(&instance->pdev->dev, "reset successful\n"); 2799 else 2800 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2801 2802 return ret_val; 2803 } 2804 2805 /** 2806 * megasas_reset_timer - quiesce the adapter if required 2807 * @scmd: scsi cmnd 2808 * 2809 * Sets the FW busy flag and reduces the host->can_queue if the 2810 * cmd has not been completed within the timeout period. 2811 */ 2812 static enum 2813 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2814 { 2815 struct megasas_instance *instance; 2816 unsigned long flags; 2817 2818 if (time_after(jiffies, scmd->jiffies_at_alloc + 2819 (scmd_timeout * 2) * HZ)) { 2820 return BLK_EH_DONE; 2821 } 2822 2823 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2824 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2825 /* FW is busy, throttle IO */ 2826 spin_lock_irqsave(instance->host->host_lock, flags); 2827 2828 instance->host->can_queue = instance->throttlequeuedepth; 2829 instance->last_time = jiffies; 2830 instance->flag |= MEGASAS_FW_BUSY; 2831 2832 spin_unlock_irqrestore(instance->host->host_lock, flags); 2833 } 2834 return BLK_EH_RESET_TIMER; 2835 } 2836 2837 /** 2838 * megasas_dump_frame - This function will dump MPT/MFI frame 2839 */ 2840 static inline void 2841 megasas_dump_frame(void *mpi_request, int sz) 2842 { 2843 int i; 2844 __le32 *mfp = (__le32 *)mpi_request; 2845 2846 printk(KERN_INFO "IO request frame:\n\t"); 2847 for (i = 0; i < sz / sizeof(__le32); i++) { 2848 if (i && ((i % 8) == 0)) 2849 printk("\n\t"); 2850 printk("%08x ", le32_to_cpu(mfp[i])); 2851 } 2852 printk("\n"); 2853 } 2854 2855 /** 2856 * megasas_reset_bus_host - Bus & host reset handler entry point 2857 */ 2858 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2859 { 2860 int ret; 2861 struct megasas_instance *instance; 2862 2863 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2864 2865 scmd_printk(KERN_INFO, scmd, 2866 "Controller reset is requested due to IO timeout\n" 2867 "SCSI command pointer: (%p)\t SCSI host state: %d\t" 2868 " SCSI host busy: %d\t FW outstanding: %d\n", 2869 scmd, scmd->device->host->shost_state, 2870 scsi_host_busy(scmd->device->host), 2871 atomic_read(&instance->fw_outstanding)); 2872 2873 /* 2874 * First wait for all commands to complete 2875 */ 2876 if (instance->adapter_type == MFI_SERIES) { 2877 ret = megasas_generic_reset(scmd); 2878 } else { 2879 struct megasas_cmd_fusion *cmd; 2880 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2881 if (cmd) 2882 megasas_dump_frame(cmd->io_request, 2883 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 2884 ret = megasas_reset_fusion(scmd->device->host, 2885 SCSIIO_TIMEOUT_OCR); 2886 } 2887 2888 return ret; 2889 } 2890 2891 /** 2892 * megasas_task_abort - Issues task abort request to firmware 2893 * (supported only for fusion adapters) 2894 * @scmd: SCSI command pointer 2895 */ 2896 static int megasas_task_abort(struct scsi_cmnd 
*scmd) 2897 { 2898 int ret; 2899 struct megasas_instance *instance; 2900 2901 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2902 2903 if (instance->adapter_type != MFI_SERIES) 2904 ret = megasas_task_abort_fusion(scmd); 2905 else { 2906 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2907 ret = FAILED; 2908 } 2909 2910 return ret; 2911 } 2912 2913 /** 2914 * megasas_reset_target: Issues target reset request to firmware 2915 * (supported only for fusion adapters) 2916 * @scmd: SCSI command pointer 2917 */ 2918 static int megasas_reset_target(struct scsi_cmnd *scmd) 2919 { 2920 int ret; 2921 struct megasas_instance *instance; 2922 2923 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2924 2925 if (instance->adapter_type != MFI_SERIES) 2926 ret = megasas_reset_target_fusion(scmd); 2927 else { 2928 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2929 ret = FAILED; 2930 } 2931 2932 return ret; 2933 } 2934 2935 /** 2936 * megasas_bios_param - Returns disk geometry for a disk 2937 * @sdev: device handle 2938 * @bdev: block device 2939 * @capacity: drive capacity 2940 * @geom: geometry parameters 2941 */ 2942 static int 2943 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2944 sector_t capacity, int geom[]) 2945 { 2946 int heads; 2947 int sectors; 2948 sector_t cylinders; 2949 unsigned long tmp; 2950 2951 /* Default heads (64) & sectors (32) */ 2952 heads = 64; 2953 sectors = 32; 2954 2955 tmp = heads * sectors; 2956 cylinders = capacity; 2957 2958 sector_div(cylinders, tmp); 2959 2960 /* 2961 * Handle extended translation size for logical drives > 1Gb 2962 */ 2963 2964 if (capacity >= 0x200000) { 2965 heads = 255; 2966 sectors = 63; 2967 tmp = heads*sectors; 2968 cylinders = capacity; 2969 sector_div(cylinders, tmp); 2970 } 2971 2972 geom[0] = heads; 2973 geom[1] = sectors; 2974 geom[2] = cylinders; 2975 2976 return 0; 2977 } 2978 2979 static void megasas_aen_polling(struct work_struct *work); 2980 2981 /** 2982 * megasas_service_aen - Processes an event notification 2983 * @instance: Adapter soft state 2984 * @cmd: AEN command completed by the ISR 2985 * 2986 * For AEN, driver sends a command down to FW that is held by the FW till an 2987 * event occurs. When an event of interest occurs, FW completes the command 2988 * that it was previously holding. 2989 * 2990 * This routines sends SIGIO signal to processes that have registered with the 2991 * driver for AEN. 
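 * It also schedules delayed hotplug work (megasas_aen_polling) to process the
 * event that was just delivered.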
2992 */ 2993 static void 2994 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2995 { 2996 unsigned long flags; 2997 2998 /* 2999 * Don't signal app if it is just an aborted previously registered aen 3000 */ 3001 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3002 spin_lock_irqsave(&poll_aen_lock, flags); 3003 megasas_poll_wait_aen = 1; 3004 spin_unlock_irqrestore(&poll_aen_lock, flags); 3005 wake_up(&megasas_poll_wait); 3006 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3007 } 3008 else 3009 cmd->abort_aen = 0; 3010 3011 instance->aen_cmd = NULL; 3012 3013 megasas_return_cmd(instance, cmd); 3014 3015 if ((instance->unload == 0) && 3016 ((instance->issuepend_done == 1))) { 3017 struct megasas_aen_event *ev; 3018 3019 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3020 if (!ev) { 3021 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3022 } else { 3023 ev->instance = instance; 3024 instance->ev = ev; 3025 INIT_DELAYED_WORK(&ev->hotplug_work, 3026 megasas_aen_polling); 3027 schedule_delayed_work(&ev->hotplug_work, 0); 3028 } 3029 } 3030 } 3031 3032 static ssize_t 3033 megasas_fw_crash_buffer_store(struct device *cdev, 3034 struct device_attribute *attr, const char *buf, size_t count) 3035 { 3036 struct Scsi_Host *shost = class_to_shost(cdev); 3037 struct megasas_instance *instance = 3038 (struct megasas_instance *) shost->hostdata; 3039 int val = 0; 3040 unsigned long flags; 3041 3042 if (kstrtoint(buf, 0, &val) != 0) 3043 return -EINVAL; 3044 3045 spin_lock_irqsave(&instance->crashdump_lock, flags); 3046 instance->fw_crash_buffer_offset = val; 3047 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3048 return strlen(buf); 3049 } 3050 3051 static ssize_t 3052 megasas_fw_crash_buffer_show(struct device *cdev, 3053 struct device_attribute *attr, char *buf) 3054 { 3055 struct Scsi_Host *shost = class_to_shost(cdev); 3056 struct megasas_instance *instance = 3057 (struct megasas_instance *) shost->hostdata; 3058 u32 size; 3059 unsigned long buff_addr; 3060 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3061 unsigned long src_addr; 3062 unsigned long flags; 3063 u32 buff_offset; 3064 3065 spin_lock_irqsave(&instance->crashdump_lock, flags); 3066 buff_offset = instance->fw_crash_buffer_offset; 3067 if (!instance->crash_dump_buf && 3068 !((instance->fw_crash_state == AVAILABLE) || 3069 (instance->fw_crash_state == COPYING))) { 3070 dev_err(&instance->pdev->dev, 3071 "Firmware crash dump is not available\n"); 3072 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3073 return -EINVAL; 3074 } 3075 3076 buff_addr = (unsigned long) buf; 3077 3078 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3079 dev_err(&instance->pdev->dev, 3080 "Firmware crash dump offset is out of range\n"); 3081 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3082 return 0; 3083 } 3084 3085 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3086 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 3087 3088 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3089 (buff_offset % dmachunk); 3090 memcpy(buf, (void *)src_addr, size); 3091 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3092 3093 return size; 3094 } 3095 3096 static ssize_t 3097 megasas_fw_crash_buffer_size_show(struct device *cdev, 3098 struct device_attribute *attr, char *buf) 3099 { 3100 struct Scsi_Host *shost = class_to_shost(cdev); 3101 struct megasas_instance *instance = 3102 (struct megasas_instance *) shost->hostdata; 3103 3104 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3105 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3106 } 3107 3108 static ssize_t 3109 megasas_fw_crash_state_store(struct device *cdev, 3110 struct device_attribute *attr, const char *buf, size_t count) 3111 { 3112 struct Scsi_Host *shost = class_to_shost(cdev); 3113 struct megasas_instance *instance = 3114 (struct megasas_instance *) shost->hostdata; 3115 int val = 0; 3116 unsigned long flags; 3117 3118 if (kstrtoint(buf, 0, &val) != 0) 3119 return -EINVAL; 3120 3121 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3122 dev_err(&instance->pdev->dev, "application updates invalid " 3123 "firmware crash state\n"); 3124 return -EINVAL; 3125 } 3126 3127 instance->fw_crash_state = val; 3128 3129 if ((val == COPIED) || (val == COPY_ERROR)) { 3130 spin_lock_irqsave(&instance->crashdump_lock, flags); 3131 megasas_free_host_crash_buffer(instance); 3132 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3133 if (val == COPY_ERROR) 3134 dev_info(&instance->pdev->dev, "application failed to " 3135 "copy Firmware crash dump\n"); 3136 else 3137 dev_info(&instance->pdev->dev, "Firmware crash dump " 3138 "copied successfully\n"); 3139 } 3140 return strlen(buf); 3141 } 3142 3143 static ssize_t 3144 megasas_fw_crash_state_show(struct device *cdev, 3145 struct device_attribute *attr, char *buf) 3146 { 3147 struct Scsi_Host *shost = class_to_shost(cdev); 3148 struct megasas_instance *instance = 3149 (struct megasas_instance *) shost->hostdata; 3150 3151 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3152 } 3153 3154 static ssize_t 3155 megasas_page_size_show(struct device *cdev, 3156 struct device_attribute *attr, char *buf) 3157 { 3158 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3159 } 3160 3161 static ssize_t 3162 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3163 char *buf) 3164 { 3165 struct Scsi_Host *shost = class_to_shost(cdev); 3166 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3167 3168 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3169 } 3170 3171 static ssize_t 3172 megasas_fw_cmds_outstanding_show(struct device *cdev, 3173 struct device_attribute *attr, char *buf) 3174 { 3175 struct Scsi_Host *shost = class_to_shost(cdev); 3176 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3177 3178 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3179 } 3180 3181 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3182 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3183 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3184 megasas_fw_crash_buffer_size_show, NULL); 3185 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 3186 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 3187 static DEVICE_ATTR(page_size, S_IRUGO, 3188 
megasas_page_size_show, NULL); 3189 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3190 megasas_ldio_outstanding_show, NULL); 3191 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO, 3192 megasas_fw_cmds_outstanding_show, NULL); 3193 3194 struct device_attribute *megaraid_host_attrs[] = { 3195 &dev_attr_fw_crash_buffer_size, 3196 &dev_attr_fw_crash_buffer, 3197 &dev_attr_fw_crash_state, 3198 &dev_attr_page_size, 3199 &dev_attr_ldio_outstanding, 3200 &dev_attr_fw_cmds_outstanding, 3201 NULL, 3202 }; 3203 3204 /* 3205 * Scsi host template for megaraid_sas driver 3206 */ 3207 static struct scsi_host_template megasas_template = { 3208 3209 .module = THIS_MODULE, 3210 .name = "Avago SAS based MegaRAID driver", 3211 .proc_name = "megaraid_sas", 3212 .slave_configure = megasas_slave_configure, 3213 .slave_alloc = megasas_slave_alloc, 3214 .slave_destroy = megasas_slave_destroy, 3215 .queuecommand = megasas_queue_command, 3216 .eh_target_reset_handler = megasas_reset_target, 3217 .eh_abort_handler = megasas_task_abort, 3218 .eh_host_reset_handler = megasas_reset_bus_host, 3219 .eh_timed_out = megasas_reset_timer, 3220 .shost_attrs = megaraid_host_attrs, 3221 .bios_param = megasas_bios_param, 3222 .change_queue_depth = scsi_change_queue_depth, 3223 .no_write_same = 1, 3224 }; 3225 3226 /** 3227 * megasas_complete_int_cmd - Completes an internal command 3228 * @instance: Adapter soft state 3229 * @cmd: Command to be completed 3230 * 3231 * The megasas_issue_blocked_cmd() function waits for a command to complete 3232 * after it issues a command. This function wakes up that waiting routine by 3233 * calling wake_up() on the wait queue. 3234 */ 3235 static void 3236 megasas_complete_int_cmd(struct megasas_instance *instance, 3237 struct megasas_cmd *cmd) 3238 { 3239 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3240 wake_up(&instance->int_cmd_wait_q); 3241 } 3242 3243 /** 3244 * megasas_complete_abort - Completes aborting a command 3245 * @instance: Adapter soft state 3246 * @cmd: Cmd that was issued to abort another cmd 3247 * 3248 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3249 * after it issues an abort on a previously issued command. This function 3250 * wakes up all functions waiting on the same wait queue. 3251 */ 3252 static void 3253 megasas_complete_abort(struct megasas_instance *instance, 3254 struct megasas_cmd *cmd) 3255 { 3256 if (cmd->sync_cmd) { 3257 cmd->sync_cmd = 0; 3258 cmd->cmd_status_drv = 0; 3259 wake_up(&instance->abort_cmd_wait_q); 3260 } 3261 } 3262 3263 /** 3264 * megasas_complete_cmd - Completes a command 3265 * @instance: Adapter soft state 3266 * @cmd: Command to be completed 3267 * @alt_status: If non-zero, use this value as status to 3268 * SCSI mid-layer instead of the value returned 3269 * by the FW. 
This should be used if caller wants 3270 * an alternate status (as in the case of aborted 3271 * commands) 3272 */ 3273 void 3274 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3275 u8 alt_status) 3276 { 3277 int exception = 0; 3278 struct megasas_header *hdr = &cmd->frame->hdr; 3279 unsigned long flags; 3280 struct fusion_context *fusion = instance->ctrl_context; 3281 u32 opcode, status; 3282 3283 /* flag for the retry reset */ 3284 cmd->retry_for_fw_reset = 0; 3285 3286 if (cmd->scmd) 3287 cmd->scmd->SCp.ptr = NULL; 3288 3289 switch (hdr->cmd) { 3290 case MFI_CMD_INVALID: 3291 /* Some older 1068 controller FW may keep a pended 3292 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3293 when booting the kdump kernel. Ignore this command to 3294 prevent a kernel panic on shutdown of the kdump kernel. */ 3295 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3296 "completed\n"); 3297 dev_warn(&instance->pdev->dev, "If you have a controller " 3298 "other than PERC5, please upgrade your firmware\n"); 3299 break; 3300 case MFI_CMD_PD_SCSI_IO: 3301 case MFI_CMD_LD_SCSI_IO: 3302 3303 /* 3304 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3305 * issued either through an IO path or an IOCTL path. If it 3306 * was via IOCTL, we will send it to internal completion. 3307 */ 3308 if (cmd->sync_cmd) { 3309 cmd->sync_cmd = 0; 3310 megasas_complete_int_cmd(instance, cmd); 3311 break; 3312 } 3313 /* fall through */ 3314 3315 case MFI_CMD_LD_READ: 3316 case MFI_CMD_LD_WRITE: 3317 3318 if (alt_status) { 3319 cmd->scmd->result = alt_status << 16; 3320 exception = 1; 3321 } 3322 3323 if (exception) { 3324 3325 atomic_dec(&instance->fw_outstanding); 3326 3327 scsi_dma_unmap(cmd->scmd); 3328 cmd->scmd->scsi_done(cmd->scmd); 3329 megasas_return_cmd(instance, cmd); 3330 3331 break; 3332 } 3333 3334 switch (hdr->cmd_status) { 3335 3336 case MFI_STAT_OK: 3337 cmd->scmd->result = DID_OK << 16; 3338 break; 3339 3340 case MFI_STAT_SCSI_IO_FAILED: 3341 case MFI_STAT_LD_INIT_IN_PROGRESS: 3342 cmd->scmd->result = 3343 (DID_ERROR << 16) | hdr->scsi_status; 3344 break; 3345 3346 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3347 3348 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3349 3350 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3351 memset(cmd->scmd->sense_buffer, 0, 3352 SCSI_SENSE_BUFFERSIZE); 3353 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3354 hdr->sense_len); 3355 3356 cmd->scmd->result |= DRIVER_SENSE << 24; 3357 } 3358 3359 break; 3360 3361 case MFI_STAT_LD_OFFLINE: 3362 case MFI_STAT_DEVICE_NOT_FOUND: 3363 cmd->scmd->result = DID_BAD_TARGET << 16; 3364 break; 3365 3366 default: 3367 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3368 hdr->cmd_status); 3369 cmd->scmd->result = DID_ERROR << 16; 3370 break; 3371 } 3372 3373 atomic_dec(&instance->fw_outstanding); 3374 3375 scsi_dma_unmap(cmd->scmd); 3376 cmd->scmd->scsi_done(cmd->scmd); 3377 megasas_return_cmd(instance, cmd); 3378 3379 break; 3380 3381 case MFI_CMD_SMP: 3382 case MFI_CMD_STP: 3383 case MFI_CMD_NVME: 3384 megasas_complete_int_cmd(instance, cmd); 3385 break; 3386 3387 case MFI_CMD_DCMD: 3388 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3389 /* Check for LD map update */ 3390 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3391 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3392 fusion->fast_path_io = 0; 3393 spin_lock_irqsave(instance->host->host_lock, flags); 3394 status = cmd->frame->hdr.cmd_status; 3395 instance->map_update_cmd = NULL; 3396 if (status != MFI_STAT_OK) { 
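/* MFI_STAT_NOT_FOUND is skipped without logging below; any other failure status is reported */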
3397 if (status != MFI_STAT_NOT_FOUND) 3398 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n", 3399 cmd->frame->hdr.cmd_status); 3400 else { 3401 megasas_return_cmd(instance, cmd); 3402 spin_unlock_irqrestore( 3403 instance->host->host_lock, 3404 flags); 3405 break; 3406 } 3407 } 3408 3409 megasas_return_cmd(instance, cmd); 3410 3411 /* 3412 * Set fast path IO to ZERO. 3413 * Validate Map will set proper value. 3414 * Meanwhile all IOs will go as LD IO. 3415 */ 3416 if (status == MFI_STAT_OK && 3417 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3418 instance->map_id++; 3419 fusion->fast_path_io = 1; 3420 } else { 3421 fusion->fast_path_io = 0; 3422 } 3423 3424 megasas_sync_map_info(instance); 3425 spin_unlock_irqrestore(instance->host->host_lock, 3426 flags); 3427 break; 3428 } 3429 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3430 opcode == MR_DCMD_CTRL_EVENT_GET) { 3431 spin_lock_irqsave(&poll_aen_lock, flags); 3432 megasas_poll_wait_aen = 0; 3433 spin_unlock_irqrestore(&poll_aen_lock, flags); 3434 } 3435 3436 /* FW has an updated PD sequence */ 3437 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3438 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3439 3440 spin_lock_irqsave(instance->host->host_lock, flags); 3441 status = cmd->frame->hdr.cmd_status; 3442 instance->jbod_seq_cmd = NULL; 3443 megasas_return_cmd(instance, cmd); 3444 3445 if (status == MFI_STAT_OK) { 3446 instance->pd_seq_map_id++; 3447 /* Re-register a pd sync seq num cmd */ 3448 if (megasas_sync_pd_seq_num(instance, true)) 3449 instance->use_seqnum_jbod_fp = false; 3450 } else 3451 instance->use_seqnum_jbod_fp = false; 3452 3453 spin_unlock_irqrestore(instance->host->host_lock, flags); 3454 break; 3455 } 3456 3457 /* 3458 * See if we got an event notification 3459 */ 3460 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3461 megasas_service_aen(instance, cmd); 3462 else 3463 megasas_complete_int_cmd(instance, cmd); 3464 3465 break; 3466 3467 case MFI_CMD_ABORT: 3468 /* 3469 * Cmd issued to abort another cmd returned 3470 */ 3471 megasas_complete_abort(instance, cmd); 3472 break; 3473 3474 default: 3475 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3476 hdr->cmd); 3477 megasas_complete_int_cmd(instance, cmd); 3478 break; 3479 } 3480 } 3481 3482 /** 3483 * megasas_issue_pending_cmds_again - issue all pending cmds 3484 * in FW again because of the fw reset 3485 * @instance: Adapter soft state 3486 */ 3487 static inline void 3488 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3489 { 3490 struct megasas_cmd *cmd; 3491 struct list_head clist_local; 3492 union megasas_evt_class_locale class_locale; 3493 unsigned long flags; 3494 u32 seq_num; 3495 3496 INIT_LIST_HEAD(&clist_local); 3497 spin_lock_irqsave(&instance->hba_lock, flags); 3498 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3499 spin_unlock_irqrestore(&instance->hba_lock, flags); 3500 3501 while (!list_empty(&clist_local)) { 3502 cmd = list_entry((&clist_local)->next, 3503 struct megasas_cmd, list); 3504 list_del_init(&cmd->list); 3505 3506 if (cmd->sync_cmd || cmd->scmd) { 3507 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3508 " detected to be pending while HBA reset\n", 3509 cmd, cmd->scmd, cmd->sync_cmd); 3510 3511 cmd->retry_for_fw_reset++; 3512 3513 if (cmd->retry_for_fw_reset == 3) { 3514 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3515 " was tried multiple times during reset." 
" Shutting down the HBA\n", 3517 cmd, cmd->scmd, cmd->sync_cmd); 3518 instance->instancet->disable_intr(instance); 3519 atomic_set(&instance->fw_reset_no_pci_access, 1); 3520 megaraid_sas_kill_hba(instance); 3521 return; 3522 } 3523 } 3524 3525 if (cmd->sync_cmd == 1) { 3526 if (cmd->scmd) { 3527 dev_notice(&instance->pdev->dev, "unexpected" 3528 " cmd attached to internal command!\n"); 3529 } 3530 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3531 " on the internal reset queue," 3532 " issue it again.\n", cmd); 3533 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3534 instance->instancet->fire_cmd(instance, 3535 cmd->frame_phys_addr, 3536 0, instance->reg_set); 3537 } else if (cmd->scmd) { 3538 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3539 " detected on the internal queue, issue again.\n", 3540 cmd, cmd->scmd->cmnd[0]); 3541 3542 atomic_inc(&instance->fw_outstanding); 3543 instance->instancet->fire_cmd(instance, 3544 cmd->frame_phys_addr, 3545 cmd->frame_count-1, instance->reg_set); 3546 } else { 3547 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3548 " internal reset defer list while re-issue!!\n", 3549 cmd); 3550 } 3551 } 3552 3553 if (instance->aen_cmd) { 3554 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3555 megasas_return_cmd(instance, instance->aen_cmd); 3556 3557 instance->aen_cmd = NULL; 3558 } 3559 3560 /* 3561 * Initiate AEN (Asynchronous Event Notification) 3562 */ 3563 seq_num = instance->last_seq_num; 3564 class_locale.members.reserved = 0; 3565 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3566 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3567 3568 megasas_register_aen(instance, seq_num, class_locale.word); 3569 } 3570 3571 /** 3572 * Move the internal reset pending commands to a deferred queue. 3573 * 3574 * We move the commands pending at internal reset time to a 3575 * pending queue. This queue would be flushed after successful 3576 * completion of the internal reset sequence. If the internal reset 3577 * does not complete in time, the kernel reset handler will flush 3578 * these commands. 
**/ 3580 static void 3581 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3582 { 3583 struct megasas_cmd *cmd; 3584 int i; 3585 u16 max_cmd = instance->max_fw_cmds; 3586 u32 defer_index; 3587 unsigned long flags; 3588 3589 defer_index = 0; 3590 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3591 for (i = 0; i < max_cmd; i++) { 3592 cmd = instance->cmd_list[i]; 3593 if (cmd->sync_cmd == 1 || cmd->scmd) { 3594 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3595 " on the defer queue as internal\n", 3596 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3597 3598 if (!list_empty(&cmd->list)) { 3599 dev_notice(&instance->pdev->dev, "ERROR while" 3600 " moving this cmd:%p, %d %p, it was" 3601 " discovered on some list?\n", 3602 cmd, cmd->sync_cmd, cmd->scmd); 3603 3604 list_del_init(&cmd->list); 3605 } 3606 defer_index++; 3607 list_add_tail(&cmd->list, 3608 &instance->internal_reset_pending_q); 3609 } 3610 } 3611 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3612 } 3613 3614 3615 static void 3616 process_fw_state_change_wq(struct work_struct *work) 3617 { 3618 struct megasas_instance *instance = 3619 container_of(work, struct megasas_instance, work_init); 3620 u32 wait; 3621 unsigned long flags; 3622 3623 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3624 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3625 atomic_read(&instance->adprecovery)); 3626 return ; 3627 } 3628 3629 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3630 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3631 " state, restarting it...\n"); 3632 3633 instance->instancet->disable_intr(instance); 3634 atomic_set(&instance->fw_outstanding, 0); 3635 3636 atomic_set(&instance->fw_reset_no_pci_access, 1); 3637 instance->instancet->adp_reset(instance, instance->reg_set); 3638 atomic_set(&instance->fw_reset_no_pci_access, 0); 3639 3640 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3641 " initiating next stage...\n"); 3642 3643 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3644 " state 2 starting...\n"); 3645 3646 /* wait for about 30 seconds before starting the second init */ 3647 for (wait = 0; wait < 30; wait++) { 3648 msleep(1000); 3649 } 3650 3651 if (megasas_transition_to_ready(instance, 1)) { 3652 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3653 3654 atomic_set(&instance->fw_reset_no_pci_access, 1); 3655 megaraid_sas_kill_hba(instance); 3656 return ; 3657 } 3658 3659 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3660 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3661 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3662 ) { 3663 *instance->consumer = *instance->producer; 3664 } else { 3665 *instance->consumer = 0; 3666 *instance->producer = 0; 3667 } 3668 3669 megasas_issue_init_mfi(instance); 3670 3671 spin_lock_irqsave(&instance->hba_lock, flags); 3672 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3673 spin_unlock_irqrestore(&instance->hba_lock, flags); 3674 instance->instancet->enable_intr(instance); 3675 3676 megasas_issue_pending_cmds_again(instance); 3677 instance->issuepend_done = 1; 3678 } 3679 } 3680 3681 /** 3682 * megasas_deplete_reply_queue - Processes all completed commands 3683 * @instance: Adapter soft state 3684 * @alt_status: Alternate status to be returned to 3685 * SCSI mid-layer instead of the status 3686 * returned by the FW 3687 * Note: this must be called with hba lock held 3688 */ 3689 static 
int 3690 megasas_deplete_reply_queue(struct megasas_instance *instance, 3691 u8 alt_status) 3692 { 3693 u32 mfiStatus; 3694 u32 fw_state; 3695 3696 if ((mfiStatus = instance->instancet->check_reset(instance, 3697 instance->reg_set)) == 1) { 3698 return IRQ_HANDLED; 3699 } 3700 3701 mfiStatus = instance->instancet->clear_intr(instance); 3702 if (mfiStatus == 0) { 3703 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3704 if (!instance->msix_vectors) 3705 return IRQ_NONE; 3706 } 3707 3708 instance->mfiStatus = mfiStatus; 3709 3710 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3711 fw_state = instance->instancet->read_fw_status_reg( 3712 instance) & MFI_STATE_MASK; 3713 3714 if (fw_state != MFI_STATE_FAULT) { 3715 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3716 fw_state); 3717 } 3718 3719 if ((fw_state == MFI_STATE_FAULT) && 3720 (instance->disableOnlineCtrlReset == 0)) { 3721 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3722 3723 if ((instance->pdev->device == 3724 PCI_DEVICE_ID_LSI_SAS1064R) || 3725 (instance->pdev->device == 3726 PCI_DEVICE_ID_DELL_PERC5) || 3727 (instance->pdev->device == 3728 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3729 3730 *instance->consumer = 3731 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3732 } 3733 3734 3735 instance->instancet->disable_intr(instance); 3736 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3737 instance->issuepend_done = 0; 3738 3739 atomic_set(&instance->fw_outstanding, 0); 3740 megasas_internal_reset_defer_cmds(instance); 3741 3742 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3743 fw_state, atomic_read(&instance->adprecovery)); 3744 3745 schedule_work(&instance->work_init); 3746 return IRQ_HANDLED; 3747 3748 } else { 3749 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3750 fw_state, instance->disableOnlineCtrlReset); 3751 } 3752 } 3753 3754 tasklet_schedule(&instance->isr_tasklet); 3755 return IRQ_HANDLED; 3756 } 3757 /** 3758 * megasas_isr - isr entry point 3759 */ 3760 static irqreturn_t megasas_isr(int irq, void *devp) 3761 { 3762 struct megasas_irq_context *irq_context = devp; 3763 struct megasas_instance *instance = irq_context->instance; 3764 unsigned long flags; 3765 irqreturn_t rc; 3766 3767 if (atomic_read(&instance->fw_reset_no_pci_access)) 3768 return IRQ_HANDLED; 3769 3770 spin_lock_irqsave(&instance->hba_lock, flags); 3771 rc = megasas_deplete_reply_queue(instance, DID_OK); 3772 spin_unlock_irqrestore(&instance->hba_lock, flags); 3773 3774 return rc; 3775 } 3776 3777 /** 3778 * megasas_transition_to_ready - Move the FW to READY state 3779 * @instance: Adapter soft state 3780 * 3781 * During the initialization, FW passes can potentially be in any one of 3782 * several possible states. If the FW in operational, waiting-for-handshake 3783 * states, driver must take steps to bring it to ready state. Otherwise, it 3784 * has to wait for the ready state. 
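 *
 * Returns 0 once the FW reaches MFI_STATE_READY, or -ENODEV on failure.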
3785 */ 3786 int 3787 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3788 { 3789 int i; 3790 u8 max_wait; 3791 u32 fw_state; 3792 u32 cur_state; 3793 u32 abs_state, curr_abs_state; 3794 3795 abs_state = instance->instancet->read_fw_status_reg(instance); 3796 fw_state = abs_state & MFI_STATE_MASK; 3797 3798 if (fw_state != MFI_STATE_READY) 3799 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3800 " state\n"); 3801 3802 while (fw_state != MFI_STATE_READY) { 3803 3804 switch (fw_state) { 3805 3806 case MFI_STATE_FAULT: 3807 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3808 if (ocr) { 3809 max_wait = MEGASAS_RESET_WAIT_TIME; 3810 cur_state = MFI_STATE_FAULT; 3811 break; 3812 } else 3813 return -ENODEV; 3814 3815 case MFI_STATE_WAIT_HANDSHAKE: 3816 /* 3817 * Set the CLR bit in inbound doorbell 3818 */ 3819 if ((instance->pdev->device == 3820 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3821 (instance->pdev->device == 3822 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3823 (instance->adapter_type != MFI_SERIES)) 3824 writel( 3825 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3826 &instance->reg_set->doorbell); 3827 else 3828 writel( 3829 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3830 &instance->reg_set->inbound_doorbell); 3831 3832 max_wait = MEGASAS_RESET_WAIT_TIME; 3833 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3834 break; 3835 3836 case MFI_STATE_BOOT_MESSAGE_PENDING: 3837 if ((instance->pdev->device == 3838 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3839 (instance->pdev->device == 3840 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3841 (instance->adapter_type != MFI_SERIES)) 3842 writel(MFI_INIT_HOTPLUG, 3843 &instance->reg_set->doorbell); 3844 else 3845 writel(MFI_INIT_HOTPLUG, 3846 &instance->reg_set->inbound_doorbell); 3847 3848 max_wait = MEGASAS_RESET_WAIT_TIME; 3849 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3850 break; 3851 3852 case MFI_STATE_OPERATIONAL: 3853 /* 3854 * Bring it to READY state; assuming max wait 10 secs 3855 */ 3856 instance->instancet->disable_intr(instance); 3857 if ((instance->pdev->device == 3858 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3859 (instance->pdev->device == 3860 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3861 (instance->adapter_type != MFI_SERIES)) { 3862 writel(MFI_RESET_FLAGS, 3863 &instance->reg_set->doorbell); 3864 3865 if (instance->adapter_type != MFI_SERIES) { 3866 for (i = 0; i < (10 * 1000); i += 20) { 3867 if (megasas_readl( 3868 instance, 3869 &instance-> 3870 reg_set-> 3871 doorbell) & 1) 3872 msleep(20); 3873 else 3874 break; 3875 } 3876 } 3877 } else 3878 writel(MFI_RESET_FLAGS, 3879 &instance->reg_set->inbound_doorbell); 3880 3881 max_wait = MEGASAS_RESET_WAIT_TIME; 3882 cur_state = MFI_STATE_OPERATIONAL; 3883 break; 3884 3885 case MFI_STATE_UNDEFINED: 3886 /* 3887 * This state should not last for more than 2 seconds 3888 */ 3889 max_wait = MEGASAS_RESET_WAIT_TIME; 3890 cur_state = MFI_STATE_UNDEFINED; 3891 break; 3892 3893 case MFI_STATE_BB_INIT: 3894 max_wait = MEGASAS_RESET_WAIT_TIME; 3895 cur_state = MFI_STATE_BB_INIT; 3896 break; 3897 3898 case MFI_STATE_FW_INIT: 3899 max_wait = MEGASAS_RESET_WAIT_TIME; 3900 cur_state = MFI_STATE_FW_INIT; 3901 break; 3902 3903 case MFI_STATE_FW_INIT_2: 3904 max_wait = MEGASAS_RESET_WAIT_TIME; 3905 cur_state = MFI_STATE_FW_INIT_2; 3906 break; 3907 3908 case MFI_STATE_DEVICE_SCAN: 3909 max_wait = MEGASAS_RESET_WAIT_TIME; 3910 cur_state = MFI_STATE_DEVICE_SCAN; 3911 break; 3912 3913 case MFI_STATE_FLUSH_CACHE: 3914 max_wait = MEGASAS_RESET_WAIT_TIME; 3915 cur_state = 
MFI_STATE_FLUSH_CACHE; 3916 break; 3917 3918 default: 3919 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 3920 fw_state); 3921 return -ENODEV; 3922 } 3923 3924 /* 3925 * The cur_state should not last for more than max_wait secs 3926 */ 3927 for (i = 0; i < max_wait; i++) { 3928 curr_abs_state = instance->instancet-> 3929 read_fw_status_reg(instance); 3930 3931 if (abs_state == curr_abs_state) { 3932 msleep(1000); 3933 } else 3934 break; 3935 } 3936 3937 /* 3938 * Return error if fw_state hasn't changed after max_wait 3939 */ 3940 if (curr_abs_state == abs_state) { 3941 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3942 "in %d secs\n", fw_state, max_wait); 3943 return -ENODEV; 3944 } 3945 3946 abs_state = curr_abs_state; 3947 fw_state = curr_abs_state & MFI_STATE_MASK; 3948 } 3949 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3950 3951 return 0; 3952 } 3953 3954 /** 3955 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3956 * @instance: Adapter soft state 3957 */ 3958 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3959 { 3960 int i; 3961 u16 max_cmd = instance->max_mfi_cmds; 3962 struct megasas_cmd *cmd; 3963 3964 if (!instance->frame_dma_pool) 3965 return; 3966 3967 /* 3968 * Return all frames to pool 3969 */ 3970 for (i = 0; i < max_cmd; i++) { 3971 3972 cmd = instance->cmd_list[i]; 3973 3974 if (cmd->frame) 3975 dma_pool_free(instance->frame_dma_pool, cmd->frame, 3976 cmd->frame_phys_addr); 3977 3978 if (cmd->sense) 3979 dma_pool_free(instance->sense_dma_pool, cmd->sense, 3980 cmd->sense_phys_addr); 3981 } 3982 3983 /* 3984 * Now destroy the pool itself 3985 */ 3986 dma_pool_destroy(instance->frame_dma_pool); 3987 dma_pool_destroy(instance->sense_dma_pool); 3988 3989 instance->frame_dma_pool = NULL; 3990 instance->sense_dma_pool = NULL; 3991 } 3992 3993 /** 3994 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3995 * @instance: Adapter soft state 3996 * 3997 * Each command packet has an embedded DMA memory buffer that is used for 3998 * filling MFI frame and the SG list that immediately follows the frame. This 3999 * function creates those DMA memory buffers for each command packet by using 4000 * PCI pool facility. 4001 */ 4002 static int megasas_create_frame_pool(struct megasas_instance *instance) 4003 { 4004 int i; 4005 u16 max_cmd; 4006 u32 sge_sz; 4007 u32 frame_count; 4008 struct megasas_cmd *cmd; 4009 4010 max_cmd = instance->max_mfi_cmds; 4011 4012 /* 4013 * Size of our frame is 64 bytes for MFI frame, followed by max SG 4014 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 4015 */ 4016 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 4017 sizeof(struct megasas_sge32); 4018 4019 if (instance->flag_ieee) 4020 sge_sz = sizeof(struct megasas_sge_skinny); 4021 4022 /* 4023 * For MFI controllers. 4024 * max_num_sge = 60 4025 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4026 * Total 960 byte (15 MFI frame of 64 byte) 4027 * 4028 * Fusion adapter require only 3 extra frame. 4029 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4030 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4031 * Total 192 byte (3 MFI frame of 64 byte) 4032 */ 4033 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4034 (15 + 1) : (3 + 1); 4035 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4036 /* 4037 * Use DMA pool facility provided by PCI layer 4038 */ 4039 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4040 &instance->pdev->dev, 4041 instance->mfi_frame_size, 256, 0); 4042 4043 if (!instance->frame_dma_pool) { 4044 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4045 return -ENOMEM; 4046 } 4047 4048 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4049 &instance->pdev->dev, 128, 4050 4, 0); 4051 4052 if (!instance->sense_dma_pool) { 4053 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4054 4055 dma_pool_destroy(instance->frame_dma_pool); 4056 instance->frame_dma_pool = NULL; 4057 4058 return -ENOMEM; 4059 } 4060 4061 /* 4062 * Allocate and attach a frame to each of the commands in cmd_list. 4063 * By making cmd->index the context instead of &cmd, we can 4064 * always use a 32-bit context regardless of the architecture 4065 */ 4066 for (i = 0; i < max_cmd; i++) { 4067 4068 cmd = instance->cmd_list[i]; 4069 4070 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4071 GFP_KERNEL, &cmd->frame_phys_addr); 4072 4073 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4074 GFP_KERNEL, &cmd->sense_phys_addr); 4075 4076 /* 4077 * megasas_teardown_frame_pool() takes care of freeing 4078 * whatever has been allocated 4079 */ 4080 if (!cmd->frame || !cmd->sense) { 4081 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4082 megasas_teardown_frame_pool(instance); 4083 return -ENOMEM; 4084 } 4085 4086 cmd->frame->io.context = cpu_to_le32(cmd->index); 4087 cmd->frame->io.pad_0 = 0; 4088 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4089 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4090 } 4091 4092 return 0; 4093 } 4094 4095 /** 4096 * megasas_free_cmds - Free all the cmds in the free cmd pool 4097 * @instance: Adapter soft state 4098 */ 4099 void megasas_free_cmds(struct megasas_instance *instance) 4100 { 4101 int i; 4102 4103 /* First free the MFI frame pool */ 4104 megasas_teardown_frame_pool(instance); 4105 4106 /* Free all the commands in the cmd_list */ 4107 for (i = 0; i < instance->max_mfi_cmds; i++) 4108 4109 kfree(instance->cmd_list[i]); 4110 4111 /* Free the cmd_list buffer itself */ 4112 kfree(instance->cmd_list); 4113 instance->cmd_list = NULL; 4114 4115 INIT_LIST_HEAD(&instance->cmd_pool); 4116 } 4117 4118 /** 4119 * megasas_alloc_cmds - Allocates the command packets 4120 * @instance: Adapter soft state 4121 * 4122 * Each command that is issued to the FW, whether an IO command from the OS or 4123 * an internal command like an IOCTL, is wrapped in a local data structure called 4124 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4125 * the FW. 4126 * 4127 * Each frame has a 32-bit field called context (tag). This context is used 4128 * to get back the megasas_cmd from the frame when a frame gets completed in 4129 * the ISR. Typically the address of the megasas_cmd itself would be used as 4130 * the context. But we wanted to keep the differences between 32- and 64-bit 4131 * systems to the minimum. We always use 32-bit integers for the context. In 4132 * this driver, the 32-bit values are the indices into the array cmd_list. 4133 * This array is used only to look up the megasas_cmd given the context. The 4134 * free commands themselves are maintained in a linked list called cmd_pool.
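 *
 * For example (illustrative only), once a completion handler has read the
 * context back from a finished frame, the originating command is simply:
 *
 *	cmd = instance->cmd_list[context];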
4135 */ 4136 int megasas_alloc_cmds(struct megasas_instance *instance) 4137 { 4138 int i; 4139 int j; 4140 u16 max_cmd; 4141 struct megasas_cmd *cmd; 4142 4143 max_cmd = instance->max_mfi_cmds; 4144 4145 /* 4146 * instance->cmd_list is an array of struct megasas_cmd pointers. 4147 * Allocate the dynamic array first and then allocate individual 4148 * commands. 4149 */ 4150 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4151 4152 if (!instance->cmd_list) { 4153 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4154 return -ENOMEM; 4155 } 4156 4157 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd); 4158 4159 for (i = 0; i < max_cmd; i++) { 4160 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4161 GFP_KERNEL); 4162 4163 if (!instance->cmd_list[i]) { 4164 4165 for (j = 0; j < i; j++) 4166 kfree(instance->cmd_list[j]); 4167 4168 kfree(instance->cmd_list); 4169 instance->cmd_list = NULL; 4170 4171 return -ENOMEM; 4172 } 4173 } 4174 4175 for (i = 0; i < max_cmd; i++) { 4176 cmd = instance->cmd_list[i]; 4177 memset(cmd, 0, sizeof(struct megasas_cmd)); 4178 cmd->index = i; 4179 cmd->scmd = NULL; 4180 cmd->instance = instance; 4181 4182 list_add_tail(&cmd->list, &instance->cmd_pool); 4183 } 4184 4185 /* 4186 * Create a frame pool and assign one frame to each cmd 4187 */ 4188 if (megasas_create_frame_pool(instance)) { 4189 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4190 megasas_free_cmds(instance); 4191 } 4192 4193 return 0; 4194 } 4195 4196 /* 4197 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4198 * @instance: Adapter soft state 4199 * 4200 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4201 * or FW is not under OCR. 
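 * In terms of the return codes used below: INITIATE_OCR when a Fusion adapter can be reset, IGNORE_TIMEOUT while the driver is unloading or an OCR is already in progress, and KILL_ADAPTER for MFI series controllers.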
4202 */ 4203 inline int 4204 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4205 4206 if (instance->adapter_type == MFI_SERIES) 4207 return KILL_ADAPTER; 4208 else if (instance->unload || 4209 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 4210 return IGNORE_TIMEOUT; 4211 else 4212 return INITIATE_OCR; 4213 } 4214 4215 static void 4216 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4217 { 4218 int ret; 4219 struct megasas_cmd *cmd; 4220 struct megasas_dcmd_frame *dcmd; 4221 4222 struct MR_PRIV_DEVICE *mr_device_priv_data; 4223 u16 device_id = 0; 4224 4225 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4226 cmd = megasas_get_cmd(instance); 4227 4228 if (!cmd) { 4229 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4230 return; 4231 } 4232 4233 dcmd = &cmd->frame->dcmd; 4234 4235 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4236 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4237 4238 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4239 dcmd->cmd = MFI_CMD_DCMD; 4240 dcmd->cmd_status = 0xFF; 4241 dcmd->sge_count = 1; 4242 dcmd->flags = MFI_FRAME_DIR_READ; 4243 dcmd->timeout = 0; 4244 dcmd->pad_0 = 0; 4245 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4246 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4247 4248 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4249 sizeof(struct MR_PD_INFO)); 4250 4251 if ((instance->adapter_type != MFI_SERIES) && 4252 !instance->mask_interrupts) 4253 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4254 else 4255 ret = megasas_issue_polled(instance, cmd); 4256 4257 switch (ret) { 4258 case DCMD_SUCCESS: 4259 mr_device_priv_data = sdev->hostdata; 4260 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4261 mr_device_priv_data->interface_type = 4262 instance->pd_info->state.ddf.pdType.intf; 4263 break; 4264 4265 case DCMD_TIMEOUT: 4266 4267 switch (dcmd_timeout_ocr_possible(instance)) { 4268 case INITIATE_OCR: 4269 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4270 megasas_reset_fusion(instance->host, 4271 MFI_IO_TIMEOUT_OCR); 4272 break; 4273 case KILL_ADAPTER: 4274 megaraid_sas_kill_hba(instance); 4275 break; 4276 case IGNORE_TIMEOUT: 4277 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4278 __func__, __LINE__); 4279 break; 4280 } 4281 4282 break; 4283 } 4284 4285 if (ret != DCMD_TIMEOUT) 4286 megasas_return_cmd(instance, cmd); 4287 4288 return; 4289 } 4290 /* 4291 * megasas_get_pd_list_info - Returns FW's pd_list structure 4292 * @instance: Adapter soft state 4293 * @pd_list: pd_list structure 4294 * 4295 * Issues an internal command (DCMD) to get the FW's controller PD 4296 * list structure. This information is mainly used to find out SYSTEM 4297 * supported by the FW. 
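 * (i.e. which physical drives are exposed to the host; such drives are marked MR_PD_STATE_SYSTEM in the local PD list below).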
4298 */ 4299 static int 4300 megasas_get_pd_list(struct megasas_instance *instance) 4301 { 4302 int ret = 0, pd_index = 0; 4303 struct megasas_cmd *cmd; 4304 struct megasas_dcmd_frame *dcmd; 4305 struct MR_PD_LIST *ci; 4306 struct MR_PD_ADDRESS *pd_addr; 4307 dma_addr_t ci_h = 0; 4308 4309 if (instance->pd_list_not_supported) { 4310 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4311 "not supported by firmware\n"); 4312 return ret; 4313 } 4314 4315 ci = instance->pd_list_buf; 4316 ci_h = instance->pd_list_buf_h; 4317 4318 cmd = megasas_get_cmd(instance); 4319 4320 if (!cmd) { 4321 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4322 return -ENOMEM; 4323 } 4324 4325 dcmd = &cmd->frame->dcmd; 4326 4327 memset(ci, 0, sizeof(*ci)); 4328 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4329 4330 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4331 dcmd->mbox.b[1] = 0; 4332 dcmd->cmd = MFI_CMD_DCMD; 4333 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4334 dcmd->sge_count = 1; 4335 dcmd->flags = MFI_FRAME_DIR_READ; 4336 dcmd->timeout = 0; 4337 dcmd->pad_0 = 0; 4338 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4339 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4340 4341 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4342 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4343 4344 if ((instance->adapter_type != MFI_SERIES) && 4345 !instance->mask_interrupts) 4346 ret = megasas_issue_blocked_cmd(instance, cmd, 4347 MFI_IO_TIMEOUT_SECS); 4348 else 4349 ret = megasas_issue_polled(instance, cmd); 4350 4351 switch (ret) { 4352 case DCMD_FAILED: 4353 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4354 "failed/not supported by firmware\n"); 4355 4356 if (instance->adapter_type != MFI_SERIES) 4357 megaraid_sas_kill_hba(instance); 4358 else 4359 instance->pd_list_not_supported = 1; 4360 break; 4361 case DCMD_TIMEOUT: 4362 4363 switch (dcmd_timeout_ocr_possible(instance)) { 4364 case INITIATE_OCR: 4365 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4366 /* 4367 * DCMD failed from AEN path. 4368 * AEN path already hold reset_mutex to avoid PCI access 4369 * while OCR is in progress. 
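 * The mutex is therefore dropped around megasas_reset_fusion() and re-acquired afterwards, so the caller's locking state is unchanged.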
4370 */ 4371 mutex_unlock(&instance->reset_mutex); 4372 megasas_reset_fusion(instance->host, 4373 MFI_IO_TIMEOUT_OCR); 4374 mutex_lock(&instance->reset_mutex); 4375 break; 4376 case KILL_ADAPTER: 4377 megaraid_sas_kill_hba(instance); 4378 break; 4379 case IGNORE_TIMEOUT: 4380 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4381 __func__, __LINE__); 4382 break; 4383 } 4384 4385 break; 4386 4387 case DCMD_SUCCESS: 4388 pd_addr = ci->addr; 4389 4390 if ((le32_to_cpu(ci->count) > 4391 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4392 break; 4393 4394 memset(instance->local_pd_list, 0, 4395 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4396 4397 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4398 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4399 le16_to_cpu(pd_addr->deviceId); 4400 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4401 pd_addr->scsiDevType; 4402 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4403 MR_PD_STATE_SYSTEM; 4404 pd_addr++; 4405 } 4406 4407 memcpy(instance->pd_list, instance->local_pd_list, 4408 sizeof(instance->pd_list)); 4409 break; 4410 4411 } 4412 4413 if (ret != DCMD_TIMEOUT) 4414 megasas_return_cmd(instance, cmd); 4415 4416 return ret; 4417 } 4418 4419 /* 4420 * megasas_get_ld_list_info - Returns FW's ld_list structure 4421 * @instance: Adapter soft state 4422 * @ld_list: ld_list structure 4423 * 4424 * Issues an internal command (DCMD) to get the FW's controller PD 4425 * list structure. This information is mainly used to find out SYSTEM 4426 * supported by the FW. 4427 */ 4428 static int 4429 megasas_get_ld_list(struct megasas_instance *instance) 4430 { 4431 int ret = 0, ld_index = 0, ids = 0; 4432 struct megasas_cmd *cmd; 4433 struct megasas_dcmd_frame *dcmd; 4434 struct MR_LD_LIST *ci; 4435 dma_addr_t ci_h = 0; 4436 u32 ld_count; 4437 4438 ci = instance->ld_list_buf; 4439 ci_h = instance->ld_list_buf_h; 4440 4441 cmd = megasas_get_cmd(instance); 4442 4443 if (!cmd) { 4444 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4445 return -ENOMEM; 4446 } 4447 4448 dcmd = &cmd->frame->dcmd; 4449 4450 memset(ci, 0, sizeof(*ci)); 4451 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4452 4453 if (instance->supportmax256vd) 4454 dcmd->mbox.b[0] = 1; 4455 dcmd->cmd = MFI_CMD_DCMD; 4456 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4457 dcmd->sge_count = 1; 4458 dcmd->flags = MFI_FRAME_DIR_READ; 4459 dcmd->timeout = 0; 4460 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4461 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4462 dcmd->pad_0 = 0; 4463 4464 megasas_set_dma_settings(instance, dcmd, ci_h, 4465 sizeof(struct MR_LD_LIST)); 4466 4467 if ((instance->adapter_type != MFI_SERIES) && 4468 !instance->mask_interrupts) 4469 ret = megasas_issue_blocked_cmd(instance, cmd, 4470 MFI_IO_TIMEOUT_SECS); 4471 else 4472 ret = megasas_issue_polled(instance, cmd); 4473 4474 ld_count = le32_to_cpu(ci->ldCount); 4475 4476 switch (ret) { 4477 case DCMD_FAILED: 4478 megaraid_sas_kill_hba(instance); 4479 break; 4480 case DCMD_TIMEOUT: 4481 4482 switch (dcmd_timeout_ocr_possible(instance)) { 4483 case INITIATE_OCR: 4484 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4485 /* 4486 * DCMD failed from AEN path. 4487 * AEN path already hold reset_mutex to avoid PCI access 4488 * while OCR is in progress. 
4489 */ 4490 mutex_unlock(&instance->reset_mutex); 4491 megasas_reset_fusion(instance->host, 4492 MFI_IO_TIMEOUT_OCR); 4493 mutex_lock(&instance->reset_mutex); 4494 break; 4495 case KILL_ADAPTER: 4496 megaraid_sas_kill_hba(instance); 4497 break; 4498 case IGNORE_TIMEOUT: 4499 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4500 __func__, __LINE__); 4501 break; 4502 } 4503 4504 break; 4505 4506 case DCMD_SUCCESS: 4507 if (ld_count > instance->fw_supported_vd_count) 4508 break; 4509 4510 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4511 4512 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4513 if (ci->ldList[ld_index].state != 0) { 4514 ids = ci->ldList[ld_index].ref.targetId; 4515 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4516 } 4517 } 4518 4519 break; 4520 } 4521 4522 if (ret != DCMD_TIMEOUT) 4523 megasas_return_cmd(instance, cmd); 4524 4525 return ret; 4526 } 4527 4528 /** 4529 * megasas_ld_list_query - Returns FW's LD target ID list 4530 * @instance: Adapter soft state 4531 * @query_type: LD list query type 4532 * 4533 * Issues an internal command (DCMD) to get the FW's list of LD 4534 * target IDs. This information is mainly used to find out which 4535 * logical drives the FW exposes to the host. 4536 */ 4537 static int 4538 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4539 { 4540 int ret = 0, ld_index = 0, ids = 0; 4541 struct megasas_cmd *cmd; 4542 struct megasas_dcmd_frame *dcmd; 4543 struct MR_LD_TARGETID_LIST *ci; 4544 dma_addr_t ci_h = 0; 4545 u32 tgtid_count; 4546 4547 ci = instance->ld_targetid_list_buf; 4548 ci_h = instance->ld_targetid_list_buf_h; 4549 4550 cmd = megasas_get_cmd(instance); 4551 4552 if (!cmd) { 4553 dev_warn(&instance->pdev->dev, 4554 "megasas_ld_list_query: Failed to get cmd\n"); 4555 return -ENOMEM; 4556 } 4557 4558 dcmd = &cmd->frame->dcmd; 4559 4560 memset(ci, 0, sizeof(*ci)); 4561 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4562 4563 dcmd->mbox.b[0] = query_type; 4564 if (instance->supportmax256vd) 4565 dcmd->mbox.b[2] = 1; 4566 4567 dcmd->cmd = MFI_CMD_DCMD; 4568 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4569 dcmd->sge_count = 1; 4570 dcmd->flags = MFI_FRAME_DIR_READ; 4571 dcmd->timeout = 0; 4572 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4573 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4574 dcmd->pad_0 = 0; 4575 4576 megasas_set_dma_settings(instance, dcmd, ci_h, 4577 sizeof(struct MR_LD_TARGETID_LIST)); 4578 4579 if ((instance->adapter_type != MFI_SERIES) && 4580 !instance->mask_interrupts) 4581 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4582 else 4583 ret = megasas_issue_polled(instance, cmd); 4584 4585 switch (ret) { 4586 case DCMD_FAILED: 4587 dev_info(&instance->pdev->dev, 4588 "DCMD not supported by firmware - %s %d\n", 4589 __func__, __LINE__); 4590 ret = megasas_get_ld_list(instance); 4591 break; 4592 case DCMD_TIMEOUT: 4593 switch (dcmd_timeout_ocr_possible(instance)) { 4594 case INITIATE_OCR: 4595 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4596 /* 4597 * DCMD timed out from the AEN path. 4598 * The AEN path already holds reset_mutex to avoid PCI access 4599 * while OCR is in progress.
4600 */ 4601 mutex_unlock(&instance->reset_mutex); 4602 megasas_reset_fusion(instance->host, 4603 MFI_IO_TIMEOUT_OCR); 4604 mutex_lock(&instance->reset_mutex); 4605 break; 4606 case KILL_ADAPTER: 4607 megaraid_sas_kill_hba(instance); 4608 break; 4609 case IGNORE_TIMEOUT: 4610 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4611 __func__, __LINE__); 4612 break; 4613 } 4614 4615 break; 4616 case DCMD_SUCCESS: 4617 tgtid_count = le32_to_cpu(ci->count); 4618 4619 if ((tgtid_count > (instance->fw_supported_vd_count))) 4620 break; 4621 4622 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4623 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4624 ids = ci->targetId[ld_index]; 4625 instance->ld_ids[ids] = ci->targetId[ld_index]; 4626 } 4627 4628 break; 4629 } 4630 4631 if (ret != DCMD_TIMEOUT) 4632 megasas_return_cmd(instance, cmd); 4633 4634 return ret; 4635 } 4636 4637 /* 4638 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4639 * instance : Controller's instance 4640 */ 4641 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4642 { 4643 struct fusion_context *fusion; 4644 u32 ventura_map_sz = 0; 4645 4646 fusion = instance->ctrl_context; 4647 /* For MFI based controllers return dummy success */ 4648 if (!fusion) 4649 return; 4650 4651 instance->supportmax256vd = 4652 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 4653 /* Below is additional check to address future FW enhancement */ 4654 if (instance->ctrl_info_buf->max_lds > 64) 4655 instance->supportmax256vd = 1; 4656 4657 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4658 * MEGASAS_MAX_DEV_PER_CHANNEL; 4659 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4660 * MEGASAS_MAX_DEV_PER_CHANNEL; 4661 if (instance->supportmax256vd) { 4662 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4663 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4664 } else { 4665 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4666 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4667 } 4668 4669 dev_info(&instance->pdev->dev, 4670 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 4671 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 
1 : 0, 4672 instance->ctrl_info_buf->max_lds); 4673 4674 if (instance->max_raid_mapsize) { 4675 ventura_map_sz = instance->max_raid_mapsize * 4676 MR_MIN_MAP_SIZE; /* 64k */ 4677 fusion->current_map_sz = ventura_map_sz; 4678 fusion->max_map_sz = ventura_map_sz; 4679 } else { 4680 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4681 (sizeof(struct MR_LD_SPAN_MAP) * 4682 (instance->fw_supported_vd_count - 1)); 4683 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4684 4685 fusion->max_map_sz = 4686 max(fusion->old_map_sz, fusion->new_map_sz); 4687 4688 if (instance->supportmax256vd) 4689 fusion->current_map_sz = fusion->new_map_sz; 4690 else 4691 fusion->current_map_sz = fusion->old_map_sz; 4692 } 4693 /* irrespective of FW raid maps, driver raid map is constant */ 4694 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4695 } 4696 4697 /* 4698 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 4699 * dcmd.hdr.length - number of bytes to read 4700 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 4701 * Desc: Fill in snapdump properties 4702 * Status: MFI_STAT_OK- Command successful 4703 */ 4704 void megasas_get_snapdump_properties(struct megasas_instance *instance) 4705 { 4706 int ret = 0; 4707 struct megasas_cmd *cmd; 4708 struct megasas_dcmd_frame *dcmd; 4709 struct MR_SNAPDUMP_PROPERTIES *ci; 4710 dma_addr_t ci_h = 0; 4711 4712 ci = instance->snapdump_prop; 4713 ci_h = instance->snapdump_prop_h; 4714 4715 if (!ci) 4716 return; 4717 4718 cmd = megasas_get_cmd(instance); 4719 4720 if (!cmd) { 4721 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 4722 return; 4723 } 4724 4725 dcmd = &cmd->frame->dcmd; 4726 4727 memset(ci, 0, sizeof(*ci)); 4728 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4729 4730 dcmd->cmd = MFI_CMD_DCMD; 4731 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4732 dcmd->sge_count = 1; 4733 dcmd->flags = MFI_FRAME_DIR_READ; 4734 dcmd->timeout = 0; 4735 dcmd->pad_0 = 0; 4736 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4737 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 4738 4739 megasas_set_dma_settings(instance, dcmd, ci_h, 4740 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4741 4742 if (!instance->mask_interrupts) { 4743 ret = megasas_issue_blocked_cmd(instance, cmd, 4744 MFI_IO_TIMEOUT_SECS); 4745 } else { 4746 ret = megasas_issue_polled(instance, cmd); 4747 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4748 } 4749 4750 switch (ret) { 4751 case DCMD_SUCCESS: 4752 instance->snapdump_wait_time = 4753 min_t(u8, ci->trigger_min_num_sec_before_ocr, 4754 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 4755 break; 4756 4757 case DCMD_TIMEOUT: 4758 switch (dcmd_timeout_ocr_possible(instance)) { 4759 case INITIATE_OCR: 4760 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4761 megasas_reset_fusion(instance->host, 4762 MFI_IO_TIMEOUT_OCR); 4763 break; 4764 case KILL_ADAPTER: 4765 megaraid_sas_kill_hba(instance); 4766 break; 4767 case IGNORE_TIMEOUT: 4768 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4769 __func__, __LINE__); 4770 break; 4771 } 4772 } 4773 4774 if (ret != DCMD_TIMEOUT) 4775 megasas_return_cmd(instance, cmd); 4776 } 4777 4778 /** 4779 * megasas_get_controller_info - Returns FW's controller structure 4780 * @instance: Adapter soft state 4781 * 4782 * Issues an internal command (DCMD) to get the FW's controller structure. 4783 * This information is mainly used to find out the maximum IO transfer per 4784 * command supported by the FW. 
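 * On DCMD_SUCCESS the fields of interest are converted to CPU endianness and cached in the adapter soft state (OCR support, secure JBOD, NVMe passthrough, snapdump wait time, etc.).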
4785 */ 4786 int 4787 megasas_get_ctrl_info(struct megasas_instance *instance) 4788 { 4789 int ret = 0; 4790 struct megasas_cmd *cmd; 4791 struct megasas_dcmd_frame *dcmd; 4792 struct megasas_ctrl_info *ci; 4793 dma_addr_t ci_h = 0; 4794 4795 ci = instance->ctrl_info_buf; 4796 ci_h = instance->ctrl_info_buf_h; 4797 4798 cmd = megasas_get_cmd(instance); 4799 4800 if (!cmd) { 4801 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4802 return -ENOMEM; 4803 } 4804 4805 dcmd = &cmd->frame->dcmd; 4806 4807 memset(ci, 0, sizeof(*ci)); 4808 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4809 4810 dcmd->cmd = MFI_CMD_DCMD; 4811 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4812 dcmd->sge_count = 1; 4813 dcmd->flags = MFI_FRAME_DIR_READ; 4814 dcmd->timeout = 0; 4815 dcmd->pad_0 = 0; 4816 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4817 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4818 dcmd->mbox.b[0] = 1; 4819 4820 megasas_set_dma_settings(instance, dcmd, ci_h, 4821 sizeof(struct megasas_ctrl_info)); 4822 4823 if ((instance->adapter_type != MFI_SERIES) && 4824 !instance->mask_interrupts) { 4825 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4826 } else { 4827 ret = megasas_issue_polled(instance, cmd); 4828 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4829 } 4830 4831 switch (ret) { 4832 case DCMD_SUCCESS: 4833 /* Save required controller information in 4834 * CPU endianness format. 4835 */ 4836 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 4837 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 4838 le32_to_cpus((u32 *)&ci->adapterOperations2); 4839 le32_to_cpus((u32 *)&ci->adapterOperations3); 4840 le16_to_cpus((u16 *)&ci->adapter_operations4); 4841 4842 /* Update the latest Ext VD info. 4843 * From Init path, store current firmware details. 4844 * From OCR path, detect any firmware properties changes. 4845 * in case of Firmware upgrade without system reboot. 4846 */ 4847 megasas_update_ext_vd_details(instance); 4848 instance->use_seqnum_jbod_fp = 4849 ci->adapterOperations3.useSeqNumJbodFP; 4850 instance->support_morethan256jbod = 4851 ci->adapter_operations4.support_pd_map_target_id; 4852 instance->support_nvme_passthru = 4853 ci->adapter_operations4.support_nvme_passthru; 4854 instance->task_abort_tmo = ci->TaskAbortTO; 4855 instance->max_reset_tmo = ci->MaxResetTO; 4856 4857 /*Check whether controller is iMR or MR */ 4858 instance->is_imr = (ci->memory_size ? 0 : 1); 4859 4860 instance->snapdump_wait_time = 4861 (ci->properties.on_off_properties2.enable_snap_dump ? 4862 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 4863 4864 dev_info(&instance->pdev->dev, 4865 "controller type\t: %s(%dMB)\n", 4866 instance->is_imr ? "iMR" : "MR", 4867 le16_to_cpu(ci->memory_size)); 4868 4869 instance->disableOnlineCtrlReset = 4870 ci->properties.OnOffProperties.disableOnlineCtrlReset; 4871 instance->secure_jbod_support = 4872 ci->adapterOperations3.supportSecurityonJBOD; 4873 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4874 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4875 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4876 instance->secure_jbod_support ? "Yes" : "No"); 4877 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 4878 instance->support_nvme_passthru ? 
"Yes" : "No"); 4879 dev_info(&instance->pdev->dev, 4880 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 4881 instance->task_abort_tmo, instance->max_reset_tmo); 4882 4883 break; 4884 4885 case DCMD_TIMEOUT: 4886 switch (dcmd_timeout_ocr_possible(instance)) { 4887 case INITIATE_OCR: 4888 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4889 megasas_reset_fusion(instance->host, 4890 MFI_IO_TIMEOUT_OCR); 4891 break; 4892 case KILL_ADAPTER: 4893 megaraid_sas_kill_hba(instance); 4894 break; 4895 case IGNORE_TIMEOUT: 4896 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4897 __func__, __LINE__); 4898 break; 4899 } 4900 break; 4901 case DCMD_FAILED: 4902 megaraid_sas_kill_hba(instance); 4903 break; 4904 4905 } 4906 4907 if (ret != DCMD_TIMEOUT) 4908 megasas_return_cmd(instance, cmd); 4909 4910 return ret; 4911 } 4912 4913 /* 4914 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 4915 * to firmware 4916 * 4917 * @instance: Adapter soft state 4918 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 4919 MR_CRASH_BUF_TURN_OFF = 0 4920 MR_CRASH_BUF_TURN_ON = 1 4921 * @return 0 on success non-zero on failure. 4922 * Issues an internal command (DCMD) to set parameters for crash dump feature. 4923 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 4924 * that driver supports crash dump feature. This DCMD will be sent only if 4925 * crash dump feature is supported by the FW. 4926 * 4927 */ 4928 int megasas_set_crash_dump_params(struct megasas_instance *instance, 4929 u8 crash_buf_state) 4930 { 4931 int ret = 0; 4932 struct megasas_cmd *cmd; 4933 struct megasas_dcmd_frame *dcmd; 4934 4935 cmd = megasas_get_cmd(instance); 4936 4937 if (!cmd) { 4938 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 4939 return -ENOMEM; 4940 } 4941 4942 4943 dcmd = &cmd->frame->dcmd; 4944 4945 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4946 dcmd->mbox.b[0] = crash_buf_state; 4947 dcmd->cmd = MFI_CMD_DCMD; 4948 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4949 dcmd->sge_count = 1; 4950 dcmd->flags = MFI_FRAME_DIR_NONE; 4951 dcmd->timeout = 0; 4952 dcmd->pad_0 = 0; 4953 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4954 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 4955 4956 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 4957 CRASH_DMA_BUF_SIZE); 4958 4959 if ((instance->adapter_type != MFI_SERIES) && 4960 !instance->mask_interrupts) 4961 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4962 else 4963 ret = megasas_issue_polled(instance, cmd); 4964 4965 if (ret == DCMD_TIMEOUT) { 4966 switch (dcmd_timeout_ocr_possible(instance)) { 4967 case INITIATE_OCR: 4968 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4969 megasas_reset_fusion(instance->host, 4970 MFI_IO_TIMEOUT_OCR); 4971 break; 4972 case KILL_ADAPTER: 4973 megaraid_sas_kill_hba(instance); 4974 break; 4975 case IGNORE_TIMEOUT: 4976 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4977 __func__, __LINE__); 4978 break; 4979 } 4980 } else 4981 megasas_return_cmd(instance, cmd); 4982 4983 return ret; 4984 } 4985 4986 /** 4987 * megasas_issue_init_mfi - Initializes the FW 4988 * @instance: Adapter soft state 4989 * 4990 * Issues the INIT MFI cmd 4991 */ 4992 static int 4993 megasas_issue_init_mfi(struct megasas_instance *instance) 4994 { 4995 __le32 context; 4996 struct megasas_cmd *cmd; 4997 struct megasas_init_frame *init_frame; 4998 struct megasas_init_queue_info *initq_info; 4999 dma_addr_t init_frame_h; 5000 dma_addr_t 
initq_info_h; 5001 5002 /* 5003 * Prepare a init frame. Note the init frame points to queue info 5004 * structure. Each frame has SGL allocated after first 64 bytes. For 5005 * this frame - since we don't need any SGL - we use SGL's space as 5006 * queue info structure 5007 * 5008 * We will not get a NULL command below. We just created the pool. 5009 */ 5010 cmd = megasas_get_cmd(instance); 5011 5012 init_frame = (struct megasas_init_frame *)cmd->frame; 5013 initq_info = (struct megasas_init_queue_info *) 5014 ((unsigned long)init_frame + 64); 5015 5016 init_frame_h = cmd->frame_phys_addr; 5017 initq_info_h = init_frame_h + 64; 5018 5019 context = init_frame->context; 5020 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5021 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5022 init_frame->context = context; 5023 5024 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5025 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5026 5027 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5028 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5029 5030 init_frame->cmd = MFI_CMD_INIT; 5031 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5032 init_frame->queue_info_new_phys_addr_lo = 5033 cpu_to_le32(lower_32_bits(initq_info_h)); 5034 init_frame->queue_info_new_phys_addr_hi = 5035 cpu_to_le32(upper_32_bits(initq_info_h)); 5036 5037 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5038 5039 /* 5040 * disable the intr before firing the init frame to FW 5041 */ 5042 instance->instancet->disable_intr(instance); 5043 5044 /* 5045 * Issue the init frame in polled mode 5046 */ 5047 5048 if (megasas_issue_polled(instance, cmd)) { 5049 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5050 megasas_return_cmd(instance, cmd); 5051 goto fail_fw_init; 5052 } 5053 5054 megasas_return_cmd(instance, cmd); 5055 5056 return 0; 5057 5058 fail_fw_init: 5059 return -EINVAL; 5060 } 5061 5062 static u32 5063 megasas_init_adapter_mfi(struct megasas_instance *instance) 5064 { 5065 u32 context_sz; 5066 u32 reply_q_sz; 5067 5068 /* 5069 * Get various operational parameters from status register 5070 */ 5071 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5072 /* 5073 * Reduce the max supported cmds by 1. This is to ensure that the 5074 * reply_q_sz (1 more than the max cmd that driver may send) 5075 * does not exceed max cmds that the FW can support 5076 */ 5077 instance->max_fw_cmds = instance->max_fw_cmds-1; 5078 instance->max_mfi_cmds = instance->max_fw_cmds; 5079 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5080 0x10; 5081 /* 5082 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5083 * are reserved for IOCTL + driver's internal DCMDs. 
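 * Other MFI adapters reserve MEGASAS_INT_CMDS instead; in either case the reserved commands are subtracted from max_fw_cmds to arrive at max_scsi_cmds below.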
5084 */ 5085 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5086 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5087 instance->max_scsi_cmds = (instance->max_fw_cmds - 5088 MEGASAS_SKINNY_INT_CMDS); 5089 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5090 } else { 5091 instance->max_scsi_cmds = (instance->max_fw_cmds - 5092 MEGASAS_INT_CMDS); 5093 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5094 } 5095 5096 instance->cur_can_queue = instance->max_scsi_cmds; 5097 /* 5098 * Create a pool of commands 5099 */ 5100 if (megasas_alloc_cmds(instance)) 5101 goto fail_alloc_cmds; 5102 5103 /* 5104 * Allocate memory for reply queue. Length of reply queue should 5105 * be _one_ more than the maximum commands handled by the firmware. 5106 * 5107 * Note: When FW completes commands, it places corresponding contex 5108 * values in this circular reply queue. This circular queue is a fairly 5109 * typical producer-consumer queue. FW is the producer (of completed 5110 * commands) and the driver is the consumer. 5111 */ 5112 context_sz = sizeof(u32); 5113 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5114 5115 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5116 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5117 5118 if (!instance->reply_queue) { 5119 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5120 goto fail_reply_queue; 5121 } 5122 5123 if (megasas_issue_init_mfi(instance)) 5124 goto fail_fw_init; 5125 5126 if (megasas_get_ctrl_info(instance)) { 5127 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5128 "Fail from %s %d\n", instance->unique_id, 5129 __func__, __LINE__); 5130 goto fail_fw_init; 5131 } 5132 5133 instance->fw_support_ieee = 0; 5134 instance->fw_support_ieee = 5135 (instance->instancet->read_fw_status_reg(instance) & 5136 0x04000000); 5137 5138 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5139 instance->fw_support_ieee); 5140 5141 if (instance->fw_support_ieee) 5142 instance->flag_ieee = 1; 5143 5144 return 0; 5145 5146 fail_fw_init: 5147 5148 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5149 instance->reply_queue, instance->reply_queue_h); 5150 fail_reply_queue: 5151 megasas_free_cmds(instance); 5152 5153 fail_alloc_cmds: 5154 return 1; 5155 } 5156 5157 /* 5158 * megasas_setup_irqs_ioapic - register legacy interrupts. 5159 * @instance: Adapter soft state 5160 * 5161 * Do not enable interrupt, only setup ISRs. 5162 * 5163 * Return 0 on success. 5164 */ 5165 static int 5166 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5167 { 5168 struct pci_dev *pdev; 5169 5170 pdev = instance->pdev; 5171 instance->irq_context[0].instance = instance; 5172 instance->irq_context[0].MSIxIndex = 0; 5173 if (request_irq(pci_irq_vector(pdev, 0), 5174 instance->instancet->service_isr, IRQF_SHARED, 5175 "megasas", &instance->irq_context[0])) { 5176 dev_err(&instance->pdev->dev, 5177 "Failed to register IRQ from %s %d\n", 5178 __func__, __LINE__); 5179 return -1; 5180 } 5181 return 0; 5182 } 5183 5184 /** 5185 * megasas_setup_irqs_msix - register MSI-x interrupts. 5186 * @instance: Adapter soft state 5187 * @is_probe: Driver probe check 5188 * 5189 * Do not enable interrupt, only setup ISRs. 5190 * 5191 * Return 0 on success. 
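 * If request_irq() fails for any vector, the vectors registered so far are freed; during probe the driver then falls back to a single legacy IO-APIC interrupt, otherwise the failure is propagated to the caller.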
5192 */ 5193 static int 5194 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5195 { 5196 int i, j; 5197 struct pci_dev *pdev; 5198 5199 pdev = instance->pdev; 5200 5201 /* Try MSI-x */ 5202 for (i = 0; i < instance->msix_vectors; i++) { 5203 instance->irq_context[i].instance = instance; 5204 instance->irq_context[i].MSIxIndex = i; 5205 if (request_irq(pci_irq_vector(pdev, i), 5206 instance->instancet->service_isr, 0, "megasas", 5207 &instance->irq_context[i])) { 5208 dev_err(&instance->pdev->dev, 5209 "Failed to register IRQ for vector %d.\n", i); 5210 for (j = 0; j < i; j++) 5211 free_irq(pci_irq_vector(pdev, j), 5212 &instance->irq_context[j]); 5213 /* Retry irq register for IO_APIC*/ 5214 instance->msix_vectors = 0; 5215 if (is_probe) { 5216 pci_free_irq_vectors(instance->pdev); 5217 return megasas_setup_irqs_ioapic(instance); 5218 } else { 5219 return -1; 5220 } 5221 } 5222 } 5223 return 0; 5224 } 5225 5226 /* 5227 * megasas_destroy_irqs- unregister interrupts. 5228 * @instance: Adapter soft state 5229 * return: void 5230 */ 5231 static void 5232 megasas_destroy_irqs(struct megasas_instance *instance) { 5233 5234 int i; 5235 5236 if (instance->msix_vectors) 5237 for (i = 0; i < instance->msix_vectors; i++) { 5238 free_irq(pci_irq_vector(instance->pdev, i), 5239 &instance->irq_context[i]); 5240 } 5241 else 5242 free_irq(pci_irq_vector(instance->pdev, 0), 5243 &instance->irq_context[0]); 5244 } 5245 5246 /** 5247 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5248 * @instance: Adapter soft state 5249 * @is_probe: Driver probe check 5250 * 5251 * Return 0 on success. 5252 */ 5253 void 5254 megasas_setup_jbod_map(struct megasas_instance *instance) 5255 { 5256 int i; 5257 struct fusion_context *fusion = instance->ctrl_context; 5258 u32 pd_seq_map_sz; 5259 5260 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5261 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5262 5263 if (reset_devices || !fusion || 5264 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) { 5265 dev_info(&instance->pdev->dev, 5266 "Jbod map is not supported %s %d\n", 5267 __func__, __LINE__); 5268 instance->use_seqnum_jbod_fp = false; 5269 return; 5270 } 5271 5272 if (fusion->pd_seq_sync[0]) 5273 goto skip_alloc; 5274 5275 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5276 fusion->pd_seq_sync[i] = dma_alloc_coherent 5277 (&instance->pdev->dev, pd_seq_map_sz, 5278 &fusion->pd_seq_phys[i], GFP_KERNEL); 5279 if (!fusion->pd_seq_sync[i]) { 5280 dev_err(&instance->pdev->dev, 5281 "Failed to allocate memory from %s %d\n", 5282 __func__, __LINE__); 5283 if (i == 1) { 5284 dma_free_coherent(&instance->pdev->dev, 5285 pd_seq_map_sz, fusion->pd_seq_sync[0], 5286 fusion->pd_seq_phys[0]); 5287 fusion->pd_seq_sync[0] = NULL; 5288 } 5289 instance->use_seqnum_jbod_fp = false; 5290 return; 5291 } 5292 } 5293 5294 skip_alloc: 5295 if (!megasas_sync_pd_seq_num(instance, false) && 5296 !megasas_sync_pd_seq_num(instance, true)) 5297 instance->use_seqnum_jbod_fp = true; 5298 else 5299 instance->use_seqnum_jbod_fp = false; 5300 } 5301 5302 static void megasas_setup_reply_map(struct megasas_instance *instance) 5303 { 5304 const struct cpumask *mask; 5305 unsigned int queue, cpu; 5306 5307 for (queue = 0; queue < instance->msix_vectors; queue++) { 5308 mask = pci_irq_get_affinity(instance->pdev, queue); 5309 if (!mask) 5310 goto fallback; 5311 5312 for_each_cpu(cpu, mask) 5313 instance->reply_map[cpu] = queue; 5314 } 5315 return; 5316 5317 fallback: 5318 for_each_possible_cpu(cpu) 
5319 instance->reply_map[cpu] = cpu % instance->msix_vectors; 5320 } 5321 5322 /** 5323 * megasas_init_fw - Initializes the FW 5324 * @instance: Adapter soft state 5325 * 5326 * This is the main function for initializing firmware 5327 */ 5328 5329 static int megasas_init_fw(struct megasas_instance *instance) 5330 { 5331 u32 max_sectors_1; 5332 u32 max_sectors_2, tmp_sectors, msix_enable; 5333 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 5334 resource_size_t base_addr; 5335 struct megasas_ctrl_info *ctrl_info = NULL; 5336 unsigned long bar_list; 5337 int i, j, loop, fw_msix_count = 0; 5338 struct IOV_111 *iovPtr; 5339 struct fusion_context *fusion; 5340 bool do_adp_reset = true; 5341 5342 fusion = instance->ctrl_context; 5343 5344 /* Find first memory bar */ 5345 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5346 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5347 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5348 "megasas: LSI")) { 5349 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5350 return -EBUSY; 5351 } 5352 5353 base_addr = pci_resource_start(instance->pdev, instance->bar); 5354 instance->reg_set = ioremap_nocache(base_addr, 8192); 5355 5356 if (!instance->reg_set) { 5357 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5358 goto fail_ioremap; 5359 } 5360 5361 if (instance->adapter_type != MFI_SERIES) 5362 instance->instancet = &megasas_instance_template_fusion; 5363 else { 5364 switch (instance->pdev->device) { 5365 case PCI_DEVICE_ID_LSI_SAS1078R: 5366 case PCI_DEVICE_ID_LSI_SAS1078DE: 5367 instance->instancet = &megasas_instance_template_ppc; 5368 break; 5369 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5370 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5371 instance->instancet = &megasas_instance_template_gen2; 5372 break; 5373 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5374 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5375 instance->instancet = &megasas_instance_template_skinny; 5376 break; 5377 case PCI_DEVICE_ID_LSI_SAS1064R: 5378 case PCI_DEVICE_ID_DELL_PERC5: 5379 default: 5380 instance->instancet = &megasas_instance_template_xscale; 5381 instance->pd_list_not_supported = 1; 5382 break; 5383 } 5384 } 5385 5386 if (megasas_transition_to_ready(instance, 0)) { 5387 if (instance->adapter_type >= INVADER_SERIES) { 5388 status_reg = instance->instancet->read_fw_status_reg( 5389 instance); 5390 do_adp_reset = status_reg & MFI_RESET_ADAPTER; 5391 } 5392 5393 if (do_adp_reset) { 5394 atomic_set(&instance->fw_reset_no_pci_access, 1); 5395 instance->instancet->adp_reset 5396 (instance, instance->reg_set); 5397 atomic_set(&instance->fw_reset_no_pci_access, 0); 5398 dev_info(&instance->pdev->dev, 5399 "FW restarted successfully from %s!\n", 5400 __func__); 5401 5402 /*waiting for about 30 second before retry*/ 5403 ssleep(30); 5404 5405 if (megasas_transition_to_ready(instance, 0)) 5406 goto fail_ready_state; 5407 } else { 5408 goto fail_ready_state; 5409 } 5410 } 5411 5412 megasas_init_ctrl_params(instance); 5413 5414 if (megasas_set_dma_mask(instance)) 5415 goto fail_ready_state; 5416 5417 if (megasas_alloc_ctrl_mem(instance)) 5418 goto fail_alloc_dma_buf; 5419 5420 if (megasas_alloc_ctrl_dma_buffers(instance)) 5421 goto fail_alloc_dma_buf; 5422 5423 fusion = instance->ctrl_context; 5424 5425 if (instance->adapter_type >= VENTURA_SERIES) { 5426 scratch_pad_2 = 5427 megasas_readl(instance, 5428 &instance->reg_set->outbound_scratch_pad_2); 5429 instance->max_raid_mapsize = ((scratch_pad_2 >> 5430 
MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5431 MR_MAX_RAID_MAP_SIZE_MASK); 5432 } 5433 5434 /* Check if MSI-X is supported while in ready state */ 5435 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 5436 0x4000000) >> 0x1a; 5437 if (msix_enable && !msix_disable) { 5438 int irq_flags = PCI_IRQ_MSIX; 5439 5440 scratch_pad_1 = megasas_readl 5441 (instance, &instance->reg_set->outbound_scratch_pad_1); 5442 /* Check max MSI-X vectors */ 5443 if (fusion) { 5444 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5445 /* Thunderbolt Series*/ 5446 instance->msix_vectors = (scratch_pad_1 5447 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5448 fw_msix_count = instance->msix_vectors; 5449 } else { 5450 instance->msix_vectors = ((scratch_pad_1 5451 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5452 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5453 5454 /* 5455 * For Invader series, > 8 MSI-x vectors 5456 * supported by FW/HW implies combined 5457 * reply queue mode is enabled. 5458 * For Ventura series, > 16 MSI-x vectors 5459 * supported by FW/HW implies combined 5460 * reply queue mode is enabled. 5461 */ 5462 switch (instance->adapter_type) { 5463 case INVADER_SERIES: 5464 if (instance->msix_vectors > 8) 5465 instance->msix_combined = true; 5466 break; 5467 case AERO_SERIES: 5468 case VENTURA_SERIES: 5469 if (instance->msix_vectors > 16) 5470 instance->msix_combined = true; 5471 break; 5472 } 5473 5474 if (rdpq_enable) 5475 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 5476 1 : 0; 5477 fw_msix_count = instance->msix_vectors; 5478 /* Save 1-15 reply post index address to local memory 5479 * Index 0 is already saved from reg offset 5480 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5481 */ 5482 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5483 instance->reply_post_host_index_addr[loop] = 5484 (u32 __iomem *) 5485 ((u8 __iomem *)instance->reg_set + 5486 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5487 + (loop * 0x10)); 5488 } 5489 } 5490 if (msix_vectors) 5491 instance->msix_vectors = min(msix_vectors, 5492 instance->msix_vectors); 5493 } else /* MFI adapters */ 5494 instance->msix_vectors = 1; 5495 /* Don't bother allocating more MSI-X vectors than cpus */ 5496 instance->msix_vectors = min(instance->msix_vectors, 5497 (unsigned int)num_online_cpus()); 5498 if (smp_affinity_enable) 5499 irq_flags |= PCI_IRQ_AFFINITY; 5500 i = pci_alloc_irq_vectors(instance->pdev, 1, 5501 instance->msix_vectors, irq_flags); 5502 if (i > 0) 5503 instance->msix_vectors = i; 5504 else 5505 instance->msix_vectors = 0; 5506 } 5507 /* 5508 * MSI-X host index 0 is common for all adapter. 5509 * It is used for all MPT based Adapters. 5510 */ 5511 if (instance->msix_combined) { 5512 instance->reply_post_host_index_addr[0] = 5513 (u32 *)((u8 *)instance->reg_set + 5514 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 5515 } else { 5516 instance->reply_post_host_index_addr[0] = 5517 (u32 *)((u8 *)instance->reg_set + 5518 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5519 } 5520 5521 if (!instance->msix_vectors) { 5522 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5523 if (i < 0) 5524 goto fail_init_adapter; 5525 } 5526 5527 megasas_setup_reply_map(instance); 5528 5529 dev_info(&instance->pdev->dev, 5530 "firmware supports msix\t: (%d)", fw_msix_count); 5531 dev_info(&instance->pdev->dev, 5532 "current msix/online cpus\t: (%d/%d)\n", 5533 instance->msix_vectors, (unsigned int)num_online_cpus()); 5534 dev_info(&instance->pdev->dev, 5535 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? 
"enabled" : "disabled"); 5536 5537 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5538 (unsigned long)instance); 5539 5540 /* 5541 * Below are default value for legacy Firmware. 5542 * non-fusion based controllers 5543 */ 5544 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5545 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5546 /* Get operational params, sge flags, send init cmd to controller */ 5547 if (instance->instancet->init_adapter(instance)) 5548 goto fail_init_adapter; 5549 5550 if (instance->adapter_type >= VENTURA_SERIES) { 5551 scratch_pad_3 = 5552 megasas_readl(instance, 5553 &instance->reg_set->outbound_scratch_pad_3); 5554 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 5555 MR_DEFAULT_NVME_PAGE_SHIFT) 5556 instance->nvme_page_size = 5557 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 5558 5559 dev_info(&instance->pdev->dev, 5560 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5561 } 5562 5563 if (instance->msix_vectors ? 5564 megasas_setup_irqs_msix(instance, 1) : 5565 megasas_setup_irqs_ioapic(instance)) 5566 goto fail_init_adapter; 5567 5568 instance->instancet->enable_intr(instance); 5569 5570 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5571 5572 megasas_setup_jbod_map(instance); 5573 5574 /** for passthrough 5575 * the following function will get the PD LIST. 5576 */ 5577 memset(instance->pd_list, 0, 5578 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5579 if (megasas_get_pd_list(instance) < 0) { 5580 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5581 goto fail_get_ld_pd_list; 5582 } 5583 5584 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5585 5586 /* stream detection initialization */ 5587 if (instance->adapter_type >= VENTURA_SERIES) { 5588 fusion->stream_detect_by_ld = 5589 kcalloc(MAX_LOGICAL_DRIVES_EXT, 5590 sizeof(struct LD_STREAM_DETECT *), 5591 GFP_KERNEL); 5592 if (!fusion->stream_detect_by_ld) { 5593 dev_err(&instance->pdev->dev, 5594 "unable to allocate stream detection for pool of LDs\n"); 5595 goto fail_get_ld_pd_list; 5596 } 5597 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5598 fusion->stream_detect_by_ld[i] = 5599 kzalloc(sizeof(struct LD_STREAM_DETECT), 5600 GFP_KERNEL); 5601 if (!fusion->stream_detect_by_ld[i]) { 5602 dev_err(&instance->pdev->dev, 5603 "unable to allocate stream detect by LD\n "); 5604 for (j = 0; j < i; ++j) 5605 kfree(fusion->stream_detect_by_ld[j]); 5606 kfree(fusion->stream_detect_by_ld); 5607 fusion->stream_detect_by_ld = NULL; 5608 goto fail_get_ld_pd_list; 5609 } 5610 fusion->stream_detect_by_ld[i]->mru_bit_map 5611 = MR_STREAM_BITMAP; 5612 } 5613 } 5614 5615 if (megasas_ld_list_query(instance, 5616 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5617 goto fail_get_ld_pd_list; 5618 5619 /* 5620 * Compute the max allowed sectors per IO: The controller info has two 5621 * limits on max sectors. Driver should use the minimum of these two. 5622 * 5623 * 1 << stripe_sz_ops.min = max sectors per strip 5624 * 5625 * Note that older firmwares ( < FW ver 30) didn't report information 5626 * to calculate max_sectors_1. So the number ended up as zero always. 
5627 */ 5628 tmp_sectors = 0; 5629 ctrl_info = instance->ctrl_info_buf; 5630 5631 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5632 le16_to_cpu(ctrl_info->max_strips_per_io); 5633 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5634 5635 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5636 5637 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5638 instance->passive = ctrl_info->cluster.passive; 5639 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5640 instance->UnevenSpanSupport = 5641 ctrl_info->adapterOperations2.supportUnevenSpans; 5642 if (instance->UnevenSpanSupport) { 5643 struct fusion_context *fusion = instance->ctrl_context; 5644 if (MR_ValidateMapInfo(instance, instance->map_id)) 5645 fusion->fast_path_io = 1; 5646 else 5647 fusion->fast_path_io = 0; 5648 5649 } 5650 if (ctrl_info->host_interface.SRIOV) { 5651 instance->requestorId = ctrl_info->iov.requestorId; 5652 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5653 if (!ctrl_info->adapterOperations2.activePassive) 5654 instance->PlasmaFW111 = 1; 5655 5656 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5657 instance->PlasmaFW111 ? "1.11" : "new"); 5658 5659 if (instance->PlasmaFW111) { 5660 iovPtr = (struct IOV_111 *) 5661 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5662 instance->requestorId = iovPtr->requestorId; 5663 } 5664 } 5665 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5666 instance->requestorId); 5667 } 5668 5669 instance->crash_dump_fw_support = 5670 ctrl_info->adapterOperations3.supportCrashDump; 5671 instance->crash_dump_drv_support = 5672 (instance->crash_dump_fw_support && 5673 instance->crash_dump_buf); 5674 if (instance->crash_dump_drv_support) 5675 megasas_set_crash_dump_params(instance, 5676 MR_CRASH_BUF_TURN_OFF); 5677 5678 else { 5679 if (instance->crash_dump_buf) 5680 dma_free_coherent(&instance->pdev->dev, 5681 CRASH_DMA_BUF_SIZE, 5682 instance->crash_dump_buf, 5683 instance->crash_dump_h); 5684 instance->crash_dump_buf = NULL; 5685 } 5686 5687 if (instance->snapdump_wait_time) { 5688 megasas_get_snapdump_properties(instance); 5689 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 5690 instance->snapdump_wait_time); 5691 } 5692 5693 dev_info(&instance->pdev->dev, 5694 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5695 le16_to_cpu(ctrl_info->pci.vendor_id), 5696 le16_to_cpu(ctrl_info->pci.device_id), 5697 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5698 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5699 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5700 instance->UnevenSpanSupport ? "yes" : "no"); 5701 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5702 instance->crash_dump_drv_support ? "yes" : "no"); 5703 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5704 instance->use_seqnum_jbod_fp ? 
"yes" : "no"); 5705 5706 instance->max_sectors_per_req = instance->max_num_sge * 5707 SGE_BUFFER_SIZE / 512; 5708 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5709 instance->max_sectors_per_req = tmp_sectors; 5710 5711 /* Check for valid throttlequeuedepth module parameter */ 5712 if (throttlequeuedepth && 5713 throttlequeuedepth <= instance->max_scsi_cmds) 5714 instance->throttlequeuedepth = throttlequeuedepth; 5715 else 5716 instance->throttlequeuedepth = 5717 MEGASAS_THROTTLE_QUEUE_DEPTH; 5718 5719 if ((resetwaittime < 1) || 5720 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 5721 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5722 5723 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5724 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5725 5726 /* Launch SR-IOV heartbeat timer */ 5727 if (instance->requestorId) { 5728 if (!megasas_sriov_start_heartbeat(instance, 1)) { 5729 megasas_start_timer(instance); 5730 } else { 5731 instance->skip_heartbeat_timer_del = 1; 5732 goto fail_get_ld_pd_list; 5733 } 5734 } 5735 5736 /* 5737 * Create and start watchdog thread which will monitor 5738 * controller state every 1 sec and trigger OCR when 5739 * it enters fault state 5740 */ 5741 if (instance->adapter_type != MFI_SERIES) 5742 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 5743 goto fail_start_watchdog; 5744 5745 return 0; 5746 5747 fail_start_watchdog: 5748 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 5749 del_timer_sync(&instance->sriov_heartbeat_timer); 5750 fail_get_ld_pd_list: 5751 instance->instancet->disable_intr(instance); 5752 megasas_destroy_irqs(instance); 5753 fail_init_adapter: 5754 if (instance->msix_vectors) 5755 pci_free_irq_vectors(instance->pdev); 5756 instance->msix_vectors = 0; 5757 fail_alloc_dma_buf: 5758 megasas_free_ctrl_dma_buffers(instance); 5759 megasas_free_ctrl_mem(instance); 5760 fail_ready_state: 5761 iounmap(instance->reg_set); 5762 5763 fail_ioremap: 5764 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5765 5766 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5767 __func__, __LINE__); 5768 return -EINVAL; 5769 } 5770 5771 /** 5772 * megasas_release_mfi - Reverses the FW initialization 5773 * @instance: Adapter soft state 5774 */ 5775 static void megasas_release_mfi(struct megasas_instance *instance) 5776 { 5777 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5778 5779 if (instance->reply_queue) 5780 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5781 instance->reply_queue, instance->reply_queue_h); 5782 5783 megasas_free_cmds(instance); 5784 5785 iounmap(instance->reg_set); 5786 5787 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5788 } 5789 5790 /** 5791 * megasas_get_seq_num - Gets latest event sequence numbers 5792 * @instance: Adapter soft state 5793 * @eli: FW event log sequence numbers information 5794 * 5795 * FW maintains a log of all events in a non-volatile area. Upper layers would 5796 * usually find out the latest sequence number of the events, the seq number at 5797 * the boot etc. They would "read" all the events below the latest seq number 5798 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5799 * number), they would subsribe to AEN (asynchronous event notification) and 5800 * wait for the events to happen. 
5801 */ 5802 static int 5803 megasas_get_seq_num(struct megasas_instance *instance, 5804 struct megasas_evt_log_info *eli) 5805 { 5806 struct megasas_cmd *cmd; 5807 struct megasas_dcmd_frame *dcmd; 5808 struct megasas_evt_log_info *el_info; 5809 dma_addr_t el_info_h = 0; 5810 int ret; 5811 5812 cmd = megasas_get_cmd(instance); 5813 5814 if (!cmd) { 5815 return -ENOMEM; 5816 } 5817 5818 dcmd = &cmd->frame->dcmd; 5819 el_info = dma_zalloc_coherent(&instance->pdev->dev, 5820 sizeof(struct megasas_evt_log_info), &el_info_h, 5821 GFP_KERNEL); 5822 if (!el_info) { 5823 megasas_return_cmd(instance, cmd); 5824 return -ENOMEM; 5825 } 5826 5827 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5828 5829 dcmd->cmd = MFI_CMD_DCMD; 5830 dcmd->cmd_status = 0x0; 5831 dcmd->sge_count = 1; 5832 dcmd->flags = MFI_FRAME_DIR_READ; 5833 dcmd->timeout = 0; 5834 dcmd->pad_0 = 0; 5835 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5836 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5837 5838 megasas_set_dma_settings(instance, dcmd, el_info_h, 5839 sizeof(struct megasas_evt_log_info)); 5840 5841 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5842 if (ret != DCMD_SUCCESS) { 5843 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5844 __func__, __LINE__); 5845 goto dcmd_failed; 5846 } 5847 5848 /* 5849 * Copy the data back into callers buffer 5850 */ 5851 eli->newest_seq_num = el_info->newest_seq_num; 5852 eli->oldest_seq_num = el_info->oldest_seq_num; 5853 eli->clear_seq_num = el_info->clear_seq_num; 5854 eli->shutdown_seq_num = el_info->shutdown_seq_num; 5855 eli->boot_seq_num = el_info->boot_seq_num; 5856 5857 dcmd_failed: 5858 dma_free_coherent(&instance->pdev->dev, 5859 sizeof(struct megasas_evt_log_info), 5860 el_info, el_info_h); 5861 5862 megasas_return_cmd(instance, cmd); 5863 5864 return ret; 5865 } 5866 5867 /** 5868 * megasas_register_aen - Registers for asynchronous event notification 5869 * @instance: Adapter soft state 5870 * @seq_num: The starting sequence number 5871 * @class_locale: Class of the event 5872 * 5873 * This function subscribes for AEN for events beyond the @seq_num. It requests 5874 * to be notified if and only if the event is of type @class_locale 5875 */ 5876 static int 5877 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 5878 u32 class_locale_word) 5879 { 5880 int ret_val; 5881 struct megasas_cmd *cmd; 5882 struct megasas_dcmd_frame *dcmd; 5883 union megasas_evt_class_locale curr_aen; 5884 union megasas_evt_class_locale prev_aen; 5885 5886 /* 5887 * If there an AEN pending already (aen_cmd), check if the 5888 * class_locale of that pending AEN is inclusive of the new 5889 * AEN request we currently have. If it is, then we don't have 5890 * to do anything. In other words, whichever events the current 5891 * AEN request is subscribing to, have already been subscribed 5892 * to. 
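 * For example, a pending registration for class PROGRESS with locale
 * MR_EVT_LOCALE_ALL already covers any narrower request, so no new
 * registration is sent to the FW in that case.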
5893 * 5894 * If the old_cmd is _not_ inclusive, then we have to abort 5895 * that command, form a class_locale that is superset of both 5896 * old and current and re-issue to the FW 5897 */ 5898 5899 curr_aen.word = class_locale_word; 5900 5901 if (instance->aen_cmd) { 5902 5903 prev_aen.word = 5904 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 5905 5906 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 5907 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 5908 dev_info(&instance->pdev->dev, 5909 "%s %d out of range class %d send by application\n", 5910 __func__, __LINE__, curr_aen.members.class); 5911 return 0; 5912 } 5913 5914 /* 5915 * A class whose enum value is smaller is inclusive of all 5916 * higher values. If a PROGRESS (= -1) was previously 5917 * registered, then a new registration requests for higher 5918 * classes need not be sent to FW. They are automatically 5919 * included. 5920 * 5921 * Locale numbers don't have such hierarchy. They are bitmap 5922 * values 5923 */ 5924 if ((prev_aen.members.class <= curr_aen.members.class) && 5925 !((prev_aen.members.locale & curr_aen.members.locale) ^ 5926 curr_aen.members.locale)) { 5927 /* 5928 * Previously issued event registration includes 5929 * current request. Nothing to do. 5930 */ 5931 return 0; 5932 } else { 5933 curr_aen.members.locale |= prev_aen.members.locale; 5934 5935 if (prev_aen.members.class < curr_aen.members.class) 5936 curr_aen.members.class = prev_aen.members.class; 5937 5938 instance->aen_cmd->abort_aen = 1; 5939 ret_val = megasas_issue_blocked_abort_cmd(instance, 5940 instance-> 5941 aen_cmd, 30); 5942 5943 if (ret_val) { 5944 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 5945 "previous AEN command\n"); 5946 return ret_val; 5947 } 5948 } 5949 } 5950 5951 cmd = megasas_get_cmd(instance); 5952 5953 if (!cmd) 5954 return -ENOMEM; 5955 5956 dcmd = &cmd->frame->dcmd; 5957 5958 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 5959 5960 /* 5961 * Prepare DCMD for aen registration 5962 */ 5963 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5964 5965 dcmd->cmd = MFI_CMD_DCMD; 5966 dcmd->cmd_status = 0x0; 5967 dcmd->sge_count = 1; 5968 dcmd->flags = MFI_FRAME_DIR_READ; 5969 dcmd->timeout = 0; 5970 dcmd->pad_0 = 0; 5971 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5972 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 5973 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5974 instance->last_seq_num = seq_num; 5975 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5976 5977 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 5978 sizeof(struct megasas_evt_detail)); 5979 5980 if (instance->aen_cmd != NULL) { 5981 megasas_return_cmd(instance, cmd); 5982 return 0; 5983 } 5984 5985 /* 5986 * Store reference to the cmd used to register for AEN. When an 5987 * application wants us to register for AEN, we have to abort this 5988 * cmd and re-register with a new EVENT LOCALE supplied by that app 5989 */ 5990 instance->aen_cmd = cmd; 5991 5992 /* 5993 * Issue the aen registration frame 5994 */ 5995 instance->instancet->issue_dcmd(instance, cmd); 5996 5997 return 0; 5998 } 5999 6000 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6001 * 6002 * This DCMD will fetch few properties of LD/system PD defined 6003 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 6004 * 6005 * DCMD send by drivers whenever new target is added to the OS. 
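 * The properties are returned through the instance->tgt_prop DMA buffer
 * that is mapped into the frame below, typically so that the driver can
 * tune the queue depth reported for the new scsi_device.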
6006 * 6007 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6008 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6009 * 0 = system PD, 1 = LD. 6010 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6011 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6012 * 6013 * @instance: Adapter soft state 6014 * @sdev: OS provided scsi device 6015 * 6016 * Returns 0 on success non-zero on failure. 6017 */ 6018 int 6019 megasas_get_target_prop(struct megasas_instance *instance, 6020 struct scsi_device *sdev) 6021 { 6022 int ret; 6023 struct megasas_cmd *cmd; 6024 struct megasas_dcmd_frame *dcmd; 6025 u16 targetId = (sdev->channel % 2) + sdev->id; 6026 6027 cmd = megasas_get_cmd(instance); 6028 6029 if (!cmd) { 6030 dev_err(&instance->pdev->dev, 6031 "Failed to get cmd %s\n", __func__); 6032 return -ENOMEM; 6033 } 6034 6035 dcmd = &cmd->frame->dcmd; 6036 6037 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6038 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6039 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6040 6041 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6042 dcmd->cmd = MFI_CMD_DCMD; 6043 dcmd->cmd_status = 0xFF; 6044 dcmd->sge_count = 1; 6045 dcmd->flags = MFI_FRAME_DIR_READ; 6046 dcmd->timeout = 0; 6047 dcmd->pad_0 = 0; 6048 dcmd->data_xfer_len = 6049 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6050 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6051 6052 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6053 sizeof(struct MR_TARGET_PROPERTIES)); 6054 6055 if ((instance->adapter_type != MFI_SERIES) && 6056 !instance->mask_interrupts) 6057 ret = megasas_issue_blocked_cmd(instance, 6058 cmd, MFI_IO_TIMEOUT_SECS); 6059 else 6060 ret = megasas_issue_polled(instance, cmd); 6061 6062 switch (ret) { 6063 case DCMD_TIMEOUT: 6064 switch (dcmd_timeout_ocr_possible(instance)) { 6065 case INITIATE_OCR: 6066 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6067 megasas_reset_fusion(instance->host, 6068 MFI_IO_TIMEOUT_OCR); 6069 break; 6070 case KILL_ADAPTER: 6071 megaraid_sas_kill_hba(instance); 6072 break; 6073 case IGNORE_TIMEOUT: 6074 dev_info(&instance->pdev->dev, 6075 "Ignore DCMD timeout: %s %d\n", 6076 __func__, __LINE__); 6077 break; 6078 } 6079 break; 6080 6081 default: 6082 megasas_return_cmd(instance, cmd); 6083 } 6084 if (ret != DCMD_SUCCESS) 6085 dev_err(&instance->pdev->dev, 6086 "return from %s %d return value %d\n", 6087 __func__, __LINE__, ret); 6088 6089 return ret; 6090 } 6091 6092 /** 6093 * megasas_start_aen - Subscribes to AEN during driver load time 6094 * @instance: Adapter soft state 6095 */ 6096 static int megasas_start_aen(struct megasas_instance *instance) 6097 { 6098 struct megasas_evt_log_info eli; 6099 union megasas_evt_class_locale class_locale; 6100 6101 /* 6102 * Get the latest sequence number from FW 6103 */ 6104 memset(&eli, 0, sizeof(eli)); 6105 6106 if (megasas_get_seq_num(instance, &eli)) 6107 return -1; 6108 6109 /* 6110 * Register AEN with FW for latest sequence number plus 1 6111 */ 6112 class_locale.members.reserved = 0; 6113 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6114 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6115 6116 return megasas_register_aen(instance, 6117 le32_to_cpu(eli.newest_seq_num) + 1, 6118 class_locale.word); 6119 } 6120 6121 /** 6122 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6123 * @instance: Adapter soft state 6124 */ 6125 static int megasas_io_attach(struct megasas_instance *instance) 6126 { 6127 struct Scsi_Host *host = instance->host; 6128 6129 /* 6130 * Export parameters 
required by SCSI mid-layer
6131 */
6132 host->unique_id = instance->unique_id;
6133 host->can_queue = instance->max_scsi_cmds;
6134 host->this_id = instance->init_id;
6135 host->sg_tablesize = instance->max_num_sge;
6136
6137 if (instance->fw_support_ieee)
6138 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6139
6140 /*
6141 * Check if the module parameter value for max_sectors can be used
6142 */
6143 if (max_sectors && max_sectors < instance->max_sectors_per_req)
6144 instance->max_sectors_per_req = max_sectors;
6145 else {
6146 if (max_sectors) {
6147 if (((instance->pdev->device ==
6148 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6149 (instance->pdev->device ==
6150 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6151 (max_sectors <= MEGASAS_MAX_SECTORS)) {
6152 instance->max_sectors_per_req = max_sectors;
6153 } else {
6154 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
6155 "and <= %d (or < 1MB for GEN2 controller)\n",
6156 instance->max_sectors_per_req);
6157 }
6158 }
6159 }
6160
6161 host->max_sectors = instance->max_sectors_per_req;
6162 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6163 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6164 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6165 host->max_lun = MEGASAS_MAX_LUN;
6166 host->max_cmd_len = 16;
6167
6168 /*
6169 * Notify the mid-layer about the new controller
6170 */
6171 if (scsi_add_host(host, &instance->pdev->dev)) {
6172 dev_err(&instance->pdev->dev,
6173 "Failed to add host from %s %d\n",
6174 __func__, __LINE__);
6175 return -ENODEV;
6176 }
6177
6178 return 0;
6179 }
6180
6181 /**
6182 * megasas_set_dma_mask - Set DMA mask for supported controllers
6183 *
6184 * @instance: Adapter soft state
6185 * Description:
6186 *
6187 * For Ventura, driver/FW will operate with 63-bit DMA addresses.
6188 *
6189 * For Invader:
6190 * By default, driver/FW will operate with 32-bit DMA addresses
6191 * for consistent DMA mapping, but if the 32-bit consistent
6192 * DMA mask fails, the driver will retry with a 63-bit consistent
6193 * mask, provided the FW is truly 63-bit DMA capable.
6194 *
6195 * For older controllers (Thunderbolt and MFI based adapters):
6196 * driver/FW will operate with 32-bit consistent DMA addresses.
6197 */
6198 static int
6199 megasas_set_dma_mask(struct megasas_instance *instance)
6200 {
6201 u64 consistent_mask;
6202 struct pci_dev *pdev;
6203 u32 scratch_pad_1;
6204
6205 pdev = instance->pdev;
6206 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6207 DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6208
6209 if (IS_DMA64) {
6210 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6211 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6212 goto fail_set_dma_mask;
6213
6214 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6215 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6216 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6217 /*
6218 * If the 32 bit DMA mask fails as well, then try a 64 bit mask
6219 * for FW capable of handling 64 bit DMA.
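 *
 * The FW advertises extended addressing through the
 * MR_CAN_HANDLE_64_BIT_DMA_OFFSET bit in outbound scratch pad 1,
 * which is checked below before retrying with the wider mask.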
6220 */ 6221 scratch_pad_1 = megasas_readl 6222 (instance, &instance->reg_set->outbound_scratch_pad_1); 6223 6224 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6225 goto fail_set_dma_mask; 6226 else if (dma_set_mask_and_coherent(&pdev->dev, 6227 DMA_BIT_MASK(63))) 6228 goto fail_set_dma_mask; 6229 } 6230 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6231 goto fail_set_dma_mask; 6232 6233 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6234 instance->consistent_mask_64bit = false; 6235 else 6236 instance->consistent_mask_64bit = true; 6237 6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), 6240 (instance->consistent_mask_64bit ? "63" : "32")); 6241 6242 return 0; 6243 6244 fail_set_dma_mask: 6245 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6246 return -1; 6247 6248 } 6249 6250 /* 6251 * megasas_set_adapter_type - Set adapter type. 6252 * Supported controllers can be divided in 6253 * different categories- 6254 * enum MR_ADAPTER_TYPE { 6255 * MFI_SERIES = 1, 6256 * THUNDERBOLT_SERIES = 2, 6257 * INVADER_SERIES = 3, 6258 * VENTURA_SERIES = 4, 6259 * AERO_SERIES = 5, 6260 * }; 6261 * @instance: Adapter soft state 6262 * return: void 6263 */ 6264 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 6265 { 6266 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 6267 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 6268 instance->adapter_type = MFI_SERIES; 6269 } else { 6270 switch (instance->pdev->device) { 6271 case PCI_DEVICE_ID_LSI_AERO_10E1: 6272 case PCI_DEVICE_ID_LSI_AERO_10E2: 6273 case PCI_DEVICE_ID_LSI_AERO_10E5: 6274 case PCI_DEVICE_ID_LSI_AERO_10E6: 6275 instance->adapter_type = AERO_SERIES; 6276 break; 6277 case PCI_DEVICE_ID_LSI_VENTURA: 6278 case PCI_DEVICE_ID_LSI_CRUSADER: 6279 case PCI_DEVICE_ID_LSI_HARPOON: 6280 case PCI_DEVICE_ID_LSI_TOMCAT: 6281 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6282 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6283 instance->adapter_type = VENTURA_SERIES; 6284 break; 6285 case PCI_DEVICE_ID_LSI_FUSION: 6286 case PCI_DEVICE_ID_LSI_PLASMA: 6287 instance->adapter_type = THUNDERBOLT_SERIES; 6288 break; 6289 case PCI_DEVICE_ID_LSI_INVADER: 6290 case PCI_DEVICE_ID_LSI_INTRUDER: 6291 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6292 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6293 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6294 case PCI_DEVICE_ID_LSI_FURY: 6295 instance->adapter_type = INVADER_SERIES; 6296 break; 6297 default: /* For all other supported controllers */ 6298 instance->adapter_type = MFI_SERIES; 6299 break; 6300 } 6301 } 6302 } 6303 6304 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6305 { 6306 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 6307 sizeof(u32), &instance->producer_h, GFP_KERNEL); 6308 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 6309 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 6310 6311 if (!instance->producer || !instance->consumer) { 6312 dev_err(&instance->pdev->dev, 6313 "Failed to allocate memory for producer, consumer\n"); 6314 return -1; 6315 } 6316 6317 *instance->producer = 0; 6318 *instance->consumer = 0; 6319 return 0; 6320 } 6321 6322 /** 6323 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 6324 * structures which are not common across MFI 6325 * adapters and fusion adapters. 6326 * For MFI based adapters, allocate producer and 6327 * consumer buffers. 
For fusion adapters, allocate 6328 * memory for fusion context. 6329 * @instance: Adapter soft state 6330 * return: 0 for SUCCESS 6331 */ 6332 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 6333 { 6334 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 6335 GFP_KERNEL); 6336 if (!instance->reply_map) 6337 return -ENOMEM; 6338 6339 switch (instance->adapter_type) { 6340 case MFI_SERIES: 6341 if (megasas_alloc_mfi_ctrl_mem(instance)) 6342 goto fail; 6343 break; 6344 case AERO_SERIES: 6345 case VENTURA_SERIES: 6346 case THUNDERBOLT_SERIES: 6347 case INVADER_SERIES: 6348 if (megasas_alloc_fusion_context(instance)) 6349 goto fail; 6350 break; 6351 } 6352 6353 return 0; 6354 fail: 6355 kfree(instance->reply_map); 6356 instance->reply_map = NULL; 6357 return -ENOMEM; 6358 } 6359 6360 /* 6361 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 6362 * producer, consumer buffers for MFI adapters 6363 * 6364 * @instance - Adapter soft instance 6365 * 6366 */ 6367 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 6368 { 6369 kfree(instance->reply_map); 6370 if (instance->adapter_type == MFI_SERIES) { 6371 if (instance->producer) 6372 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6373 instance->producer, 6374 instance->producer_h); 6375 if (instance->consumer) 6376 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6377 instance->consumer, 6378 instance->consumer_h); 6379 } else { 6380 megasas_free_fusion_context(instance); 6381 } 6382 } 6383 6384 /** 6385 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 6386 * driver load time 6387 * 6388 * @instance- Adapter soft instance 6389 * @return- O for SUCCESS 6390 */ 6391 static inline 6392 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 6393 { 6394 struct pci_dev *pdev = instance->pdev; 6395 struct fusion_context *fusion = instance->ctrl_context; 6396 6397 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 6398 sizeof(struct megasas_evt_detail), 6399 &instance->evt_detail_h, GFP_KERNEL); 6400 6401 if (!instance->evt_detail) { 6402 dev_err(&instance->pdev->dev, 6403 "Failed to allocate event detail buffer\n"); 6404 return -ENOMEM; 6405 } 6406 6407 if (fusion) { 6408 fusion->ioc_init_request = 6409 dma_alloc_coherent(&pdev->dev, 6410 sizeof(struct MPI2_IOC_INIT_REQUEST), 6411 &fusion->ioc_init_request_phys, 6412 GFP_KERNEL); 6413 6414 if (!fusion->ioc_init_request) { 6415 dev_err(&pdev->dev, 6416 "Failed to allocate PD list buffer\n"); 6417 return -ENOMEM; 6418 } 6419 6420 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 6421 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6422 &instance->snapdump_prop_h, GFP_KERNEL); 6423 6424 if (!instance->snapdump_prop) 6425 dev_err(&pdev->dev, 6426 "Failed to allocate snapdump properties buffer\n"); 6427 } 6428 6429 instance->pd_list_buf = 6430 dma_alloc_coherent(&pdev->dev, 6431 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6432 &instance->pd_list_buf_h, GFP_KERNEL); 6433 6434 if (!instance->pd_list_buf) { 6435 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 6436 return -ENOMEM; 6437 } 6438 6439 instance->ctrl_info_buf = 6440 dma_alloc_coherent(&pdev->dev, 6441 sizeof(struct megasas_ctrl_info), 6442 &instance->ctrl_info_buf_h, GFP_KERNEL); 6443 6444 if (!instance->ctrl_info_buf) { 6445 dev_err(&pdev->dev, 6446 "Failed to allocate controller info buffer\n"); 6447 return -ENOMEM; 6448 } 6449 6450 instance->ld_list_buf = 6451 dma_alloc_coherent(&pdev->dev, 6452 sizeof(struct 
MR_LD_LIST), 6453 &instance->ld_list_buf_h, GFP_KERNEL); 6454 6455 if (!instance->ld_list_buf) { 6456 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 6457 return -ENOMEM; 6458 } 6459 6460 instance->ld_targetid_list_buf = 6461 dma_alloc_coherent(&pdev->dev, 6462 sizeof(struct MR_LD_TARGETID_LIST), 6463 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 6464 6465 if (!instance->ld_targetid_list_buf) { 6466 dev_err(&pdev->dev, 6467 "Failed to allocate LD targetid list buffer\n"); 6468 return -ENOMEM; 6469 } 6470 6471 if (!reset_devices) { 6472 instance->system_info_buf = 6473 dma_alloc_coherent(&pdev->dev, 6474 sizeof(struct MR_DRV_SYSTEM_INFO), 6475 &instance->system_info_h, GFP_KERNEL); 6476 instance->pd_info = 6477 dma_alloc_coherent(&pdev->dev, 6478 sizeof(struct MR_PD_INFO), 6479 &instance->pd_info_h, GFP_KERNEL); 6480 instance->tgt_prop = 6481 dma_alloc_coherent(&pdev->dev, 6482 sizeof(struct MR_TARGET_PROPERTIES), 6483 &instance->tgt_prop_h, GFP_KERNEL); 6484 instance->crash_dump_buf = 6485 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6486 &instance->crash_dump_h, GFP_KERNEL); 6487 6488 if (!instance->system_info_buf) 6489 dev_err(&instance->pdev->dev, 6490 "Failed to allocate system info buffer\n"); 6491 6492 if (!instance->pd_info) 6493 dev_err(&instance->pdev->dev, 6494 "Failed to allocate pd_info buffer\n"); 6495 6496 if (!instance->tgt_prop) 6497 dev_err(&instance->pdev->dev, 6498 "Failed to allocate tgt_prop buffer\n"); 6499 6500 if (!instance->crash_dump_buf) 6501 dev_err(&instance->pdev->dev, 6502 "Failed to allocate crash dump buffer\n"); 6503 } 6504 6505 return 0; 6506 } 6507 6508 /* 6509 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 6510 * during driver load time 6511 * 6512 * @instance- Adapter soft instance 6513 * 6514 */ 6515 static inline 6516 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 6517 { 6518 struct pci_dev *pdev = instance->pdev; 6519 struct fusion_context *fusion = instance->ctrl_context; 6520 6521 if (instance->evt_detail) 6522 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 6523 instance->evt_detail, 6524 instance->evt_detail_h); 6525 6526 if (fusion && fusion->ioc_init_request) 6527 dma_free_coherent(&pdev->dev, 6528 sizeof(struct MPI2_IOC_INIT_REQUEST), 6529 fusion->ioc_init_request, 6530 fusion->ioc_init_request_phys); 6531 6532 if (instance->pd_list_buf) 6533 dma_free_coherent(&pdev->dev, 6534 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6535 instance->pd_list_buf, 6536 instance->pd_list_buf_h); 6537 6538 if (instance->ld_list_buf) 6539 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 6540 instance->ld_list_buf, 6541 instance->ld_list_buf_h); 6542 6543 if (instance->ld_targetid_list_buf) 6544 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 6545 instance->ld_targetid_list_buf, 6546 instance->ld_targetid_list_buf_h); 6547 6548 if (instance->ctrl_info_buf) 6549 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 6550 instance->ctrl_info_buf, 6551 instance->ctrl_info_buf_h); 6552 6553 if (instance->system_info_buf) 6554 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 6555 instance->system_info_buf, 6556 instance->system_info_h); 6557 6558 if (instance->pd_info) 6559 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 6560 instance->pd_info, instance->pd_info_h); 6561 6562 if (instance->tgt_prop) 6563 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 6564 instance->tgt_prop, instance->tgt_prop_h); 6565 
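	/*
	 * crash_dump_buf may already have been freed and set to NULL during
	 * init when the FW does not support crash dump, so the NULL check
	 * below is required.
	 */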
6566 if (instance->crash_dump_buf) 6567 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6568 instance->crash_dump_buf, 6569 instance->crash_dump_h); 6570 6571 if (instance->snapdump_prop) 6572 dma_free_coherent(&pdev->dev, 6573 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6574 instance->snapdump_prop, 6575 instance->snapdump_prop_h); 6576 } 6577 6578 /* 6579 * megasas_init_ctrl_params - Initialize controller's instance 6580 * parameters before FW init 6581 * @instance - Adapter soft instance 6582 * @return - void 6583 */ 6584 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 6585 { 6586 instance->fw_crash_state = UNAVAILABLE; 6587 6588 megasas_poll_wait_aen = 0; 6589 instance->issuepend_done = 1; 6590 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 6591 6592 /* 6593 * Initialize locks and queues 6594 */ 6595 INIT_LIST_HEAD(&instance->cmd_pool); 6596 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 6597 6598 atomic_set(&instance->fw_outstanding, 0); 6599 6600 init_waitqueue_head(&instance->int_cmd_wait_q); 6601 init_waitqueue_head(&instance->abort_cmd_wait_q); 6602 6603 spin_lock_init(&instance->crashdump_lock); 6604 spin_lock_init(&instance->mfi_pool_lock); 6605 spin_lock_init(&instance->hba_lock); 6606 spin_lock_init(&instance->stream_lock); 6607 spin_lock_init(&instance->completion_lock); 6608 6609 mutex_init(&instance->reset_mutex); 6610 6611 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6612 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 6613 instance->flag_ieee = 1; 6614 6615 megasas_dbg_lvl = 0; 6616 instance->flag = 0; 6617 instance->unload = 1; 6618 instance->last_time = 0; 6619 instance->disableOnlineCtrlReset = 1; 6620 instance->UnevenSpanSupport = 0; 6621 6622 if (instance->adapter_type != MFI_SERIES) 6623 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6624 else 6625 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6626 } 6627 6628 /** 6629 * megasas_probe_one - PCI hotplug entry point 6630 * @pdev: PCI device structure 6631 * @id: PCI ids of supported hotplugged adapter 6632 */ 6633 static int megasas_probe_one(struct pci_dev *pdev, 6634 const struct pci_device_id *id) 6635 { 6636 int rval, pos; 6637 struct Scsi_Host *host; 6638 struct megasas_instance *instance; 6639 u16 control = 0; 6640 6641 switch (pdev->device) { 6642 case PCI_DEVICE_ID_LSI_AERO_10E1: 6643 case PCI_DEVICE_ID_LSI_AERO_10E5: 6644 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 6645 break; 6646 } 6647 6648 /* Reset MSI-X in the kdump kernel */ 6649 if (reset_devices) { 6650 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6651 if (pos) { 6652 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 6653 &control); 6654 if (control & PCI_MSIX_FLAGS_ENABLE) { 6655 dev_info(&pdev->dev, "resetting MSI-X\n"); 6656 pci_write_config_word(pdev, 6657 pos + PCI_MSIX_FLAGS, 6658 control & 6659 ~PCI_MSIX_FLAGS_ENABLE); 6660 } 6661 } 6662 } 6663 6664 /* 6665 * PCI prepping: enable device set bus mastering and dma mask 6666 */ 6667 rval = pci_enable_device_mem(pdev); 6668 6669 if (rval) { 6670 return rval; 6671 } 6672 6673 pci_set_master(pdev); 6674 6675 host = scsi_host_alloc(&megasas_template, 6676 sizeof(struct megasas_instance)); 6677 6678 if (!host) { 6679 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6680 goto fail_alloc_instance; 6681 } 6682 6683 instance = (struct megasas_instance *)host->hostdata; 6684 memset(instance, 0, sizeof(*instance)); 6685 
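	/*
	 * The soft state lives in host->hostdata and was zeroed above;
	 * set up the PCI related fields before firmware initialization.
	 */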
atomic_set(&instance->fw_reset_no_pci_access, 0); 6686 6687 /* 6688 * Initialize PCI related and misc parameters 6689 */ 6690 instance->pdev = pdev; 6691 instance->host = host; 6692 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 6693 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 6694 6695 megasas_set_adapter_type(instance); 6696 6697 /* 6698 * Initialize MFI Firmware 6699 */ 6700 if (megasas_init_fw(instance)) 6701 goto fail_init_mfi; 6702 6703 if (instance->requestorId) { 6704 if (instance->PlasmaFW111) { 6705 instance->vf_affiliation_111 = 6706 dma_alloc_coherent(&pdev->dev, 6707 sizeof(struct MR_LD_VF_AFFILIATION_111), 6708 &instance->vf_affiliation_111_h, 6709 GFP_KERNEL); 6710 if (!instance->vf_affiliation_111) 6711 dev_warn(&pdev->dev, "Can't allocate " 6712 "memory for VF affiliation buffer\n"); 6713 } else { 6714 instance->vf_affiliation = 6715 dma_alloc_coherent(&pdev->dev, 6716 (MAX_LOGICAL_DRIVES + 1) * 6717 sizeof(struct MR_LD_VF_AFFILIATION), 6718 &instance->vf_affiliation_h, 6719 GFP_KERNEL); 6720 if (!instance->vf_affiliation) 6721 dev_warn(&pdev->dev, "Can't allocate " 6722 "memory for VF affiliation buffer\n"); 6723 } 6724 } 6725 6726 /* 6727 * Store instance in PCI softstate 6728 */ 6729 pci_set_drvdata(pdev, instance); 6730 6731 /* 6732 * Add this controller to megasas_mgmt_info structure so that it 6733 * can be exported to management applications 6734 */ 6735 megasas_mgmt_info.count++; 6736 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 6737 megasas_mgmt_info.max_index++; 6738 6739 /* 6740 * Register with SCSI mid-layer 6741 */ 6742 if (megasas_io_attach(instance)) 6743 goto fail_io_attach; 6744 6745 instance->unload = 0; 6746 /* 6747 * Trigger SCSI to scan our drives 6748 */ 6749 scsi_scan_host(host); 6750 6751 /* 6752 * Initiate AEN (Asynchronous Event Notification) 6753 */ 6754 if (megasas_start_aen(instance)) { 6755 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 6756 goto fail_start_aen; 6757 } 6758 6759 /* Get current SR-IOV LD/VF affiliation */ 6760 if (instance->requestorId) 6761 megasas_get_ld_vf_affiliation(instance, 1); 6762 6763 return 0; 6764 6765 fail_start_aen: 6766 fail_io_attach: 6767 megasas_mgmt_info.count--; 6768 megasas_mgmt_info.max_index--; 6769 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6770 6771 instance->instancet->disable_intr(instance); 6772 megasas_destroy_irqs(instance); 6773 6774 if (instance->adapter_type != MFI_SERIES) 6775 megasas_release_fusion(instance); 6776 else 6777 megasas_release_mfi(instance); 6778 if (instance->msix_vectors) 6779 pci_free_irq_vectors(instance->pdev); 6780 fail_init_mfi: 6781 scsi_host_put(host); 6782 fail_alloc_instance: 6783 pci_disable_device(pdev); 6784 6785 return -ENODEV; 6786 } 6787 6788 /** 6789 * megasas_flush_cache - Requests FW to flush all its caches 6790 * @instance: Adapter soft state 6791 */ 6792 static void megasas_flush_cache(struct megasas_instance *instance) 6793 { 6794 struct megasas_cmd *cmd; 6795 struct megasas_dcmd_frame *dcmd; 6796 6797 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6798 return; 6799 6800 cmd = megasas_get_cmd(instance); 6801 6802 if (!cmd) 6803 return; 6804 6805 dcmd = &cmd->frame->dcmd; 6806 6807 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6808 6809 dcmd->cmd = MFI_CMD_DCMD; 6810 dcmd->cmd_status = 0x0; 6811 dcmd->sge_count = 0; 6812 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6813 dcmd->timeout = 0; 6814 dcmd->pad_0 = 0; 6815 dcmd->data_xfer_len = 0; 6816 dcmd->opcode = 
cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6817 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6818 6819 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6820 != DCMD_SUCCESS) { 6821 dev_err(&instance->pdev->dev, 6822 "return from %s %d\n", __func__, __LINE__); 6823 return; 6824 } 6825 6826 megasas_return_cmd(instance, cmd); 6827 } 6828 6829 /** 6830 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6831 * @instance: Adapter soft state 6832 * @opcode: Shutdown/Hibernate 6833 */ 6834 static void megasas_shutdown_controller(struct megasas_instance *instance, 6835 u32 opcode) 6836 { 6837 struct megasas_cmd *cmd; 6838 struct megasas_dcmd_frame *dcmd; 6839 6840 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6841 return; 6842 6843 cmd = megasas_get_cmd(instance); 6844 6845 if (!cmd) 6846 return; 6847 6848 if (instance->aen_cmd) 6849 megasas_issue_blocked_abort_cmd(instance, 6850 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 6851 if (instance->map_update_cmd) 6852 megasas_issue_blocked_abort_cmd(instance, 6853 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 6854 if (instance->jbod_seq_cmd) 6855 megasas_issue_blocked_abort_cmd(instance, 6856 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 6857 6858 dcmd = &cmd->frame->dcmd; 6859 6860 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6861 6862 dcmd->cmd = MFI_CMD_DCMD; 6863 dcmd->cmd_status = 0x0; 6864 dcmd->sge_count = 0; 6865 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6866 dcmd->timeout = 0; 6867 dcmd->pad_0 = 0; 6868 dcmd->data_xfer_len = 0; 6869 dcmd->opcode = cpu_to_le32(opcode); 6870 6871 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6872 != DCMD_SUCCESS) { 6873 dev_err(&instance->pdev->dev, 6874 "return from %s %d\n", __func__, __LINE__); 6875 return; 6876 } 6877 6878 megasas_return_cmd(instance, cmd); 6879 } 6880 6881 #ifdef CONFIG_PM 6882 /** 6883 * megasas_suspend - driver suspend entry point 6884 * @pdev: PCI device structure 6885 * @state: PCI power state to suspend routine 6886 */ 6887 static int 6888 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 6889 { 6890 struct Scsi_Host *host; 6891 struct megasas_instance *instance; 6892 6893 instance = pci_get_drvdata(pdev); 6894 host = instance->host; 6895 instance->unload = 1; 6896 6897 /* Shutdown SR-IOV heartbeat timer */ 6898 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6899 del_timer_sync(&instance->sriov_heartbeat_timer); 6900 6901 /* Stop the FW fault detection watchdog */ 6902 if (instance->adapter_type != MFI_SERIES) 6903 megasas_fusion_stop_watchdog(instance); 6904 6905 megasas_flush_cache(instance); 6906 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6907 6908 /* cancel the delayed work if this work still in queue */ 6909 if (instance->ev != NULL) { 6910 struct megasas_aen_event *ev = instance->ev; 6911 cancel_delayed_work_sync(&ev->hotplug_work); 6912 instance->ev = NULL; 6913 } 6914 6915 tasklet_kill(&instance->isr_tasklet); 6916 6917 pci_set_drvdata(instance->pdev, instance); 6918 instance->instancet->disable_intr(instance); 6919 6920 megasas_destroy_irqs(instance); 6921 6922 if (instance->msix_vectors) 6923 pci_free_irq_vectors(instance->pdev); 6924 6925 pci_save_state(pdev); 6926 pci_disable_device(pdev); 6927 6928 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 6929 6930 return 0; 6931 } 6932 6933 /** 6934 * megasas_resume- driver resume entry point 6935 * @pdev: PCI device structure 6936 */ 6937 static int 6938 megasas_resume(struct 
pci_dev *pdev) 6939 { 6940 int rval; 6941 struct Scsi_Host *host; 6942 struct megasas_instance *instance; 6943 int irq_flags = PCI_IRQ_LEGACY; 6944 6945 instance = pci_get_drvdata(pdev); 6946 host = instance->host; 6947 pci_set_power_state(pdev, PCI_D0); 6948 pci_enable_wake(pdev, PCI_D0, 0); 6949 pci_restore_state(pdev); 6950 6951 /* 6952 * PCI prepping: enable device set bus mastering and dma mask 6953 */ 6954 rval = pci_enable_device_mem(pdev); 6955 6956 if (rval) { 6957 dev_err(&pdev->dev, "Enable device failed\n"); 6958 return rval; 6959 } 6960 6961 pci_set_master(pdev); 6962 6963 /* 6964 * We expect the FW state to be READY 6965 */ 6966 if (megasas_transition_to_ready(instance, 0)) 6967 goto fail_ready_state; 6968 6969 if (megasas_set_dma_mask(instance)) 6970 goto fail_set_dma_mask; 6971 6972 /* 6973 * Initialize MFI Firmware 6974 */ 6975 6976 atomic_set(&instance->fw_outstanding, 0); 6977 atomic_set(&instance->ldio_outstanding, 0); 6978 6979 /* Now re-enable MSI-X */ 6980 if (instance->msix_vectors) { 6981 irq_flags = PCI_IRQ_MSIX; 6982 if (smp_affinity_enable) 6983 irq_flags |= PCI_IRQ_AFFINITY; 6984 } 6985 rval = pci_alloc_irq_vectors(instance->pdev, 1, 6986 instance->msix_vectors ? 6987 instance->msix_vectors : 1, irq_flags); 6988 if (rval < 0) 6989 goto fail_reenable_msix; 6990 6991 megasas_setup_reply_map(instance); 6992 6993 if (instance->adapter_type != MFI_SERIES) { 6994 megasas_reset_reply_desc(instance); 6995 if (megasas_ioc_init_fusion(instance)) { 6996 megasas_free_cmds(instance); 6997 megasas_free_cmds_fusion(instance); 6998 goto fail_init_mfi; 6999 } 7000 if (!megasas_get_map_info(instance)) 7001 megasas_sync_map_info(instance); 7002 } else { 7003 *instance->producer = 0; 7004 *instance->consumer = 0; 7005 if (megasas_issue_init_mfi(instance)) 7006 goto fail_init_mfi; 7007 } 7008 7009 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7010 goto fail_init_mfi; 7011 7012 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7013 (unsigned long)instance); 7014 7015 if (instance->msix_vectors ? 
7016 megasas_setup_irqs_msix(instance, 0) : 7017 megasas_setup_irqs_ioapic(instance)) 7018 goto fail_init_mfi; 7019 7020 /* Re-launch SR-IOV heartbeat timer */ 7021 if (instance->requestorId) { 7022 if (!megasas_sriov_start_heartbeat(instance, 0)) 7023 megasas_start_timer(instance); 7024 else { 7025 instance->skip_heartbeat_timer_del = 1; 7026 goto fail_init_mfi; 7027 } 7028 } 7029 7030 instance->instancet->enable_intr(instance); 7031 megasas_setup_jbod_map(instance); 7032 instance->unload = 0; 7033 7034 /* 7035 * Initiate AEN (Asynchronous Event Notification) 7036 */ 7037 if (megasas_start_aen(instance)) 7038 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7039 7040 /* Re-launch FW fault watchdog */ 7041 if (instance->adapter_type != MFI_SERIES) 7042 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7043 goto fail_start_watchdog; 7044 7045 return 0; 7046 7047 fail_start_watchdog: 7048 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7049 del_timer_sync(&instance->sriov_heartbeat_timer); 7050 fail_init_mfi: 7051 megasas_free_ctrl_dma_buffers(instance); 7052 megasas_free_ctrl_mem(instance); 7053 scsi_host_put(host); 7054 7055 fail_reenable_msix: 7056 fail_set_dma_mask: 7057 fail_ready_state: 7058 7059 pci_disable_device(pdev); 7060 7061 return -ENODEV; 7062 } 7063 #else 7064 #define megasas_suspend NULL 7065 #define megasas_resume NULL 7066 #endif 7067 7068 static inline int 7069 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7070 { 7071 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7072 int i; 7073 u8 adp_state; 7074 7075 for (i = 0; i < wait_time; i++) { 7076 adp_state = atomic_read(&instance->adprecovery); 7077 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7078 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7079 break; 7080 7081 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7082 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7083 7084 msleep(1000); 7085 } 7086 7087 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7088 dev_info(&instance->pdev->dev, 7089 "%s HBA failed to become operational, adp_state %d\n", 7090 __func__, adp_state); 7091 return 1; 7092 } 7093 7094 return 0; 7095 } 7096 7097 /** 7098 * megasas_detach_one - PCI hot"un"plug entry point 7099 * @pdev: PCI device structure 7100 */ 7101 static void megasas_detach_one(struct pci_dev *pdev) 7102 { 7103 int i; 7104 struct Scsi_Host *host; 7105 struct megasas_instance *instance; 7106 struct fusion_context *fusion; 7107 u32 pd_seq_map_sz; 7108 7109 instance = pci_get_drvdata(pdev); 7110 host = instance->host; 7111 fusion = instance->ctrl_context; 7112 7113 /* Shutdown SR-IOV heartbeat timer */ 7114 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7115 del_timer_sync(&instance->sriov_heartbeat_timer); 7116 7117 /* Stop the FW fault detection watchdog */ 7118 if (instance->adapter_type != MFI_SERIES) 7119 megasas_fusion_stop_watchdog(instance); 7120 7121 if (instance->fw_crash_state != UNAVAILABLE) 7122 megasas_free_host_crash_buffer(instance); 7123 scsi_remove_host(instance->host); 7124 instance->unload = 1; 7125 7126 if (megasas_wait_for_adapter_operational(instance)) 7127 goto skip_firing_dcmds; 7128 7129 megasas_flush_cache(instance); 7130 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7131 7132 skip_firing_dcmds: 7133 /* cancel the delayed work if this work still in queue*/ 7134 if (instance->ev != NULL) { 7135 struct megasas_aen_event *ev = instance->ev; 7136 cancel_delayed_work_sync(&ev->hotplug_work); 7137 instance->ev = NULL; 
7138 } 7139 7140 /* cancel all wait events */ 7141 wake_up_all(&instance->int_cmd_wait_q); 7142 7143 tasklet_kill(&instance->isr_tasklet); 7144 7145 /* 7146 * Take the instance off the instance array. Note that we will not 7147 * decrement the max_index. We let this array be sparse array 7148 */ 7149 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7150 if (megasas_mgmt_info.instance[i] == instance) { 7151 megasas_mgmt_info.count--; 7152 megasas_mgmt_info.instance[i] = NULL; 7153 7154 break; 7155 } 7156 } 7157 7158 instance->instancet->disable_intr(instance); 7159 7160 megasas_destroy_irqs(instance); 7161 7162 if (instance->msix_vectors) 7163 pci_free_irq_vectors(instance->pdev); 7164 7165 if (instance->adapter_type >= VENTURA_SERIES) { 7166 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7167 kfree(fusion->stream_detect_by_ld[i]); 7168 kfree(fusion->stream_detect_by_ld); 7169 fusion->stream_detect_by_ld = NULL; 7170 } 7171 7172 7173 if (instance->adapter_type != MFI_SERIES) { 7174 megasas_release_fusion(instance); 7175 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 7176 (sizeof(struct MR_PD_CFG_SEQ) * 7177 (MAX_PHYSICAL_DEVICES - 1)); 7178 for (i = 0; i < 2 ; i++) { 7179 if (fusion->ld_map[i]) 7180 dma_free_coherent(&instance->pdev->dev, 7181 fusion->max_map_sz, 7182 fusion->ld_map[i], 7183 fusion->ld_map_phys[i]); 7184 if (fusion->ld_drv_map[i]) { 7185 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 7186 vfree(fusion->ld_drv_map[i]); 7187 else 7188 free_pages((ulong)fusion->ld_drv_map[i], 7189 fusion->drv_map_pages); 7190 } 7191 7192 if (fusion->pd_seq_sync[i]) 7193 dma_free_coherent(&instance->pdev->dev, 7194 pd_seq_map_sz, 7195 fusion->pd_seq_sync[i], 7196 fusion->pd_seq_phys[i]); 7197 } 7198 } else { 7199 megasas_release_mfi(instance); 7200 } 7201 7202 if (instance->vf_affiliation) 7203 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 7204 sizeof(struct MR_LD_VF_AFFILIATION), 7205 instance->vf_affiliation, 7206 instance->vf_affiliation_h); 7207 7208 if (instance->vf_affiliation_111) 7209 dma_free_coherent(&pdev->dev, 7210 sizeof(struct MR_LD_VF_AFFILIATION_111), 7211 instance->vf_affiliation_111, 7212 instance->vf_affiliation_111_h); 7213 7214 if (instance->hb_host_mem) 7215 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 7216 instance->hb_host_mem, 7217 instance->hb_host_mem_h); 7218 7219 megasas_free_ctrl_dma_buffers(instance); 7220 7221 megasas_free_ctrl_mem(instance); 7222 7223 scsi_host_put(host); 7224 7225 pci_disable_device(pdev); 7226 } 7227 7228 /** 7229 * megasas_shutdown - Shutdown entry point 7230 * @device: Generic device structure 7231 */ 7232 static void megasas_shutdown(struct pci_dev *pdev) 7233 { 7234 struct megasas_instance *instance = pci_get_drvdata(pdev); 7235 7236 instance->unload = 1; 7237 7238 if (megasas_wait_for_adapter_operational(instance)) 7239 goto skip_firing_dcmds; 7240 7241 megasas_flush_cache(instance); 7242 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7243 7244 skip_firing_dcmds: 7245 instance->instancet->disable_intr(instance); 7246 megasas_destroy_irqs(instance); 7247 7248 if (instance->msix_vectors) 7249 pci_free_irq_vectors(instance->pdev); 7250 } 7251 7252 /** 7253 * megasas_mgmt_open - char node "open" entry point 7254 */ 7255 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 7256 { 7257 /* 7258 * Allow only those users with admin rights 7259 */ 7260 if (!capable(CAP_SYS_ADMIN)) 7261 return -EACCES; 7262 7263 return 0; 7264 } 7265 7266 /** 7267 * megasas_mgmt_fasync - Async 
notifier registration from applications 7268 * 7269 * This function adds the calling process to a driver global queue. When an 7270 * event occurs, SIGIO will be sent to all processes in this queue. 7271 */ 7272 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 7273 { 7274 int rc; 7275 7276 mutex_lock(&megasas_async_queue_mutex); 7277 7278 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 7279 7280 mutex_unlock(&megasas_async_queue_mutex); 7281 7282 if (rc >= 0) { 7283 /* For sanity check when we get ioctl */ 7284 filep->private_data = filep; 7285 return 0; 7286 } 7287 7288 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 7289 7290 return rc; 7291 } 7292 7293 /** 7294 * megasas_mgmt_poll - char node "poll" entry point 7295 * */ 7296 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 7297 { 7298 __poll_t mask; 7299 unsigned long flags; 7300 7301 poll_wait(file, &megasas_poll_wait, wait); 7302 spin_lock_irqsave(&poll_aen_lock, flags); 7303 if (megasas_poll_wait_aen) 7304 mask = (EPOLLIN | EPOLLRDNORM); 7305 else 7306 mask = 0; 7307 megasas_poll_wait_aen = 0; 7308 spin_unlock_irqrestore(&poll_aen_lock, flags); 7309 return mask; 7310 } 7311 7312 /* 7313 * megasas_set_crash_dump_params_ioctl: 7314 * Send CRASH_DUMP_MODE DCMD to all controllers 7315 * @cmd: MFI command frame 7316 */ 7317 7318 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 7319 { 7320 struct megasas_instance *local_instance; 7321 int i, error = 0; 7322 int crash_support; 7323 7324 crash_support = cmd->frame->dcmd.mbox.w[0]; 7325 7326 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7327 local_instance = megasas_mgmt_info.instance[i]; 7328 if (local_instance && local_instance->crash_dump_drv_support) { 7329 if ((atomic_read(&local_instance->adprecovery) == 7330 MEGASAS_HBA_OPERATIONAL) && 7331 !megasas_set_crash_dump_params(local_instance, 7332 crash_support)) { 7333 local_instance->crash_dump_app_support = 7334 crash_support; 7335 dev_info(&local_instance->pdev->dev, 7336 "Application firmware crash " 7337 "dump mode set success\n"); 7338 error = 0; 7339 } else { 7340 dev_info(&local_instance->pdev->dev, 7341 "Application firmware crash " 7342 "dump mode set failed\n"); 7343 error = -1; 7344 } 7345 } 7346 } 7347 return error; 7348 } 7349 7350 /** 7351 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 7352 * @instance: Adapter soft state 7353 * @argp: User's ioctl packet 7354 */ 7355 static int 7356 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 7357 struct megasas_iocpacket __user * user_ioc, 7358 struct megasas_iocpacket *ioc) 7359 { 7360 struct megasas_sge64 *kern_sge64 = NULL; 7361 struct megasas_sge32 *kern_sge32 = NULL; 7362 struct megasas_cmd *cmd; 7363 void *kbuff_arr[MAX_IOCTL_SGE]; 7364 dma_addr_t buf_handle = 0; 7365 int error = 0, i; 7366 void *sense = NULL; 7367 dma_addr_t sense_handle; 7368 unsigned long *sense_ptr; 7369 u32 opcode = 0; 7370 7371 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 7372 7373 if (ioc->sge_count > MAX_IOCTL_SGE) { 7374 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 7375 ioc->sge_count, MAX_IOCTL_SGE); 7376 return -EINVAL; 7377 } 7378 7379 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 7380 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 7381 !instance->support_nvme_passthru)) { 7382 dev_err(&instance->pdev->dev, 7383 "Received invalid ioctl command 0x%x\n", 7384 ioc->frame.hdr.cmd); 7385 return -ENOTSUPP; 7386 } 7387 7388 cmd = megasas_get_cmd(instance); 7389 if 
(!cmd) { 7390 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 7391 return -ENOMEM; 7392 } 7393 7394 /* 7395 * User's IOCTL packet has 2 frames (maximum). Copy those two 7396 * frames into our cmd's frames. cmd->frame's context will get 7397 * overwritten when we copy from user's frames. So set that value 7398 * alone separately 7399 */ 7400 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 7401 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 7402 cmd->frame->hdr.pad_0 = 0; 7403 7404 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 7405 7406 if (instance->consistent_mask_64bit) 7407 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 7408 MFI_FRAME_SENSE64)); 7409 else 7410 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 7411 MFI_FRAME_SENSE64)); 7412 7413 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 7414 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 7415 7416 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 7417 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 7418 megasas_return_cmd(instance, cmd); 7419 return -1; 7420 } 7421 } 7422 7423 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 7424 error = megasas_set_crash_dump_params_ioctl(cmd); 7425 megasas_return_cmd(instance, cmd); 7426 return error; 7427 } 7428 7429 /* 7430 * The management interface between applications and the fw uses 7431 * MFI frames. E.g, RAID configuration changes, LD property changes 7432 * etc are accomplishes through different kinds of MFI frames. The 7433 * driver needs to care only about substituting user buffers with 7434 * kernel buffers in SGLs. The location of SGL is embedded in the 7435 * struct iocpacket itself. 7436 */ 7437 if (instance->consistent_mask_64bit) 7438 kern_sge64 = (struct megasas_sge64 *) 7439 ((unsigned long)cmd->frame + ioc->sgl_off); 7440 else 7441 kern_sge32 = (struct megasas_sge32 *) 7442 ((unsigned long)cmd->frame + ioc->sgl_off); 7443 7444 /* 7445 * For each user buffer, create a mirror buffer and copy in 7446 */ 7447 for (i = 0; i < ioc->sge_count; i++) { 7448 if (!ioc->sgl[i].iov_len) 7449 continue; 7450 7451 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 7452 ioc->sgl[i].iov_len, 7453 &buf_handle, GFP_KERNEL); 7454 if (!kbuff_arr[i]) { 7455 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 7456 "kernel SGL buffer for IOCTL\n"); 7457 error = -ENOMEM; 7458 goto out; 7459 } 7460 7461 /* 7462 * We don't change the dma_coherent_mask, so 7463 * dma_alloc_coherent only returns 32bit addresses 7464 */ 7465 if (instance->consistent_mask_64bit) { 7466 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 7467 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7468 } else { 7469 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 7470 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7471 } 7472 7473 /* 7474 * We created a kernel buffer corresponding to the 7475 * user buffer. 
Now copy in from the user buffer 7476 */ 7477 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 7478 (u32) (ioc->sgl[i].iov_len))) { 7479 error = -EFAULT; 7480 goto out; 7481 } 7482 } 7483 7484 if (ioc->sense_len) { 7485 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 7486 &sense_handle, GFP_KERNEL); 7487 if (!sense) { 7488 error = -ENOMEM; 7489 goto out; 7490 } 7491 7492 sense_ptr = 7493 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 7494 if (instance->consistent_mask_64bit) 7495 *sense_ptr = cpu_to_le64(sense_handle); 7496 else 7497 *sense_ptr = cpu_to_le32(sense_handle); 7498 } 7499 7500 /* 7501 * Set the sync_cmd flag so that the ISR knows not to complete this 7502 * cmd to the SCSI mid-layer 7503 */ 7504 cmd->sync_cmd = 1; 7505 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 7506 cmd->sync_cmd = 0; 7507 dev_err(&instance->pdev->dev, 7508 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 7509 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 7510 cmd->cmd_status_drv); 7511 return -EBUSY; 7512 } 7513 7514 cmd->sync_cmd = 0; 7515 7516 if (instance->unload == 1) { 7517 dev_info(&instance->pdev->dev, "Driver unload is in progress " 7518 "don't submit data to application\n"); 7519 goto out; 7520 } 7521 /* 7522 * copy out the kernel buffers to user buffers 7523 */ 7524 for (i = 0; i < ioc->sge_count; i++) { 7525 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 7526 ioc->sgl[i].iov_len)) { 7527 error = -EFAULT; 7528 goto out; 7529 } 7530 } 7531 7532 /* 7533 * copy out the sense 7534 */ 7535 if (ioc->sense_len) { 7536 /* 7537 * sense_ptr points to the location that has the user 7538 * sense buffer address 7539 */ 7540 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 7541 ioc->sense_off); 7542 7543 if (copy_to_user((void __user *)((unsigned long) 7544 get_unaligned((unsigned long *)sense_ptr)), 7545 sense, ioc->sense_len)) { 7546 dev_err(&instance->pdev->dev, "Failed to copy out to user " 7547 "sense data\n"); 7548 error = -EFAULT; 7549 goto out; 7550 } 7551 } 7552 7553 /* 7554 * copy the status codes returned by the fw 7555 */ 7556 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 7557 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 7558 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 7559 error = -EFAULT; 7560 } 7561 7562 out: 7563 if (sense) { 7564 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 7565 sense, sense_handle); 7566 } 7567 7568 for (i = 0; i < ioc->sge_count; i++) { 7569 if (kbuff_arr[i]) { 7570 if (instance->consistent_mask_64bit) 7571 dma_free_coherent(&instance->pdev->dev, 7572 le32_to_cpu(kern_sge64[i].length), 7573 kbuff_arr[i], 7574 le64_to_cpu(kern_sge64[i].phys_addr)); 7575 else 7576 dma_free_coherent(&instance->pdev->dev, 7577 le32_to_cpu(kern_sge32[i].length), 7578 kbuff_arr[i], 7579 le32_to_cpu(kern_sge32[i].phys_addr)); 7580 kbuff_arr[i] = NULL; 7581 } 7582 } 7583 7584 megasas_return_cmd(instance, cmd); 7585 return error; 7586 } 7587 7588 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 7589 { 7590 struct megasas_iocpacket __user *user_ioc = 7591 (struct megasas_iocpacket __user *)arg; 7592 struct megasas_iocpacket *ioc; 7593 struct megasas_instance *instance; 7594 int error; 7595 7596 ioc = memdup_user(user_ioc, sizeof(*ioc)); 7597 if (IS_ERR(ioc)) 7598 return PTR_ERR(ioc); 7599 7600 instance = megasas_lookup_instance(ioc->host_no); 7601 if (!instance) { 7602 error = -ENODEV; 7603 goto out_kfree_ioc; 7604 } 
7605 7606 /* Block ioctls in VF mode */ 7607 if (instance->requestorId && !allow_vf_ioctls) { 7608 error = -ENODEV; 7609 goto out_kfree_ioc; 7610 } 7611 7612 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7613 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 7614 error = -ENODEV; 7615 goto out_kfree_ioc; 7616 } 7617 7618 if (instance->unload == 1) { 7619 error = -ENODEV; 7620 goto out_kfree_ioc; 7621 } 7622 7623 if (down_interruptible(&instance->ioctl_sem)) { 7624 error = -ERESTARTSYS; 7625 goto out_kfree_ioc; 7626 } 7627 7628 if (megasas_wait_for_adapter_operational(instance)) { 7629 error = -ENODEV; 7630 goto out_up; 7631 } 7632 7633 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 7634 out_up: 7635 up(&instance->ioctl_sem); 7636 7637 out_kfree_ioc: 7638 kfree(ioc); 7639 return error; 7640 } 7641 7642 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 7643 { 7644 struct megasas_instance *instance; 7645 struct megasas_aen aen; 7646 int error; 7647 7648 if (file->private_data != file) { 7649 printk(KERN_DEBUG "megasas: fasync_helper was not " 7650 "called first\n"); 7651 return -EINVAL; 7652 } 7653 7654 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 7655 return -EFAULT; 7656 7657 instance = megasas_lookup_instance(aen.host_no); 7658 7659 if (!instance) 7660 return -ENODEV; 7661 7662 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7663 return -ENODEV; 7664 } 7665 7666 if (instance->unload == 1) { 7667 return -ENODEV; 7668 } 7669 7670 if (megasas_wait_for_adapter_operational(instance)) 7671 return -ENODEV; 7672 7673 mutex_lock(&instance->reset_mutex); 7674 error = megasas_register_aen(instance, aen.seq_num, 7675 aen.class_locale_word); 7676 mutex_unlock(&instance->reset_mutex); 7677 return error; 7678 } 7679 7680 /** 7681 * megasas_mgmt_ioctl - char node ioctl entry point 7682 */ 7683 static long 7684 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 7685 { 7686 switch (cmd) { 7687 case MEGASAS_IOC_FIRMWARE: 7688 return megasas_mgmt_ioctl_fw(file, arg); 7689 7690 case MEGASAS_IOC_GET_AEN: 7691 return megasas_mgmt_ioctl_aen(file, arg); 7692 } 7693 7694 return -ENOTTY; 7695 } 7696 7697 #ifdef CONFIG_COMPAT 7698 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 7699 { 7700 struct compat_megasas_iocpacket __user *cioc = 7701 (struct compat_megasas_iocpacket __user *)arg; 7702 struct megasas_iocpacket __user *ioc = 7703 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 7704 int i; 7705 int error = 0; 7706 compat_uptr_t ptr; 7707 u32 local_sense_off; 7708 u32 local_sense_len; 7709 u32 user_sense_off; 7710 7711 if (clear_user(ioc, sizeof(*ioc))) 7712 return -EFAULT; 7713 7714 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 7715 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 7716 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 7717 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 7718 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 7719 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 7720 return -EFAULT; 7721 7722 /* 7723 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 7724 * sense_len is not null, so prepare the 64bit value under 7725 * the same condition. 
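 * The application stored a 32-bit compat_uptr_t at sense_off inside the
 * frame; get_user()/put_user() below read that value and widen it with
 * compat_ptr() into the 64-bit ioc packet built with
 * compat_alloc_user_space().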
7726 */ 7727 if (get_user(local_sense_off, &ioc->sense_off) || 7728 get_user(local_sense_len, &ioc->sense_len) || 7729 get_user(user_sense_off, &cioc->sense_off)) 7730 return -EFAULT; 7731 7732 if (local_sense_off != user_sense_off) 7733 return -EINVAL; 7734 7735 if (local_sense_len) { 7736 void __user **sense_ioc_ptr = 7737 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 7738 compat_uptr_t *sense_cioc_ptr = 7739 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 7740 if (get_user(ptr, sense_cioc_ptr) || 7741 put_user(compat_ptr(ptr), sense_ioc_ptr)) 7742 return -EFAULT; 7743 } 7744 7745 for (i = 0; i < MAX_IOCTL_SGE; i++) { 7746 if (get_user(ptr, &cioc->sgl[i].iov_base) || 7747 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 7748 copy_in_user(&ioc->sgl[i].iov_len, 7749 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 7750 return -EFAULT; 7751 } 7752 7753 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 7754 7755 if (copy_in_user(&cioc->frame.hdr.cmd_status, 7756 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 7757 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 7758 return -EFAULT; 7759 } 7760 return error; 7761 } 7762 7763 static long 7764 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 7765 unsigned long arg) 7766 { 7767 switch (cmd) { 7768 case MEGASAS_IOC_FIRMWARE32: 7769 return megasas_mgmt_compat_ioctl_fw(file, arg); 7770 case MEGASAS_IOC_GET_AEN: 7771 return megasas_mgmt_ioctl_aen(file, arg); 7772 } 7773 7774 return -ENOTTY; 7775 } 7776 #endif 7777 7778 /* 7779 * File operations structure for management interface 7780 */ 7781 static const struct file_operations megasas_mgmt_fops = { 7782 .owner = THIS_MODULE, 7783 .open = megasas_mgmt_open, 7784 .fasync = megasas_mgmt_fasync, 7785 .unlocked_ioctl = megasas_mgmt_ioctl, 7786 .poll = megasas_mgmt_poll, 7787 #ifdef CONFIG_COMPAT 7788 .compat_ioctl = megasas_mgmt_compat_ioctl, 7789 #endif 7790 .llseek = noop_llseek, 7791 }; 7792 7793 /* 7794 * PCI hotplug support registration structure 7795 */ 7796 static struct pci_driver megasas_pci_driver = { 7797 7798 .name = "megaraid_sas", 7799 .id_table = megasas_pci_table, 7800 .probe = megasas_probe_one, 7801 .remove = megasas_detach_one, 7802 .suspend = megasas_suspend, 7803 .resume = megasas_resume, 7804 .shutdown = megasas_shutdown, 7805 }; 7806 7807 /* 7808 * Sysfs driver attributes 7809 */ 7810 static ssize_t version_show(struct device_driver *dd, char *buf) 7811 { 7812 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 7813 MEGASAS_VERSION); 7814 } 7815 static DRIVER_ATTR_RO(version); 7816 7817 static ssize_t release_date_show(struct device_driver *dd, char *buf) 7818 { 7819 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 7820 MEGASAS_RELDATE); 7821 } 7822 static DRIVER_ATTR_RO(release_date); 7823 7824 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 7825 { 7826 return sprintf(buf, "%u\n", support_poll_for_event); 7827 } 7828 static DRIVER_ATTR_RO(support_poll_for_event); 7829 7830 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 7831 { 7832 return sprintf(buf, "%u\n", support_device_change); 7833 } 7834 static DRIVER_ATTR_RO(support_device_change); 7835 7836 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 7837 { 7838 return sprintf(buf, "%u\n", megasas_dbg_lvl); 7839 } 7840 7841 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 7842 size_t count) 7843 { 7844 int retval = count; 7845 7846 if 

/*
 * File operations structure for management interface
 */
static const struct file_operations megasas_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = megasas_mgmt_open,
	.fasync = megasas_mgmt_fasync,
	.unlocked_ioctl = megasas_mgmt_ioctl,
	.poll = megasas_mgmt_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = megasas_mgmt_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

/*
 * PCI hotplug support registration structure
 */
static struct pci_driver megasas_pci_driver = {

	.name = "megaraid_sas",
	.id_table = megasas_pci_table,
	.probe = megasas_probe_one,
	.remove = megasas_detach_one,
	.suspend = megasas_suspend,
	.resume = megasas_resume,
	.shutdown = megasas_shutdown,
};

/*
 * Sysfs driver attributes
 */
static ssize_t version_show(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
			MEGASAS_VERSION);
}
static DRIVER_ATTR_RO(version);

static ssize_t release_date_show(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
			MEGASAS_RELDATE);
}
static DRIVER_ATTR_RO(release_date);

static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_poll_for_event);
}
static DRIVER_ATTR_RO(support_poll_for_event);

static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_device_change);
}
static DRIVER_ATTR_RO(support_device_change);

static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", megasas_dbg_lvl);
}

static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
			     size_t count)
{
	int retval = count;

	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
		retval = -EINVAL;
	}
	return retval;
}
static DRIVER_ATTR_RW(dbg_lvl);

static ssize_t
support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_nvme_encapsulation);
}

static DRIVER_ATTR_RO(support_nvme_encapsulation);
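
/*
 * The driver attributes above are created against megasas_pci_driver.driver
 * in megasas_init() below, so they appear under the PCI driver's sysfs
 * directory.  Illustrative shell usage only; the paths assume the standard
 * sysfs layout for a PCI driver named "megaraid_sas":
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/version
 *	cat /sys/bus/pci/drivers/megaraid_sas/release_date
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 *
 * Everything except dbg_lvl is read-only (DRIVER_ATTR_RO); dbg_lvl is the
 * lone read-write attribute (DRIVER_ATTR_RW).
 */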

static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
	scsi_remove_device(sdev);
	scsi_device_put(sdev);
}

static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	struct Scsi_Host *host;
	struct scsi_device *sdev1;
	u16 pd_index = 0;
	u16 ld_index = 0;
	int i, j, doscan = 0;
	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
	int error;
	u8 dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Adjust event workqueue thread wait time for VF mode */
	if (instance->requestorId)
		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	host = instance->host;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			dcmd_ret = megasas_get_pd_list(instance);
			if (dcmd_ret == DCMD_SUCCESS)
				doscan = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			if (!instance->requestorId ||
			    (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);

			if (dcmd_ret == DCMD_SUCCESS)
				doscan = SCAN_VD_CHANNEL;

			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			dcmd_ret = megasas_get_pd_list(instance);

			if (dcmd_ret != DCMD_SUCCESS)
				break;

			if (!instance->requestorId ||
			    (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);

			if (dcmd_ret != DCMD_SUCCESS)
				break;

			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				 instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			doscan = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

	mutex_unlock(&instance->reset_mutex);

	if (doscan & SCAN_PD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
				sdev1 = scsi_device_lookup(host, i, j, 0);
				if (instance->pd_list[pd_index].driveState ==
							MR_PD_STATE_SYSTEM) {
					if (!sdev1)
						scsi_add_device(host, i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

	if (doscan & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
				     class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}
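
/*
 * Indexing note for the rescan loops above: physical devices are addressed
 * as (channel i, target j) with i < MEGASAS_MAX_PD_CHANNELS, and the flat
 * pd_list index is i * MEGASAS_MAX_DEV_PER_CHANNEL + j.  Logical drives use
 * the same flattening for ld_ids, but their SCSI channel is offset by
 * MEGASAS_MAX_PD_CHANNELS so PD and VD channels never collide on the same
 * host.  As a worked example, assuming the usual two PD channels, the
 * logical drive at ld_index 1 (i = 0, j = 1) is presented to the SCSI
 * midlayer as channel 2, target 1, lun 0.
 */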

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Booted in kdump kernel, minimize memory footprints by
	 * disabling few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	return rval;

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
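
/*
 * Illustrative load flow (not part of the driver): megasas_init() registers
 * the "megaraid_sas_ioctl" character device, then the PCI driver, then the
 * sysfs driver attributes, and unwinds everything already created in reverse
 * order if any step fails.  A load in a memory-constrained (kdump-style)
 * environment might look like the following; the parameter values are only
 * examples and mirror the overrides megasas_init() applies automatically
 * when reset_devices is set:
 *
 *	modprobe megaraid_sas msix_vectors=1 rdpq_enable=0 dual_qdepth_disable=1
 *
 * The dynamically assigned major number of the management node is reported
 * by register_chrdev() and shows up in /proc/devices under
 * "megaraid_sas_ioctl".
 */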