1 /* 2 * Linux MegaRAID driver for SAS based RAID controllers 3 * 4 * Copyright (c) 2003-2013 LSI Corporation 5 * Copyright (c) 2013-2016 Avago Technologies 6 * Copyright (c) 2016-2018 Broadcom Inc. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 2 11 * of the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program. If not, see <http://www.gnu.org/licenses/>. 20 * 21 * Authors: Broadcom Inc. 22 * Sreenivas Bagalkote 23 * Sumant Patro 24 * Bo Yang 25 * Adam Radford 26 * Kashyap Desai <kashyap.desai@broadcom.com> 27 * Sumit Saxena <sumit.saxena@broadcom.com> 28 * 29 * Send feedback to: megaraidlinux.pdl@broadcom.com 30 */ 31 32 #include <linux/kernel.h> 33 #include <linux/types.h> 34 #include <linux/pci.h> 35 #include <linux/list.h> 36 #include <linux/moduleparam.h> 37 #include <linux/module.h> 38 #include <linux/spinlock.h> 39 #include <linux/interrupt.h> 40 #include <linux/delay.h> 41 #include <linux/uio.h> 42 #include <linux/slab.h> 43 #include <linux/uaccess.h> 44 #include <asm/unaligned.h> 45 #include <linux/fs.h> 46 #include <linux/compat.h> 47 #include <linux/blkdev.h> 48 #include <linux/mutex.h> 49 #include <linux/poll.h> 50 #include <linux/vmalloc.h> 51 52 #include <scsi/scsi.h> 53 #include <scsi/scsi_cmnd.h> 54 #include <scsi/scsi_device.h> 55 #include <scsi/scsi_host.h> 56 #include <scsi/scsi_tcq.h> 57 #include "megaraid_sas_fusion.h" 58 #include "megaraid_sas.h" 59 60 /* 61 * Number of sectors per IO command 62 * Will be set in megasas_init_mfi if user does not provide 63 */ 64 static unsigned int max_sectors; 65 module_param_named(max_sectors, max_sectors, int, 0); 66 MODULE_PARM_DESC(max_sectors, 67 "Maximum number of sectors per IO command"); 68 69 static int msix_disable; 70 module_param(msix_disable, int, S_IRUGO); 71 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 72 73 static unsigned int msix_vectors; 74 module_param(msix_vectors, int, S_IRUGO); 75 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); 76 77 static int allow_vf_ioctls; 78 module_param(allow_vf_ioctls, int, S_IRUGO); 79 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); 80 81 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 82 module_param(throttlequeuedepth, int, S_IRUGO); 83 MODULE_PARM_DESC(throttlequeuedepth, 84 "Adapter queue depth when throttled due to I/O timeout. Default: 16"); 85 86 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; 87 module_param(resetwaittime, int, S_IRUGO); 88 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. 
Default: 180s"); 89 90 int smp_affinity_enable = 1; 91 module_param(smp_affinity_enable, int, S_IRUGO); 92 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); 93 94 int rdpq_enable = 1; 95 module_param(rdpq_enable, int, S_IRUGO); 96 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); 97 98 unsigned int dual_qdepth_disable; 99 module_param(dual_qdepth_disable, int, S_IRUGO); 100 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); 101 102 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 103 module_param(scmd_timeout, int, S_IRUGO); 104 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer."); 105 106 MODULE_LICENSE("GPL"); 107 MODULE_VERSION(MEGASAS_VERSION); 108 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); 109 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); 110 111 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 112 static int megasas_get_pd_list(struct megasas_instance *instance); 113 static int megasas_ld_list_query(struct megasas_instance *instance, 114 u8 query_type); 115 static int megasas_issue_init_mfi(struct megasas_instance *instance); 116 static int megasas_register_aen(struct megasas_instance *instance, 117 u32 seq_num, u32 class_locale_word); 118 static void megasas_get_pd_info(struct megasas_instance *instance, 119 struct scsi_device *sdev); 120 121 /* 122 * PCI ID table for all supported controllers 123 */ 124 static struct pci_device_id megasas_pci_table[] = { 125 126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, 127 /* xscale IOP */ 128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, 129 /* ppc IOP */ 130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 131 /* ppc IOP */ 132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 133 /* gen2*/ 134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 135 /* gen2*/ 136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, 137 /* skinny*/ 138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, 139 /* skinny*/ 140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 141 /* xscale IOP, vega */ 142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 143 /* xscale IOP */ 144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 145 /* Fusion */ 146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, 147 /* Plasma */ 148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, 149 /* Invader */ 150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 151 /* Fury */ 152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 153 /* Intruder */ 154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 155 /* Intruder 24 port*/ 156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 158 /* VENTURA */ 159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
		PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to three times until a non-zero value is read.
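	 * The retry is bounded: if the register still reads zero after the
	 * third attempt, that zero value is returned to the caller.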
233 */ 234 if (instance->adapter_type == AERO_SERIES) { 235 do { 236 ret_val = readl(addr); 237 i++; 238 } while (ret_val == 0 && i < 3); 239 return ret_val; 240 } else { 241 return readl(addr); 242 } 243 } 244 245 /** 246 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs 247 * @instance: Adapter soft state 248 * @dcmd: DCMD frame inside MFI command 249 * @dma_addr: DMA address of buffer to be passed to FW 250 * @dma_len: Length of DMA buffer to be passed to FW 251 * @return: void 252 */ 253 void megasas_set_dma_settings(struct megasas_instance *instance, 254 struct megasas_dcmd_frame *dcmd, 255 dma_addr_t dma_addr, u32 dma_len) 256 { 257 if (instance->consistent_mask_64bit) { 258 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); 259 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); 260 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); 261 262 } else { 263 dcmd->sgl.sge32[0].phys_addr = 264 cpu_to_le32(lower_32_bits(dma_addr)); 265 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); 266 dcmd->flags = cpu_to_le16(dcmd->flags); 267 } 268 } 269 270 void 271 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 272 { 273 instance->instancet->fire_cmd(instance, 274 cmd->frame_phys_addr, 0, instance->reg_set); 275 return; 276 } 277 278 /** 279 * megasas_get_cmd - Get a command from the free pool 280 * @instance: Adapter soft state 281 * 282 * Returns a free command from the pool 283 */ 284 struct megasas_cmd *megasas_get_cmd(struct megasas_instance 285 *instance) 286 { 287 unsigned long flags; 288 struct megasas_cmd *cmd = NULL; 289 290 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 291 292 if (!list_empty(&instance->cmd_pool)) { 293 cmd = list_entry((&instance->cmd_pool)->next, 294 struct megasas_cmd, list); 295 list_del_init(&cmd->list); 296 } else { 297 dev_err(&instance->pdev->dev, "Command pool empty!\n"); 298 } 299 300 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 301 return cmd; 302 } 303 304 /** 305 * megasas_return_cmd - Return a cmd to free command pool 306 * @instance: Adapter soft state 307 * @cmd: Command packet to be returned to free command pool 308 */ 309 void 310 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 311 { 312 unsigned long flags; 313 u32 blk_tags; 314 struct megasas_cmd_fusion *cmd_fusion; 315 struct fusion_context *fusion = instance->ctrl_context; 316 317 /* This flag is used only for fusion adapter. 
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
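/*
 * Note: the xscale family reports firmware state through outbound_msg_0,
 * while the later ppc/skinny/gen2 families below read it from
 * outbound_scratch_pad_0.
 */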
/**
 * megasas_clear_intr_xscale - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr :	Physical address of cmd
 * @frame_count :	Number of frames for the command
 * @regs :		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg =
megasas_read_fw_status_reg_xscale, 566 .adp_reset = megasas_adp_reset_xscale, 567 .check_reset = megasas_check_reset_xscale, 568 .service_isr = megasas_isr, 569 .tasklet = megasas_complete_cmd_dpc, 570 .init_adapter = megasas_init_adapter_mfi, 571 .build_and_issue_cmd = megasas_build_and_issue_cmd, 572 .issue_dcmd = megasas_issue_dcmd, 573 }; 574 575 /** 576 * This is the end of set of functions & definitions specific 577 * to xscale (deviceid : 1064R, PERC5) controllers 578 */ 579 580 /** 581 * The following functions are defined for ppc (deviceid : 0x60) 582 * controllers 583 */ 584 585 /** 586 * megasas_enable_intr_ppc - Enables interrupts 587 * @regs: MFI register set 588 */ 589 static inline void 590 megasas_enable_intr_ppc(struct megasas_instance *instance) 591 { 592 struct megasas_register_set __iomem *regs; 593 594 regs = instance->reg_set; 595 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 596 597 writel(~0x80000000, &(regs)->outbound_intr_mask); 598 599 /* Dummy readl to force pci flush */ 600 readl(®s->outbound_intr_mask); 601 } 602 603 /** 604 * megasas_disable_intr_ppc - Disable interrupt 605 * @regs: MFI register set 606 */ 607 static inline void 608 megasas_disable_intr_ppc(struct megasas_instance *instance) 609 { 610 struct megasas_register_set __iomem *regs; 611 u32 mask = 0xFFFFFFFF; 612 613 regs = instance->reg_set; 614 writel(mask, ®s->outbound_intr_mask); 615 /* Dummy readl to force pci flush */ 616 readl(®s->outbound_intr_mask); 617 } 618 619 /** 620 * megasas_read_fw_status_reg_ppc - returns the current FW status value 621 * @regs: MFI register set 622 */ 623 static u32 624 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance) 625 { 626 return readl(&instance->reg_set->outbound_scratch_pad_0); 627 } 628 629 /** 630 * megasas_clear_interrupt_ppc - Check & clear interrupt 631 * @regs: MFI register set 632 */ 633 static int 634 megasas_clear_intr_ppc(struct megasas_instance *instance) 635 { 636 u32 status, mfiStatus = 0; 637 struct megasas_register_set __iomem *regs; 638 regs = instance->reg_set; 639 640 /* 641 * Check if it is our interrupt 642 */ 643 status = readl(®s->outbound_intr_status); 644 645 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) 646 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 647 648 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) 649 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 650 651 /* 652 * Clear the interrupt by writing back the same value 653 */ 654 writel(status, ®s->outbound_doorbell_clear); 655 656 /* Dummy readl to force pci flush */ 657 readl(®s->outbound_doorbell_clear); 658 659 return mfiStatus; 660 } 661 662 /** 663 * megasas_fire_cmd_ppc - Sends command to the FW 664 * @frame_phys_addr : Physical address of cmd 665 * @frame_count : Number of frames for the command 666 * @regs : MFI register set 667 */ 668 static inline void 669 megasas_fire_cmd_ppc(struct megasas_instance *instance, 670 dma_addr_t frame_phys_addr, 671 u32 frame_count, 672 struct megasas_register_set __iomem *regs) 673 { 674 unsigned long flags; 675 676 spin_lock_irqsave(&instance->hba_lock, flags); 677 writel((frame_phys_addr | (frame_count<<1))|1, 678 &(regs)->inbound_queue_port); 679 spin_unlock_irqrestore(&instance->hba_lock, flags); 680 } 681 682 /** 683 * megasas_check_reset_ppc - For controller reset check 684 * @regs: MFI register set 685 */ 686 static int 687 megasas_check_reset_ppc(struct megasas_instance *instance, 688 struct megasas_register_set __iomem *regs) 689 { 690 if (atomic_read(&instance->adprecovery) != 
MEGASAS_HBA_OPERATIONAL) 691 return 1; 692 693 return 0; 694 } 695 696 static struct megasas_instance_template megasas_instance_template_ppc = { 697 698 .fire_cmd = megasas_fire_cmd_ppc, 699 .enable_intr = megasas_enable_intr_ppc, 700 .disable_intr = megasas_disable_intr_ppc, 701 .clear_intr = megasas_clear_intr_ppc, 702 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 703 .adp_reset = megasas_adp_reset_xscale, 704 .check_reset = megasas_check_reset_ppc, 705 .service_isr = megasas_isr, 706 .tasklet = megasas_complete_cmd_dpc, 707 .init_adapter = megasas_init_adapter_mfi, 708 .build_and_issue_cmd = megasas_build_and_issue_cmd, 709 .issue_dcmd = megasas_issue_dcmd, 710 }; 711 712 /** 713 * megasas_enable_intr_skinny - Enables interrupts 714 * @regs: MFI register set 715 */ 716 static inline void 717 megasas_enable_intr_skinny(struct megasas_instance *instance) 718 { 719 struct megasas_register_set __iomem *regs; 720 721 regs = instance->reg_set; 722 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); 723 724 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 725 726 /* Dummy readl to force pci flush */ 727 readl(®s->outbound_intr_mask); 728 } 729 730 /** 731 * megasas_disable_intr_skinny - Disables interrupt 732 * @regs: MFI register set 733 */ 734 static inline void 735 megasas_disable_intr_skinny(struct megasas_instance *instance) 736 { 737 struct megasas_register_set __iomem *regs; 738 u32 mask = 0xFFFFFFFF; 739 740 regs = instance->reg_set; 741 writel(mask, ®s->outbound_intr_mask); 742 /* Dummy readl to force pci flush */ 743 readl(®s->outbound_intr_mask); 744 } 745 746 /** 747 * megasas_read_fw_status_reg_skinny - returns the current FW status value 748 * @regs: MFI register set 749 */ 750 static u32 751 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance) 752 { 753 return readl(&instance->reg_set->outbound_scratch_pad_0); 754 } 755 756 /** 757 * megasas_clear_interrupt_skinny - Check & clear interrupt 758 * @regs: MFI register set 759 */ 760 static int 761 megasas_clear_intr_skinny(struct megasas_instance *instance) 762 { 763 u32 status; 764 u32 mfiStatus = 0; 765 struct megasas_register_set __iomem *regs; 766 regs = instance->reg_set; 767 768 /* 769 * Check if it is our interrupt 770 */ 771 status = readl(®s->outbound_intr_status); 772 773 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { 774 return 0; 775 } 776 777 /* 778 * Check if it is our interrupt 779 */ 780 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) == 781 MFI_STATE_FAULT) { 782 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 783 } else 784 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 785 786 /* 787 * Clear the interrupt by writing back the same value 788 */ 789 writel(status, ®s->outbound_intr_status); 790 791 /* 792 * dummy read to flush PCI 793 */ 794 readl(®s->outbound_intr_status); 795 796 return mfiStatus; 797 } 798 799 /** 800 * megasas_fire_cmd_skinny - Sends command to the FW 801 * @frame_phys_addr : Physical address of cmd 802 * @frame_count : Number of frames for the command 803 * @regs : MFI register set 804 */ 805 static inline void 806 megasas_fire_cmd_skinny(struct megasas_instance *instance, 807 dma_addr_t frame_phys_addr, 808 u32 frame_count, 809 struct megasas_register_set __iomem *regs) 810 { 811 unsigned long flags; 812 813 spin_lock_irqsave(&instance->hba_lock, flags); 814 writel(upper_32_bits(frame_phys_addr), 815 &(regs)->inbound_high_queue_port); 816 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, 817 
&(regs)->inbound_low_queue_port); 818 spin_unlock_irqrestore(&instance->hba_lock, flags); 819 } 820 821 /** 822 * megasas_check_reset_skinny - For controller reset check 823 * @regs: MFI register set 824 */ 825 static int 826 megasas_check_reset_skinny(struct megasas_instance *instance, 827 struct megasas_register_set __iomem *regs) 828 { 829 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 830 return 1; 831 832 return 0; 833 } 834 835 static struct megasas_instance_template megasas_instance_template_skinny = { 836 837 .fire_cmd = megasas_fire_cmd_skinny, 838 .enable_intr = megasas_enable_intr_skinny, 839 .disable_intr = megasas_disable_intr_skinny, 840 .clear_intr = megasas_clear_intr_skinny, 841 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 842 .adp_reset = megasas_adp_reset_gen2, 843 .check_reset = megasas_check_reset_skinny, 844 .service_isr = megasas_isr, 845 .tasklet = megasas_complete_cmd_dpc, 846 .init_adapter = megasas_init_adapter_mfi, 847 .build_and_issue_cmd = megasas_build_and_issue_cmd, 848 .issue_dcmd = megasas_issue_dcmd, 849 }; 850 851 852 /** 853 * The following functions are defined for gen2 (deviceid : 0x78 0x79) 854 * controllers 855 */ 856 857 /** 858 * megasas_enable_intr_gen2 - Enables interrupts 859 * @regs: MFI register set 860 */ 861 static inline void 862 megasas_enable_intr_gen2(struct megasas_instance *instance) 863 { 864 struct megasas_register_set __iomem *regs; 865 866 regs = instance->reg_set; 867 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 868 869 /* write ~0x00000005 (4 & 1) to the intr mask*/ 870 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 871 872 /* Dummy readl to force pci flush */ 873 readl(®s->outbound_intr_mask); 874 } 875 876 /** 877 * megasas_disable_intr_gen2 - Disables interrupt 878 * @regs: MFI register set 879 */ 880 static inline void 881 megasas_disable_intr_gen2(struct megasas_instance *instance) 882 { 883 struct megasas_register_set __iomem *regs; 884 u32 mask = 0xFFFFFFFF; 885 886 regs = instance->reg_set; 887 writel(mask, ®s->outbound_intr_mask); 888 /* Dummy readl to force pci flush */ 889 readl(®s->outbound_intr_mask); 890 } 891 892 /** 893 * megasas_read_fw_status_reg_gen2 - returns the current FW status value 894 * @regs: MFI register set 895 */ 896 static u32 897 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance) 898 { 899 return readl(&instance->reg_set->outbound_scratch_pad_0); 900 } 901 902 /** 903 * megasas_clear_interrupt_gen2 - Check & clear interrupt 904 * @regs: MFI register set 905 */ 906 static int 907 megasas_clear_intr_gen2(struct megasas_instance *instance) 908 { 909 u32 status; 910 u32 mfiStatus = 0; 911 struct megasas_register_set __iomem *regs; 912 regs = instance->reg_set; 913 914 /* 915 * Check if it is our interrupt 916 */ 917 status = readl(®s->outbound_intr_status); 918 919 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) { 920 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 921 } 922 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { 923 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 924 } 925 926 /* 927 * Clear the interrupt by writing back the same value 928 */ 929 if (mfiStatus) 930 writel(status, ®s->outbound_doorbell_clear); 931 932 /* Dummy readl to force pci flush */ 933 readl(®s->outbound_intr_status); 934 935 return mfiStatus; 936 } 937 /** 938 * megasas_fire_cmd_gen2 - Sends command to the FW 939 * @frame_phys_addr : Physical address of cmd 940 * @frame_count : Number of frames for the command 941 * @regs : MFI register set 
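 * The frame count is encoded in the low bits of the value posted to the
 * inbound queue port, i.e. (frame_phys_addr | (frame_count << 1) | 1),
 * the same descriptor format used by the ppc and skinny paths above.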
942 */ 943 static inline void 944 megasas_fire_cmd_gen2(struct megasas_instance *instance, 945 dma_addr_t frame_phys_addr, 946 u32 frame_count, 947 struct megasas_register_set __iomem *regs) 948 { 949 unsigned long flags; 950 951 spin_lock_irqsave(&instance->hba_lock, flags); 952 writel((frame_phys_addr | (frame_count<<1))|1, 953 &(regs)->inbound_queue_port); 954 spin_unlock_irqrestore(&instance->hba_lock, flags); 955 } 956 957 /** 958 * megasas_adp_reset_gen2 - For controller reset 959 * @regs: MFI register set 960 */ 961 static int 962 megasas_adp_reset_gen2(struct megasas_instance *instance, 963 struct megasas_register_set __iomem *reg_set) 964 { 965 u32 retry = 0 ; 966 u32 HostDiag; 967 u32 __iomem *seq_offset = ®_set->seq_offset; 968 u32 __iomem *hostdiag_offset = ®_set->host_diag; 969 970 if (instance->instancet == &megasas_instance_template_skinny) { 971 seq_offset = ®_set->fusion_seq_offset; 972 hostdiag_offset = ®_set->fusion_host_diag; 973 } 974 975 writel(0, seq_offset); 976 writel(4, seq_offset); 977 writel(0xb, seq_offset); 978 writel(2, seq_offset); 979 writel(7, seq_offset); 980 writel(0xd, seq_offset); 981 982 msleep(1000); 983 984 HostDiag = (u32)readl(hostdiag_offset); 985 986 while (!(HostDiag & DIAG_WRITE_ENABLE)) { 987 msleep(100); 988 HostDiag = (u32)readl(hostdiag_offset); 989 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n", 990 retry, HostDiag); 991 992 if (retry++ >= 100) 993 return 1; 994 995 } 996 997 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); 998 999 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); 1000 1001 ssleep(10); 1002 1003 HostDiag = (u32)readl(hostdiag_offset); 1004 while (HostDiag & DIAG_RESET_ADAPTER) { 1005 msleep(100); 1006 HostDiag = (u32)readl(hostdiag_offset); 1007 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n", 1008 retry, HostDiag); 1009 1010 if (retry++ >= 1000) 1011 return 1; 1012 1013 } 1014 return 0; 1015 } 1016 1017 /** 1018 * megasas_check_reset_gen2 - For controller reset check 1019 * @regs: MFI register set 1020 */ 1021 static int 1022 megasas_check_reset_gen2(struct megasas_instance *instance, 1023 struct megasas_register_set __iomem *regs) 1024 { 1025 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1026 return 1; 1027 1028 return 0; 1029 } 1030 1031 static struct megasas_instance_template megasas_instance_template_gen2 = { 1032 1033 .fire_cmd = megasas_fire_cmd_gen2, 1034 .enable_intr = megasas_enable_intr_gen2, 1035 .disable_intr = megasas_disable_intr_gen2, 1036 .clear_intr = megasas_clear_intr_gen2, 1037 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 1038 .adp_reset = megasas_adp_reset_gen2, 1039 .check_reset = megasas_check_reset_gen2, 1040 .service_isr = megasas_isr, 1041 .tasklet = megasas_complete_cmd_dpc, 1042 .init_adapter = megasas_init_adapter_mfi, 1043 .build_and_issue_cmd = megasas_build_and_issue_cmd, 1044 .issue_dcmd = megasas_issue_dcmd, 1045 }; 1046 1047 /** 1048 * This is the end of set of functions & definitions 1049 * specific to gen2 (deviceid : 0x78, 0x79) controllers 1050 */ 1051 1052 /* 1053 * Template added for TB (Fusion) 1054 */ 1055 extern struct megasas_instance_template megasas_instance_template_fusion; 1056 1057 /** 1058 * megasas_issue_polled - Issues a polling command 1059 * @instance: Adapter soft state 1060 * @cmd: Command packet to be issued 1061 * 1062 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. 
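 * Returns DCMD_NOT_FIRED when the adapter is in a HW critical error state,
 * otherwise the completion status reported by wait_and_poll().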
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * @instance:	Adapter soft state
 * @cmd:	Command to be issued
 * @timeout:	Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}
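/*
 * Typical caller pattern, as a hedged sketch only (the DCMD setup step and
 * the timeout handling below are illustrative, not copied from any one
 * caller in this file):
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... fill cmd->frame->dcmd (opcode, flags, SGL) ...
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	if (ret == DCMD_TIMEOUT)
 *		... the command may then be aborted via
 *		    megasas_issue_blocked_abort_cmd() ...
 *	megasas_return_cmd(instance, cmd);
 */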
/**
 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
 * @instance:		Adapter soft state
 * @cmd_to_abort:	Previously issued cmd to be aborted
 * @timeout:		Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 - Prepares 32-bit SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}
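/*
 * scsi_dma_map(), used by the SGL builders above and below, returns the
 * number of mapped SG entries, 0 for a command with no data transfer, or a
 * negative value on a mapping failure; the BUG_ON(sge_count < 0) checks
 * only the failure case.
 */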
/**
 * megasas_make_sgl64 - Prepares 64-bit SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame - io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
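 * Returns the total frame count on success, or 0 when the prepared SGE
 * count exceeds the adapter limit (max_num_sge).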
1349 */ 1350 static int 1351 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1352 struct megasas_cmd *cmd) 1353 { 1354 u32 is_logical; 1355 u32 device_id; 1356 u16 flags = 0; 1357 struct megasas_pthru_frame *pthru; 1358 1359 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1360 device_id = MEGASAS_DEV_INDEX(scp); 1361 pthru = (struct megasas_pthru_frame *)cmd->frame; 1362 1363 if (scp->sc_data_direction == DMA_TO_DEVICE) 1364 flags = MFI_FRAME_DIR_WRITE; 1365 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1366 flags = MFI_FRAME_DIR_READ; 1367 else if (scp->sc_data_direction == DMA_NONE) 1368 flags = MFI_FRAME_DIR_NONE; 1369 1370 if (instance->flag_ieee == 1) { 1371 flags |= MFI_FRAME_IEEE; 1372 } 1373 1374 /* 1375 * Prepare the DCDB frame 1376 */ 1377 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1378 pthru->cmd_status = 0x0; 1379 pthru->scsi_status = 0x0; 1380 pthru->target_id = device_id; 1381 pthru->lun = scp->device->lun; 1382 pthru->cdb_len = scp->cmd_len; 1383 pthru->timeout = 0; 1384 pthru->pad_0 = 0; 1385 pthru->flags = cpu_to_le16(flags); 1386 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1387 1388 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1389 1390 /* 1391 * If the command is for the tape device, set the 1392 * pthru timeout to the os layer timeout value. 1393 */ 1394 if (scp->device->type == TYPE_TAPE) { 1395 if ((scp->request->timeout / HZ) > 0xFFFF) 1396 pthru->timeout = cpu_to_le16(0xFFFF); 1397 else 1398 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1399 } 1400 1401 /* 1402 * Construct SGL 1403 */ 1404 if (instance->flag_ieee == 1) { 1405 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1406 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1407 &pthru->sgl); 1408 } else if (IS_DMA64) { 1409 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1410 pthru->sge_count = megasas_make_sgl64(instance, scp, 1411 &pthru->sgl); 1412 } else 1413 pthru->sge_count = megasas_make_sgl32(instance, scp, 1414 &pthru->sgl); 1415 1416 if (pthru->sge_count > instance->max_num_sge) { 1417 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1418 pthru->sge_count); 1419 return 0; 1420 } 1421 1422 /* 1423 * Sense info specific 1424 */ 1425 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1426 pthru->sense_buf_phys_addr_hi = 1427 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1428 pthru->sense_buf_phys_addr_lo = 1429 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1430 1431 /* 1432 * Compute the total number of frames this command consumes. FW uses 1433 * this number to pull sufficient number of frames from host memory. 1434 */ 1435 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1436 PTHRU_FRAME); 1437 1438 return cmd->frame_count; 1439 } 1440 1441 /** 1442 * megasas_build_ldio - Prepares IOs to logical devices 1443 * @instance: Adapter soft state 1444 * @scp: SCSI command 1445 * @cmd: Command to be prepared 1446 * 1447 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
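 * The start LBA and LBA count are decoded here from 6/10/12/16-byte
 * READ/WRITE CDBs. Like megasas_build_dcdb(), it returns the frame count,
 * or 0 when the SGE count exceeds max_num_sge.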
1448 */ 1449 static int 1450 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1451 struct megasas_cmd *cmd) 1452 { 1453 u32 device_id; 1454 u8 sc = scp->cmnd[0]; 1455 u16 flags = 0; 1456 struct megasas_io_frame *ldio; 1457 1458 device_id = MEGASAS_DEV_INDEX(scp); 1459 ldio = (struct megasas_io_frame *)cmd->frame; 1460 1461 if (scp->sc_data_direction == DMA_TO_DEVICE) 1462 flags = MFI_FRAME_DIR_WRITE; 1463 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1464 flags = MFI_FRAME_DIR_READ; 1465 1466 if (instance->flag_ieee == 1) { 1467 flags |= MFI_FRAME_IEEE; 1468 } 1469 1470 /* 1471 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1472 */ 1473 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1474 ldio->cmd_status = 0x0; 1475 ldio->scsi_status = 0x0; 1476 ldio->target_id = device_id; 1477 ldio->timeout = 0; 1478 ldio->reserved_0 = 0; 1479 ldio->pad_0 = 0; 1480 ldio->flags = cpu_to_le16(flags); 1481 ldio->start_lba_hi = 0; 1482 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1483 1484 /* 1485 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1486 */ 1487 if (scp->cmd_len == 6) { 1488 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1489 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1490 ((u32) scp->cmnd[2] << 8) | 1491 (u32) scp->cmnd[3]); 1492 1493 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1494 } 1495 1496 /* 1497 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1498 */ 1499 else if (scp->cmd_len == 10) { 1500 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1501 ((u32) scp->cmnd[7] << 8)); 1502 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1503 ((u32) scp->cmnd[3] << 16) | 1504 ((u32) scp->cmnd[4] << 8) | 1505 (u32) scp->cmnd[5]); 1506 } 1507 1508 /* 1509 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1510 */ 1511 else if (scp->cmd_len == 12) { 1512 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1513 ((u32) scp->cmnd[7] << 16) | 1514 ((u32) scp->cmnd[8] << 8) | 1515 (u32) scp->cmnd[9]); 1516 1517 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1518 ((u32) scp->cmnd[3] << 16) | 1519 ((u32) scp->cmnd[4] << 8) | 1520 (u32) scp->cmnd[5]); 1521 } 1522 1523 /* 1524 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1525 */ 1526 else if (scp->cmd_len == 16) { 1527 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1528 ((u32) scp->cmnd[11] << 16) | 1529 ((u32) scp->cmnd[12] << 8) | 1530 (u32) scp->cmnd[13]); 1531 1532 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1533 ((u32) scp->cmnd[7] << 16) | 1534 ((u32) scp->cmnd[8] << 8) | 1535 (u32) scp->cmnd[9]); 1536 1537 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1538 ((u32) scp->cmnd[3] << 16) | 1539 ((u32) scp->cmnd[4] << 8) | 1540 (u32) scp->cmnd[5]); 1541 1542 } 1543 1544 /* 1545 * Construct SGL 1546 */ 1547 if (instance->flag_ieee) { 1548 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1549 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1550 &ldio->sgl); 1551 } else if (IS_DMA64) { 1552 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1553 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1554 } else 1555 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1556 1557 if (ldio->sge_count > instance->max_num_sge) { 1558 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1559 ldio->sge_count); 1560 return 0; 1561 } 1562 1563 /* 1564 * Sense info specific 1565 */ 1566 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1567 ldio->sense_buf_phys_addr_hi = 0; 1568 
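	/*
	 * Unlike the pass-through path, only the low 32 bits of the sense
	 * buffer address are programmed for LD IO; the high half stays zero.
	 */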
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1569 1570 /* 1571 * Compute the total number of frames this command consumes. FW uses 1572 * this number to pull sufficient number of frames from host memory. 1573 */ 1574 cmd->frame_count = megasas_get_frame_count(instance, 1575 ldio->sge_count, IO_FRAME); 1576 1577 return cmd->frame_count; 1578 } 1579 1580 /** 1581 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1582 * and whether it's RW or non RW 1583 * @scmd: SCSI command 1584 * 1585 */ 1586 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1587 { 1588 int ret; 1589 1590 switch (cmd->cmnd[0]) { 1591 case READ_10: 1592 case WRITE_10: 1593 case READ_12: 1594 case WRITE_12: 1595 case READ_6: 1596 case WRITE_6: 1597 case READ_16: 1598 case WRITE_16: 1599 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1600 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1601 break; 1602 default: 1603 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1604 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1605 } 1606 return ret; 1607 } 1608 1609 /** 1610 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1611 * in FW 1612 * @instance: Adapter soft state 1613 */ 1614 static inline void 1615 megasas_dump_pending_frames(struct megasas_instance *instance) 1616 { 1617 struct megasas_cmd *cmd; 1618 int i,n; 1619 union megasas_sgl *mfi_sgl; 1620 struct megasas_io_frame *ldio; 1621 struct megasas_pthru_frame *pthru; 1622 u32 sgcount; 1623 u16 max_cmd = instance->max_fw_cmds; 1624 1625 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1626 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1627 if (IS_DMA64) 1628 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1629 else 1630 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1631 1632 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1633 for (i = 0; i < max_cmd; i++) { 1634 cmd = instance->cmd_list[i]; 1635 if (!cmd->scmd) 1636 continue; 1637 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1638 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1639 ldio = (struct megasas_io_frame *)cmd->frame; 1640 mfi_sgl = &ldio->sgl; 1641 sgcount = ldio->sge_count; 1642 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1643 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1644 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1645 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1646 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1647 } else { 1648 pthru = (struct megasas_pthru_frame *) cmd->frame; 1649 mfi_sgl = &pthru->sgl; 1650 sgcount = pthru->sge_count; 1651 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1652 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1653 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1654 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1655 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1656 } 1657 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1658 for (n = 0; n < sgcount; n++) { 1659 if (IS_DMA64) 1660 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1661 le32_to_cpu(mfi_sgl->sge64[n].length), 1662 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1663 else 1664 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1665 le32_to_cpu(mfi_sgl->sge32[n].length), 1666 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1667 } 1668 } 1669 } /*for max_cmd*/ 1670 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1671 for (i = 0; i < max_cmd; i++) { 1672 1673 cmd = instance->cmd_list[i]; 1674 1675 if (cmd->sync_cmd == 1) 1676 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1677 } 1678 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1679 } 1680 1681 u32 1682 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1683 struct scsi_cmnd *scmd) 1684 { 1685 struct megasas_cmd *cmd; 1686 u32 frame_count; 1687 1688 cmd = megasas_get_cmd(instance); 1689 if (!cmd) 1690 return SCSI_MLQUEUE_HOST_BUSY; 1691 1692 /* 1693 * Logical drive command 1694 */ 1695 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1696 frame_count = megasas_build_ldio(instance, scmd, cmd); 1697 else 1698 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1699 1700 if (!frame_count) 1701 goto out_return_cmd; 1702 1703 cmd->scmd = scmd; 1704 scmd->SCp.ptr = (char *)cmd; 1705 1706 /* 1707 * Issue the command to the FW 1708 */ 1709 atomic_inc(&instance->fw_outstanding); 1710 1711 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1712 cmd->frame_count-1, instance->reg_set); 1713 1714 return 0; 1715 out_return_cmd: 1716 megasas_return_cmd(instance, cmd); 1717 return SCSI_MLQUEUE_HOST_BUSY; 1718 } 1719 1720 1721 /** 1722 * megasas_queue_command - Queue entry point 1723 * @scmd: SCSI command to be queued 1724 * @done: Callback entry point 1725 */ 1726 static int 1727 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1728 { 1729 struct megasas_instance *instance; 1730 struct MR_PRIV_DEVICE *mr_device_priv_data; 1731 1732 instance = (struct megasas_instance *) 1733 scmd->device->host->hostdata; 1734 1735 if (instance->unload == 1) { 1736 scmd->result = DID_NO_CONNECT << 16; 1737 scmd->scsi_done(scmd); 1738 return 0; 1739 } 1740 1741 if (instance->issuepend_done == 0) 1742 return SCSI_MLQUEUE_HOST_BUSY; 1743 1744 1745 /* Check for an mpio path and adjust behavior */ 1746 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1747 if (megasas_check_mpio_paths(instance, scmd) == 1748 (DID_REQUEUE << 16)) { 1749 return SCSI_MLQUEUE_HOST_BUSY; 1750 } else { 1751 scmd->result = DID_NO_CONNECT << 16; 1752 scmd->scsi_done(scmd); 1753 return 0; 1754 } 1755 } 1756 1757 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1758 scmd->result = DID_NO_CONNECT << 16; 1759 scmd->scsi_done(scmd); 1760 return 0; 1761 } 1762 1763 mr_device_priv_data = scmd->device->hostdata; 1764 if (!mr_device_priv_data) { 1765 scmd->result = DID_NO_CONNECT << 16; 1766 scmd->scsi_done(scmd); 1767 return 0; 1768 } 1769 1770 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1771 return SCSI_MLQUEUE_HOST_BUSY; 1772 1773 if (mr_device_priv_data->tm_busy) 1774 return SCSI_MLQUEUE_DEVICE_BUSY; 1775 1776 1777 scmd->result = 0; 1778 1779 if (MEGASAS_IS_LOGICAL(scmd->device) && 1780 (scmd->device->id >= instance->fw_supported_vd_count || 1781 scmd->device->lun)) { 1782 scmd->result = DID_BAD_TARGET << 16; 1783 goto out_done; 1784 } 1785 1786 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1787 
MEGASAS_IS_LOGICAL(scmd->device) && 1788 (!instance->fw_sync_cache_support)) { 1789 scmd->result = DID_OK << 16; 1790 goto out_done; 1791 } 1792 1793 return instance->instancet->build_and_issue_cmd(instance, scmd); 1794 1795 out_done: 1796 scmd->scsi_done(scmd); 1797 return 0; 1798 } 1799 1800 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1801 { 1802 int i; 1803 1804 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1805 1806 if ((megasas_mgmt_info.instance[i]) && 1807 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1808 return megasas_mgmt_info.instance[i]; 1809 } 1810 1811 return NULL; 1812 } 1813 1814 /* 1815 * megasas_set_dynamic_target_properties - 1816 * Device property set by driver may not be static and it is required to be 1817 * updated after OCR 1818 * 1819 * set tm_capable. 1820 * set dma alignment (only for eedp protection enable vd). 1821 * 1822 * @sdev: OS provided scsi device 1823 * 1824 * Returns void 1825 */ 1826 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1827 bool is_target_prop) 1828 { 1829 u16 pd_index = 0, ld; 1830 u32 device_id; 1831 struct megasas_instance *instance; 1832 struct fusion_context *fusion; 1833 struct MR_PRIV_DEVICE *mr_device_priv_data; 1834 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1835 struct MR_LD_RAID *raid; 1836 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1837 1838 instance = megasas_lookup_instance(sdev->host->host_no); 1839 fusion = instance->ctrl_context; 1840 mr_device_priv_data = sdev->hostdata; 1841 1842 if (!fusion || !mr_device_priv_data) 1843 return; 1844 1845 if (MEGASAS_IS_LOGICAL(sdev)) { 1846 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1847 + sdev->id; 1848 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1849 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1850 if (ld >= instance->fw_supported_vd_count) 1851 return; 1852 raid = MR_LdRaidGet(ld, local_map_ptr); 1853 1854 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1855 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1856 1857 mr_device_priv_data->is_tm_capable = 1858 raid->capability.tmCapable; 1859 } else if (instance->use_seqnum_jbod_fp) { 1860 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1861 sdev->id; 1862 pd_sync = (void *)fusion->pd_seq_sync 1863 [(instance->pd_seq_map_id - 1) & 1]; 1864 mr_device_priv_data->is_tm_capable = 1865 pd_sync->seq[pd_index].capability.tmCapable; 1866 } 1867 1868 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1869 /* 1870 * If FW provides a target reset timeout value, driver will use 1871 * it. If not set, fallback to default values. 1872 */ 1873 mr_device_priv_data->target_reset_tmo = 1874 min_t(u8, instance->max_reset_tmo, 1875 instance->tgt_prop->reset_tmo); 1876 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1877 } else { 1878 mr_device_priv_data->target_reset_tmo = 1879 MEGASAS_DEFAULT_TM_TIMEOUT; 1880 mr_device_priv_data->task_abort_tmo = 1881 MEGASAS_DEFAULT_TM_TIMEOUT; 1882 } 1883 } 1884 1885 /* 1886 * megasas_set_nvme_device_properties - 1887 * set nomerges=2 1888 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1889 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 1890 * 1891 * MR firmware provides value in KB. Caller of this function converts 1892 * kb into bytes. 1893 * 1894 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1895 * MR firmware provides value 128 as (32 * 4K) = 128K. 
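 * Worked example: with MDTS=5 and a 4K NVMe page the firmware reports 128
 * (KB); the caller passes 128 << 10 = 131072 bytes, and
 * blk_queue_max_hw_sectors() below is programmed with 131072 / 512 = 256
 * sectors.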
1896 * 1897 * @sdev: scsi device 1898 * @max_io_size: maximum io transfer size 1899 * 1900 */ 1901 static inline void 1902 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1903 { 1904 struct megasas_instance *instance; 1905 u32 mr_nvme_pg_size; 1906 1907 instance = (struct megasas_instance *)sdev->host->hostdata; 1908 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1909 MR_DEFAULT_NVME_PAGE_SIZE); 1910 1911 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1912 1913 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1914 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1915 } 1916 1917 1918 /* 1919 * megasas_set_static_target_properties - 1920 * Device property set by driver are static and it is not required to be 1921 * updated after OCR. 1922 * 1923 * set io timeout 1924 * set device queue depth 1925 * set nvme device properties. see - megasas_set_nvme_device_properties 1926 * 1927 * @sdev: scsi device 1928 * @is_target_prop true, if fw provided target properties. 1929 */ 1930 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1931 bool is_target_prop) 1932 { 1933 u16 target_index = 0; 1934 u8 interface_type; 1935 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1936 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1937 u32 tgt_device_qd; 1938 struct megasas_instance *instance; 1939 struct MR_PRIV_DEVICE *mr_device_priv_data; 1940 1941 instance = megasas_lookup_instance(sdev->host->host_no); 1942 mr_device_priv_data = sdev->hostdata; 1943 interface_type = mr_device_priv_data->interface_type; 1944 1945 /* 1946 * The RAID firmware may require extended timeouts. 1947 */ 1948 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1949 1950 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 1951 1952 switch (interface_type) { 1953 case SAS_PD: 1954 device_qd = MEGASAS_SAS_QD; 1955 break; 1956 case SATA_PD: 1957 device_qd = MEGASAS_SATA_QD; 1958 break; 1959 case NVME_PD: 1960 device_qd = MEGASAS_NVME_QD; 1961 break; 1962 } 1963 1964 if (is_target_prop) { 1965 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1966 if (tgt_device_qd && 1967 (tgt_device_qd <= instance->host->can_queue)) 1968 device_qd = tgt_device_qd; 1969 1970 /* max_io_size_kb will be set to non zero for 1971 * nvme based vd and syspd. 1972 */ 1973 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1974 } 1975 1976 if (instance->nvme_page_size && max_io_size_kb) 1977 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 1978 1979 scsi_change_queue_depth(sdev, device_qd); 1980 1981 } 1982 1983 1984 static int megasas_slave_configure(struct scsi_device *sdev) 1985 { 1986 u16 pd_index = 0; 1987 struct megasas_instance *instance; 1988 int ret_target_prop = DCMD_FAILED; 1989 bool is_target_prop = false; 1990 1991 instance = megasas_lookup_instance(sdev->host->host_no); 1992 if (instance->pd_list_not_supported) { 1993 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 1994 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1995 sdev->id; 1996 if (instance->pd_list[pd_index].driveState != 1997 MR_PD_STATE_SYSTEM) 1998 return -ENXIO; 1999 } 2000 } 2001 2002 mutex_lock(&instance->reset_mutex); 2003 /* Send DCMD to Firmware and cache the information */ 2004 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2005 megasas_get_pd_info(instance, sdev); 2006 2007 /* Some ventura firmware may not have instance->nvme_page_size set. 
2008 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2009 */ 2010 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2011 ret_target_prop = megasas_get_target_prop(instance, sdev); 2012 2013 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2014 megasas_set_static_target_properties(sdev, is_target_prop); 2015 2016 /* This sdev property may change post OCR */ 2017 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2018 2019 mutex_unlock(&instance->reset_mutex); 2020 2021 return 0; 2022 } 2023 2024 static int megasas_slave_alloc(struct scsi_device *sdev) 2025 { 2026 u16 pd_index = 0; 2027 struct megasas_instance *instance ; 2028 struct MR_PRIV_DEVICE *mr_device_priv_data; 2029 2030 instance = megasas_lookup_instance(sdev->host->host_no); 2031 if (!MEGASAS_IS_LOGICAL(sdev)) { 2032 /* 2033 * Open the OS scan to the SYSTEM PD 2034 */ 2035 pd_index = 2036 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2037 sdev->id; 2038 if ((instance->pd_list_not_supported || 2039 instance->pd_list[pd_index].driveState == 2040 MR_PD_STATE_SYSTEM)) { 2041 goto scan_target; 2042 } 2043 return -ENXIO; 2044 } 2045 2046 scan_target: 2047 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2048 GFP_KERNEL); 2049 if (!mr_device_priv_data) 2050 return -ENOMEM; 2051 sdev->hostdata = mr_device_priv_data; 2052 2053 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2054 instance->r1_ldio_hint_default); 2055 return 0; 2056 } 2057 2058 static void megasas_slave_destroy(struct scsi_device *sdev) 2059 { 2060 kfree(sdev->hostdata); 2061 sdev->hostdata = NULL; 2062 } 2063 2064 /* 2065 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2066 * kill adapter 2067 * @instance: Adapter soft state 2068 * 2069 */ 2070 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2071 { 2072 int i; 2073 struct megasas_cmd *cmd_mfi; 2074 struct megasas_cmd_fusion *cmd_fusion; 2075 struct fusion_context *fusion = instance->ctrl_context; 2076 2077 /* Find all outstanding ioctls */ 2078 if (fusion) { 2079 for (i = 0; i < instance->max_fw_cmds; i++) { 2080 cmd_fusion = fusion->cmd_list[i]; 2081 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2082 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2083 if (cmd_mfi->sync_cmd && 2084 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2085 cmd_mfi->frame->hdr.cmd_status = 2086 MFI_STAT_WRONG_STATE; 2087 megasas_complete_cmd(instance, 2088 cmd_mfi, DID_OK); 2089 } 2090 } 2091 } 2092 } else { 2093 for (i = 0; i < instance->max_fw_cmds; i++) { 2094 cmd_mfi = instance->cmd_list[i]; 2095 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2096 MFI_CMD_ABORT) 2097 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2098 } 2099 } 2100 } 2101 2102 2103 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2104 { 2105 /* Set critical error to block I/O & ioctls in case caller didn't */ 2106 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2107 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2108 msleep(1000); 2109 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2110 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2111 (instance->adapter_type != MFI_SERIES)) { 2112 if (!instance->requestorId) { 2113 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2114 /* Flush */ 2115 readl(&instance->reg_set->doorbell); 2116 } 2117 if (instance->requestorId && instance->peerIsPresent) 2118 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2119 } else { 2120 
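		/*
		 * Remaining MFI-series controllers (other than the skinny
		 * parts handled above) take the stop-adapter request through
		 * the legacy inbound_doorbell register.
		 */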
writel(MFI_STOP_ADP, 2121 &instance->reg_set->inbound_doorbell); 2122 } 2123 /* Complete outstanding ioctls when adapter is killed */ 2124 megasas_complete_outstanding_ioctls(instance); 2125 } 2126 2127 /** 2128 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2129 * restored to max value 2130 * @instance: Adapter soft state 2131 * 2132 */ 2133 void 2134 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2135 { 2136 unsigned long flags; 2137 2138 if (instance->flag & MEGASAS_FW_BUSY 2139 && time_after(jiffies, instance->last_time + 5 * HZ) 2140 && atomic_read(&instance->fw_outstanding) < 2141 instance->throttlequeuedepth + 1) { 2142 2143 spin_lock_irqsave(instance->host->host_lock, flags); 2144 instance->flag &= ~MEGASAS_FW_BUSY; 2145 2146 instance->host->can_queue = instance->cur_can_queue; 2147 spin_unlock_irqrestore(instance->host->host_lock, flags); 2148 } 2149 } 2150 2151 /** 2152 * megasas_complete_cmd_dpc - Returns FW's controller structure 2153 * @instance_addr: Address of adapter soft state 2154 * 2155 * Tasklet to complete cmds 2156 */ 2157 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2158 { 2159 u32 producer; 2160 u32 consumer; 2161 u32 context; 2162 struct megasas_cmd *cmd; 2163 struct megasas_instance *instance = 2164 (struct megasas_instance *)instance_addr; 2165 unsigned long flags; 2166 2167 /* If we have already declared adapter dead, donot complete cmds */ 2168 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2169 return; 2170 2171 spin_lock_irqsave(&instance->completion_lock, flags); 2172 2173 producer = le32_to_cpu(*instance->producer); 2174 consumer = le32_to_cpu(*instance->consumer); 2175 2176 while (consumer != producer) { 2177 context = le32_to_cpu(instance->reply_queue[consumer]); 2178 if (context >= instance->max_fw_cmds) { 2179 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2180 context); 2181 BUG(); 2182 } 2183 2184 cmd = instance->cmd_list[context]; 2185 2186 megasas_complete_cmd(instance, cmd, DID_OK); 2187 2188 consumer++; 2189 if (consumer == (instance->max_fw_cmds + 1)) { 2190 consumer = 0; 2191 } 2192 } 2193 2194 *instance->consumer = cpu_to_le32(producer); 2195 2196 spin_unlock_irqrestore(&instance->completion_lock, flags); 2197 2198 /* 2199 * Check if we can restore can_queue 2200 */ 2201 megasas_check_and_restore_queue_depth(instance); 2202 } 2203 2204 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2205 2206 /** 2207 * megasas_start_timer - Initializes sriov heartbeat timer object 2208 * @instance: Adapter soft state 2209 * 2210 */ 2211 void megasas_start_timer(struct megasas_instance *instance) 2212 { 2213 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2214 2215 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2216 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2217 add_timer(timer); 2218 } 2219 2220 static void 2221 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2222 2223 static void 2224 process_fw_state_change_wq(struct work_struct *work); 2225 2226 void megasas_do_ocr(struct megasas_instance *instance) 2227 { 2228 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2229 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2230 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2231 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2232 } 2233 instance->instancet->disable_intr(instance); 2234 atomic_set(&instance->adprecovery, 
MEGASAS_ADPRESET_SM_INFAULT); 2235 instance->issuepend_done = 0; 2236 2237 atomic_set(&instance->fw_outstanding, 0); 2238 megasas_internal_reset_defer_cmds(instance); 2239 process_fw_state_change_wq(&instance->work_init); 2240 } 2241 2242 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2243 int initial) 2244 { 2245 struct megasas_cmd *cmd; 2246 struct megasas_dcmd_frame *dcmd; 2247 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2248 dma_addr_t new_affiliation_111_h; 2249 int ld, retval = 0; 2250 u8 thisVf; 2251 2252 cmd = megasas_get_cmd(instance); 2253 2254 if (!cmd) { 2255 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2256 "Failed to get cmd for scsi%d\n", 2257 instance->host->host_no); 2258 return -ENOMEM; 2259 } 2260 2261 dcmd = &cmd->frame->dcmd; 2262 2263 if (!instance->vf_affiliation_111) { 2264 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2265 "affiliation for scsi%d\n", instance->host->host_no); 2266 megasas_return_cmd(instance, cmd); 2267 return -ENOMEM; 2268 } 2269 2270 if (initial) 2271 memset(instance->vf_affiliation_111, 0, 2272 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2273 else { 2274 new_affiliation_111 = 2275 dma_alloc_coherent(&instance->pdev->dev, 2276 sizeof(struct MR_LD_VF_AFFILIATION_111), 2277 &new_affiliation_111_h, GFP_KERNEL); 2278 if (!new_affiliation_111) { 2279 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2280 "memory for new affiliation for scsi%d\n", 2281 instance->host->host_no); 2282 megasas_return_cmd(instance, cmd); 2283 return -ENOMEM; 2284 } 2285 } 2286 2287 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2288 2289 dcmd->cmd = MFI_CMD_DCMD; 2290 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2291 dcmd->sge_count = 1; 2292 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2293 dcmd->timeout = 0; 2294 dcmd->pad_0 = 0; 2295 dcmd->data_xfer_len = 2296 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2297 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2298 2299 if (initial) 2300 dcmd->sgl.sge32[0].phys_addr = 2301 cpu_to_le32(instance->vf_affiliation_111_h); 2302 else 2303 dcmd->sgl.sge32[0].phys_addr = 2304 cpu_to_le32(new_affiliation_111_h); 2305 2306 dcmd->sgl.sge32[0].length = cpu_to_le32( 2307 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2308 2309 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2310 "scsi%d\n", instance->host->host_no); 2311 2312 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2313 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2314 " failed with status 0x%x for scsi%d\n", 2315 dcmd->cmd_status, instance->host->host_no); 2316 retval = 1; /* Do a scan if we couldn't get affiliation */ 2317 goto out; 2318 } 2319 2320 if (!initial) { 2321 thisVf = new_affiliation_111->thisVf; 2322 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2323 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2324 new_affiliation_111->map[ld].policy[thisVf]) { 2325 dev_warn(&instance->pdev->dev, "SR-IOV: " 2326 "Got new LD/VF affiliation for scsi%d\n", 2327 instance->host->host_no); 2328 memcpy(instance->vf_affiliation_111, 2329 new_affiliation_111, 2330 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2331 retval = 1; 2332 goto out; 2333 } 2334 } 2335 out: 2336 if (new_affiliation_111) { 2337 dma_free_coherent(&instance->pdev->dev, 2338 sizeof(struct MR_LD_VF_AFFILIATION_111), 2339 new_affiliation_111, 2340 new_affiliation_111_h); 2341 } 2342 2343 
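	/*
	 * The DCMD frame is always returned to the pool here; retval is 1
	 * when the caller should rescan (the affiliation changed or could
	 * not be read), 0 otherwise.
	 */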
megasas_return_cmd(instance, cmd); 2344 2345 return retval; 2346 } 2347 2348 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2349 int initial) 2350 { 2351 struct megasas_cmd *cmd; 2352 struct megasas_dcmd_frame *dcmd; 2353 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2354 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2355 dma_addr_t new_affiliation_h; 2356 int i, j, retval = 0, found = 0, doscan = 0; 2357 u8 thisVf; 2358 2359 cmd = megasas_get_cmd(instance); 2360 2361 if (!cmd) { 2362 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2363 "Failed to get cmd for scsi%d\n", 2364 instance->host->host_no); 2365 return -ENOMEM; 2366 } 2367 2368 dcmd = &cmd->frame->dcmd; 2369 2370 if (!instance->vf_affiliation) { 2371 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2372 "affiliation for scsi%d\n", instance->host->host_no); 2373 megasas_return_cmd(instance, cmd); 2374 return -ENOMEM; 2375 } 2376 2377 if (initial) 2378 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2379 sizeof(struct MR_LD_VF_AFFILIATION)); 2380 else { 2381 new_affiliation = 2382 dma_alloc_coherent(&instance->pdev->dev, 2383 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2384 &new_affiliation_h, GFP_KERNEL); 2385 if (!new_affiliation) { 2386 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2387 "memory for new affiliation for scsi%d\n", 2388 instance->host->host_no); 2389 megasas_return_cmd(instance, cmd); 2390 return -ENOMEM; 2391 } 2392 } 2393 2394 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2395 2396 dcmd->cmd = MFI_CMD_DCMD; 2397 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2398 dcmd->sge_count = 1; 2399 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2400 dcmd->timeout = 0; 2401 dcmd->pad_0 = 0; 2402 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2403 sizeof(struct MR_LD_VF_AFFILIATION)); 2404 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2405 2406 if (initial) 2407 dcmd->sgl.sge32[0].phys_addr = 2408 cpu_to_le32(instance->vf_affiliation_h); 2409 else 2410 dcmd->sgl.sge32[0].phys_addr = 2411 cpu_to_le32(new_affiliation_h); 2412 2413 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2414 sizeof(struct MR_LD_VF_AFFILIATION)); 2415 2416 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2417 "scsi%d\n", instance->host->host_no); 2418 2419 2420 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2421 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2422 " failed with status 0x%x for scsi%d\n", 2423 dcmd->cmd_status, instance->host->host_no); 2424 retval = 1; /* Do a scan if we couldn't get affiliation */ 2425 goto out; 2426 } 2427 2428 if (!initial) { 2429 if (!new_affiliation->ldCount) { 2430 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2431 "affiliation for passive path for scsi%d\n", 2432 instance->host->host_no); 2433 retval = 1; 2434 goto out; 2435 } 2436 newmap = new_affiliation->map; 2437 savedmap = instance->vf_affiliation->map; 2438 thisVf = new_affiliation->thisVf; 2439 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2440 found = 0; 2441 for (j = 0; j < instance->vf_affiliation->ldCount; 2442 j++) { 2443 if (newmap->ref.targetId == 2444 savedmap->ref.targetId) { 2445 found = 1; 2446 if (newmap->policy[thisVf] != 2447 savedmap->policy[thisVf]) { 2448 doscan = 1; 2449 goto out; 2450 } 2451 } 2452 savedmap = (struct MR_LD_VF_MAP *) 2453 ((unsigned char *)savedmap + 2454 
savedmap->size); 2455 } 2456 if (!found && newmap->policy[thisVf] != 2457 MR_LD_ACCESS_HIDDEN) { 2458 doscan = 1; 2459 goto out; 2460 } 2461 newmap = (struct MR_LD_VF_MAP *) 2462 ((unsigned char *)newmap + newmap->size); 2463 } 2464 2465 newmap = new_affiliation->map; 2466 savedmap = instance->vf_affiliation->map; 2467 2468 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2469 found = 0; 2470 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2471 if (savedmap->ref.targetId == 2472 newmap->ref.targetId) { 2473 found = 1; 2474 if (savedmap->policy[thisVf] != 2475 newmap->policy[thisVf]) { 2476 doscan = 1; 2477 goto out; 2478 } 2479 } 2480 newmap = (struct MR_LD_VF_MAP *) 2481 ((unsigned char *)newmap + 2482 newmap->size); 2483 } 2484 if (!found && savedmap->policy[thisVf] != 2485 MR_LD_ACCESS_HIDDEN) { 2486 doscan = 1; 2487 goto out; 2488 } 2489 savedmap = (struct MR_LD_VF_MAP *) 2490 ((unsigned char *)savedmap + 2491 savedmap->size); 2492 } 2493 } 2494 out: 2495 if (doscan) { 2496 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2497 "affiliation for scsi%d\n", instance->host->host_no); 2498 memcpy(instance->vf_affiliation, new_affiliation, 2499 new_affiliation->size); 2500 retval = 1; 2501 } 2502 2503 if (new_affiliation) 2504 dma_free_coherent(&instance->pdev->dev, 2505 (MAX_LOGICAL_DRIVES + 1) * 2506 sizeof(struct MR_LD_VF_AFFILIATION), 2507 new_affiliation, new_affiliation_h); 2508 megasas_return_cmd(instance, cmd); 2509 2510 return retval; 2511 } 2512 2513 /* This function will get the current SR-IOV LD/VF affiliation */ 2514 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2515 int initial) 2516 { 2517 int retval; 2518 2519 if (instance->PlasmaFW111) 2520 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2521 else 2522 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2523 return retval; 2524 } 2525 2526 /* This function will tell FW to start the SR-IOV heartbeat */ 2527 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2528 int initial) 2529 { 2530 struct megasas_cmd *cmd; 2531 struct megasas_dcmd_frame *dcmd; 2532 int retval = 0; 2533 2534 cmd = megasas_get_cmd(instance); 2535 2536 if (!cmd) { 2537 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2538 "Failed to get cmd for scsi%d\n", 2539 instance->host->host_no); 2540 return -ENOMEM; 2541 } 2542 2543 dcmd = &cmd->frame->dcmd; 2544 2545 if (initial) { 2546 instance->hb_host_mem = 2547 dma_alloc_coherent(&instance->pdev->dev, 2548 sizeof(struct MR_CTRL_HB_HOST_MEM), 2549 &instance->hb_host_mem_h, 2550 GFP_KERNEL); 2551 if (!instance->hb_host_mem) { 2552 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2553 " memory for heartbeat host memory for scsi%d\n", 2554 instance->host->host_no); 2555 retval = -ENOMEM; 2556 goto out; 2557 } 2558 } 2559 2560 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2561 2562 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2563 dcmd->cmd = MFI_CMD_DCMD; 2564 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2565 dcmd->sge_count = 1; 2566 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2567 dcmd->timeout = 0; 2568 dcmd->pad_0 = 0; 2569 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2570 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2571 2572 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2573 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2574 2575 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for 
scsi%d\n", 2576 instance->host->host_no); 2577 2578 if ((instance->adapter_type != MFI_SERIES) && 2579 !instance->mask_interrupts) 2580 retval = megasas_issue_blocked_cmd(instance, cmd, 2581 MEGASAS_ROUTINE_WAIT_TIME_VF); 2582 else 2583 retval = megasas_issue_polled(instance, cmd); 2584 2585 if (retval) { 2586 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2587 "_MEM_ALLOC DCMD %s for scsi%d\n", 2588 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2589 "timed out" : "failed", instance->host->host_no); 2590 retval = 1; 2591 } 2592 2593 out: 2594 megasas_return_cmd(instance, cmd); 2595 2596 return retval; 2597 } 2598 2599 /* Handler for SR-IOV heartbeat */ 2600 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2601 { 2602 struct megasas_instance *instance = 2603 from_timer(instance, t, sriov_heartbeat_timer); 2604 2605 if (instance->hb_host_mem->HB.fwCounter != 2606 instance->hb_host_mem->HB.driverCounter) { 2607 instance->hb_host_mem->HB.driverCounter = 2608 instance->hb_host_mem->HB.fwCounter; 2609 mod_timer(&instance->sriov_heartbeat_timer, 2610 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2611 } else { 2612 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2613 "completed for scsi%d\n", instance->host->host_no); 2614 schedule_work(&instance->work_init); 2615 } 2616 } 2617 2618 /** 2619 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2620 * @instance: Adapter soft state 2621 * 2622 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2623 * complete all its outstanding commands. Returns error if one or more IOs 2624 * are pending after this time period. It also marks the controller dead. 2625 */ 2626 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2627 { 2628 int i, sl, outstanding; 2629 u32 reset_index; 2630 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2631 unsigned long flags; 2632 struct list_head clist_local; 2633 struct megasas_cmd *reset_cmd; 2634 u32 fw_state; 2635 2636 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2637 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2638 __func__, __LINE__); 2639 return FAILED; 2640 } 2641 2642 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2643 2644 INIT_LIST_HEAD(&clist_local); 2645 spin_lock_irqsave(&instance->hba_lock, flags); 2646 list_splice_init(&instance->internal_reset_pending_q, 2647 &clist_local); 2648 spin_unlock_irqrestore(&instance->hba_lock, flags); 2649 2650 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2651 for (i = 0; i < wait_time; i++) { 2652 msleep(1000); 2653 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2654 break; 2655 } 2656 2657 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2658 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2659 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2660 return FAILED; 2661 } 2662 2663 reset_index = 0; 2664 while (!list_empty(&clist_local)) { 2665 reset_cmd = list_entry((&clist_local)->next, 2666 struct megasas_cmd, list); 2667 list_del_init(&reset_cmd->list); 2668 if (reset_cmd->scmd) { 2669 reset_cmd->scmd->result = DID_REQUEUE << 16; 2670 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2671 reset_index, reset_cmd, 2672 reset_cmd->scmd->cmnd[0]); 2673 2674 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2675 megasas_return_cmd(instance, reset_cmd); 2676 } else if (reset_cmd->sync_cmd) { 2677 dev_notice(&instance->pdev->dev, "%p synch cmds" 
2678 "reset queue\n", 2679 reset_cmd); 2680 2681 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2682 instance->instancet->fire_cmd(instance, 2683 reset_cmd->frame_phys_addr, 2684 0, instance->reg_set); 2685 } else { 2686 dev_notice(&instance->pdev->dev, "%p unexpected" 2687 "cmds lst\n", 2688 reset_cmd); 2689 } 2690 reset_index++; 2691 } 2692 2693 return SUCCESS; 2694 } 2695 2696 for (i = 0; i < resetwaittime; i++) { 2697 outstanding = atomic_read(&instance->fw_outstanding); 2698 2699 if (!outstanding) 2700 break; 2701 2702 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2703 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2704 "commands to complete\n",i,outstanding); 2705 /* 2706 * Call cmd completion routine. Cmd to be 2707 * be completed directly without depending on isr. 2708 */ 2709 megasas_complete_cmd_dpc((unsigned long)instance); 2710 } 2711 2712 msleep(1000); 2713 } 2714 2715 i = 0; 2716 outstanding = atomic_read(&instance->fw_outstanding); 2717 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2718 2719 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2720 goto no_outstanding; 2721 2722 if (instance->disableOnlineCtrlReset) 2723 goto kill_hba_and_failed; 2724 do { 2725 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2726 dev_info(&instance->pdev->dev, 2727 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2728 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2729 if (i == 3) 2730 goto kill_hba_and_failed; 2731 megasas_do_ocr(instance); 2732 2733 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2734 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2735 __func__, __LINE__); 2736 return FAILED; 2737 } 2738 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2739 __func__, __LINE__); 2740 2741 for (sl = 0; sl < 10; sl++) 2742 msleep(500); 2743 2744 outstanding = atomic_read(&instance->fw_outstanding); 2745 2746 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2747 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2748 goto no_outstanding; 2749 } 2750 i++; 2751 } while (i <= 3); 2752 2753 no_outstanding: 2754 2755 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2756 __func__, __LINE__); 2757 return SUCCESS; 2758 2759 kill_hba_and_failed: 2760 2761 /* Reset not supported, kill adapter */ 2762 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2763 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2764 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2765 atomic_read(&instance->fw_outstanding)); 2766 megasas_dump_pending_frames(instance); 2767 megaraid_sas_kill_hba(instance); 2768 2769 return FAILED; 2770 } 2771 2772 /** 2773 * megasas_generic_reset - Generic reset routine 2774 * @scmd: Mid-layer SCSI command 2775 * 2776 * This routine implements a generic reset handler for device, bus and host 2777 * reset requests. Device, bus and host specific reset handlers can use this 2778 * function after they do their specific tasks. 
2779 */ 2780 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2781 { 2782 int ret_val; 2783 struct megasas_instance *instance; 2784 2785 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2786 2787 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2788 scmd->cmnd[0], scmd->retries); 2789 2790 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2791 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2792 return FAILED; 2793 } 2794 2795 ret_val = megasas_wait_for_outstanding(instance); 2796 if (ret_val == SUCCESS) 2797 dev_notice(&instance->pdev->dev, "reset successful\n"); 2798 else 2799 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2800 2801 return ret_val; 2802 } 2803 2804 /** 2805 * megasas_reset_timer - quiesce the adapter if required 2806 * @scmd: scsi cmnd 2807 * 2808 * Sets the FW busy flag and reduces the host->can_queue if the 2809 * cmd has not been completed within the timeout period. 2810 */ 2811 static enum 2812 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2813 { 2814 struct megasas_instance *instance; 2815 unsigned long flags; 2816 2817 if (time_after(jiffies, scmd->jiffies_at_alloc + 2818 (scmd_timeout * 2) * HZ)) { 2819 return BLK_EH_DONE; 2820 } 2821 2822 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2823 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2824 /* FW is busy, throttle IO */ 2825 spin_lock_irqsave(instance->host->host_lock, flags); 2826 2827 instance->host->can_queue = instance->throttlequeuedepth; 2828 instance->last_time = jiffies; 2829 instance->flag |= MEGASAS_FW_BUSY; 2830 2831 spin_unlock_irqrestore(instance->host->host_lock, flags); 2832 } 2833 return BLK_EH_RESET_TIMER; 2834 } 2835 2836 /** 2837 * megasas_dump_frame - This function will dump MPT/MFI frame 2838 */ 2839 static inline void 2840 megasas_dump_frame(void *mpi_request, int sz) 2841 { 2842 int i; 2843 __le32 *mfp = (__le32 *)mpi_request; 2844 2845 printk(KERN_INFO "IO request frame:\n\t"); 2846 for (i = 0; i < sz / sizeof(__le32); i++) { 2847 if (i && ((i % 8) == 0)) 2848 printk("\n\t"); 2849 printk("%08x ", le32_to_cpu(mfp[i])); 2850 } 2851 printk("\n"); 2852 } 2853 2854 /** 2855 * megasas_reset_bus_host - Bus & host reset handler entry point 2856 */ 2857 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2858 { 2859 int ret; 2860 struct megasas_instance *instance; 2861 2862 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2863 2864 scmd_printk(KERN_INFO, scmd, 2865 "Controller reset is requested due to IO timeout\n" 2866 "SCSI command pointer: (%p)\t SCSI host state: %d\t" 2867 " SCSI host busy: %d\t FW outstanding: %d\n", 2868 scmd, scmd->device->host->shost_state, 2869 scsi_host_busy(scmd->device->host), 2870 atomic_read(&instance->fw_outstanding)); 2871 2872 /* 2873 * First wait for all commands to complete 2874 */ 2875 if (instance->adapter_type == MFI_SERIES) { 2876 ret = megasas_generic_reset(scmd); 2877 } else { 2878 struct megasas_cmd_fusion *cmd; 2879 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2880 if (cmd) 2881 megasas_dump_frame(cmd->io_request, 2882 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 2883 ret = megasas_reset_fusion(scmd->device->host, 2884 SCSIIO_TIMEOUT_OCR); 2885 } 2886 2887 return ret; 2888 } 2889 2890 /** 2891 * megasas_task_abort - Issues task abort request to firmware 2892 * (supported only for fusion adapters) 2893 * @scmd: SCSI command pointer 2894 */ 2895 static int megasas_task_abort(struct scsi_cmnd 
*scmd) 2896 { 2897 int ret; 2898 struct megasas_instance *instance; 2899 2900 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2901 2902 if (instance->adapter_type != MFI_SERIES) 2903 ret = megasas_task_abort_fusion(scmd); 2904 else { 2905 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2906 ret = FAILED; 2907 } 2908 2909 return ret; 2910 } 2911 2912 /** 2913 * megasas_reset_target: Issues target reset request to firmware 2914 * (supported only for fusion adapters) 2915 * @scmd: SCSI command pointer 2916 */ 2917 static int megasas_reset_target(struct scsi_cmnd *scmd) 2918 { 2919 int ret; 2920 struct megasas_instance *instance; 2921 2922 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2923 2924 if (instance->adapter_type != MFI_SERIES) 2925 ret = megasas_reset_target_fusion(scmd); 2926 else { 2927 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2928 ret = FAILED; 2929 } 2930 2931 return ret; 2932 } 2933 2934 /** 2935 * megasas_bios_param - Returns disk geometry for a disk 2936 * @sdev: device handle 2937 * @bdev: block device 2938 * @capacity: drive capacity 2939 * @geom: geometry parameters 2940 */ 2941 static int 2942 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2943 sector_t capacity, int geom[]) 2944 { 2945 int heads; 2946 int sectors; 2947 sector_t cylinders; 2948 unsigned long tmp; 2949 2950 /* Default heads (64) & sectors (32) */ 2951 heads = 64; 2952 sectors = 32; 2953 2954 tmp = heads * sectors; 2955 cylinders = capacity; 2956 2957 sector_div(cylinders, tmp); 2958 2959 /* 2960 * Handle extended translation size for logical drives > 1Gb 2961 */ 2962 2963 if (capacity >= 0x200000) { 2964 heads = 255; 2965 sectors = 63; 2966 tmp = heads*sectors; 2967 cylinders = capacity; 2968 sector_div(cylinders, tmp); 2969 } 2970 2971 geom[0] = heads; 2972 geom[1] = sectors; 2973 geom[2] = cylinders; 2974 2975 return 0; 2976 } 2977 2978 static void megasas_aen_polling(struct work_struct *work); 2979 2980 /** 2981 * megasas_service_aen - Processes an event notification 2982 * @instance: Adapter soft state 2983 * @cmd: AEN command completed by the ISR 2984 * 2985 * For AEN, driver sends a command down to FW that is held by the FW till an 2986 * event occurs. When an event of interest occurs, FW completes the command 2987 * that it was previously holding. 2988 * 2989 * This routines sends SIGIO signal to processes that have registered with the 2990 * driver for AEN. 
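 * It also wakes up poll(2) waiters on megasas_poll_wait and, unless the
 * driver is unloading, re-arms the hotplug polling work
 * (megasas_aen_polling).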
2991 */ 2992 static void 2993 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2994 { 2995 unsigned long flags; 2996 2997 /* 2998 * Don't signal app if it is just an aborted previously registered aen 2999 */ 3000 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3001 spin_lock_irqsave(&poll_aen_lock, flags); 3002 megasas_poll_wait_aen = 1; 3003 spin_unlock_irqrestore(&poll_aen_lock, flags); 3004 wake_up(&megasas_poll_wait); 3005 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3006 } 3007 else 3008 cmd->abort_aen = 0; 3009 3010 instance->aen_cmd = NULL; 3011 3012 megasas_return_cmd(instance, cmd); 3013 3014 if ((instance->unload == 0) && 3015 ((instance->issuepend_done == 1))) { 3016 struct megasas_aen_event *ev; 3017 3018 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3019 if (!ev) { 3020 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3021 } else { 3022 ev->instance = instance; 3023 instance->ev = ev; 3024 INIT_DELAYED_WORK(&ev->hotplug_work, 3025 megasas_aen_polling); 3026 schedule_delayed_work(&ev->hotplug_work, 0); 3027 } 3028 } 3029 } 3030 3031 static ssize_t 3032 megasas_fw_crash_buffer_store(struct device *cdev, 3033 struct device_attribute *attr, const char *buf, size_t count) 3034 { 3035 struct Scsi_Host *shost = class_to_shost(cdev); 3036 struct megasas_instance *instance = 3037 (struct megasas_instance *) shost->hostdata; 3038 int val = 0; 3039 unsigned long flags; 3040 3041 if (kstrtoint(buf, 0, &val) != 0) 3042 return -EINVAL; 3043 3044 spin_lock_irqsave(&instance->crashdump_lock, flags); 3045 instance->fw_crash_buffer_offset = val; 3046 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3047 return strlen(buf); 3048 } 3049 3050 static ssize_t 3051 megasas_fw_crash_buffer_show(struct device *cdev, 3052 struct device_attribute *attr, char *buf) 3053 { 3054 struct Scsi_Host *shost = class_to_shost(cdev); 3055 struct megasas_instance *instance = 3056 (struct megasas_instance *) shost->hostdata; 3057 u32 size; 3058 unsigned long buff_addr; 3059 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3060 unsigned long src_addr; 3061 unsigned long flags; 3062 u32 buff_offset; 3063 3064 spin_lock_irqsave(&instance->crashdump_lock, flags); 3065 buff_offset = instance->fw_crash_buffer_offset; 3066 if (!instance->crash_dump_buf && 3067 !((instance->fw_crash_state == AVAILABLE) || 3068 (instance->fw_crash_state == COPYING))) { 3069 dev_err(&instance->pdev->dev, 3070 "Firmware crash dump is not available\n"); 3071 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3072 return -EINVAL; 3073 } 3074 3075 buff_addr = (unsigned long) buf; 3076 3077 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3078 dev_err(&instance->pdev->dev, 3079 "Firmware crash dump offset is out of range\n"); 3080 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3081 return 0; 3082 } 3083 3084 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3085 size = (size >= PAGE_SIZE) ? 
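		/* a sysfs read buffer is one page; cap the copy at PAGE_SIZE - 1 */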
(PAGE_SIZE - 1) : size; 3086 3087 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3088 (buff_offset % dmachunk); 3089 memcpy(buf, (void *)src_addr, size); 3090 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3091 3092 return size; 3093 } 3094 3095 static ssize_t 3096 megasas_fw_crash_buffer_size_show(struct device *cdev, 3097 struct device_attribute *attr, char *buf) 3098 { 3099 struct Scsi_Host *shost = class_to_shost(cdev); 3100 struct megasas_instance *instance = 3101 (struct megasas_instance *) shost->hostdata; 3102 3103 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3104 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3105 } 3106 3107 static ssize_t 3108 megasas_fw_crash_state_store(struct device *cdev, 3109 struct device_attribute *attr, const char *buf, size_t count) 3110 { 3111 struct Scsi_Host *shost = class_to_shost(cdev); 3112 struct megasas_instance *instance = 3113 (struct megasas_instance *) shost->hostdata; 3114 int val = 0; 3115 unsigned long flags; 3116 3117 if (kstrtoint(buf, 0, &val) != 0) 3118 return -EINVAL; 3119 3120 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3121 dev_err(&instance->pdev->dev, "application updates invalid " 3122 "firmware crash state\n"); 3123 return -EINVAL; 3124 } 3125 3126 instance->fw_crash_state = val; 3127 3128 if ((val == COPIED) || (val == COPY_ERROR)) { 3129 spin_lock_irqsave(&instance->crashdump_lock, flags); 3130 megasas_free_host_crash_buffer(instance); 3131 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3132 if (val == COPY_ERROR) 3133 dev_info(&instance->pdev->dev, "application failed to " 3134 "copy Firmware crash dump\n"); 3135 else 3136 dev_info(&instance->pdev->dev, "Firmware crash dump " 3137 "copied successfully\n"); 3138 } 3139 return strlen(buf); 3140 } 3141 3142 static ssize_t 3143 megasas_fw_crash_state_show(struct device *cdev, 3144 struct device_attribute *attr, char *buf) 3145 { 3146 struct Scsi_Host *shost = class_to_shost(cdev); 3147 struct megasas_instance *instance = 3148 (struct megasas_instance *) shost->hostdata; 3149 3150 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3151 } 3152 3153 static ssize_t 3154 megasas_page_size_show(struct device *cdev, 3155 struct device_attribute *attr, char *buf) 3156 { 3157 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3158 } 3159 3160 static ssize_t 3161 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3162 char *buf) 3163 { 3164 struct Scsi_Host *shost = class_to_shost(cdev); 3165 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3166 3167 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3168 } 3169 3170 static ssize_t 3171 megasas_fw_cmds_outstanding_show(struct device *cdev, 3172 struct device_attribute *attr, char *buf) 3173 { 3174 struct Scsi_Host *shost = class_to_shost(cdev); 3175 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3176 3177 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3178 } 3179 3180 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3181 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3182 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3183 megasas_fw_crash_buffer_size_show, NULL); 3184 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 3185 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 3186 static DEVICE_ATTR(page_size, S_IRUGO, 3187 
megasas_page_size_show, NULL); 3188 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3189 megasas_ldio_outstanding_show, NULL); 3190 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO, 3191 megasas_fw_cmds_outstanding_show, NULL); 3192 3193 struct device_attribute *megaraid_host_attrs[] = { 3194 &dev_attr_fw_crash_buffer_size, 3195 &dev_attr_fw_crash_buffer, 3196 &dev_attr_fw_crash_state, 3197 &dev_attr_page_size, 3198 &dev_attr_ldio_outstanding, 3199 &dev_attr_fw_cmds_outstanding, 3200 NULL, 3201 }; 3202 3203 /* 3204 * Scsi host template for megaraid_sas driver 3205 */ 3206 static struct scsi_host_template megasas_template = { 3207 3208 .module = THIS_MODULE, 3209 .name = "Avago SAS based MegaRAID driver", 3210 .proc_name = "megaraid_sas", 3211 .slave_configure = megasas_slave_configure, 3212 .slave_alloc = megasas_slave_alloc, 3213 .slave_destroy = megasas_slave_destroy, 3214 .queuecommand = megasas_queue_command, 3215 .eh_target_reset_handler = megasas_reset_target, 3216 .eh_abort_handler = megasas_task_abort, 3217 .eh_host_reset_handler = megasas_reset_bus_host, 3218 .eh_timed_out = megasas_reset_timer, 3219 .shost_attrs = megaraid_host_attrs, 3220 .bios_param = megasas_bios_param, 3221 .change_queue_depth = scsi_change_queue_depth, 3222 .no_write_same = 1, 3223 }; 3224 3225 /** 3226 * megasas_complete_int_cmd - Completes an internal command 3227 * @instance: Adapter soft state 3228 * @cmd: Command to be completed 3229 * 3230 * The megasas_issue_blocked_cmd() function waits for a command to complete 3231 * after it issues a command. This function wakes up that waiting routine by 3232 * calling wake_up() on the wait queue. 3233 */ 3234 static void 3235 megasas_complete_int_cmd(struct megasas_instance *instance, 3236 struct megasas_cmd *cmd) 3237 { 3238 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3239 wake_up(&instance->int_cmd_wait_q); 3240 } 3241 3242 /** 3243 * megasas_complete_abort - Completes aborting a command 3244 * @instance: Adapter soft state 3245 * @cmd: Cmd that was issued to abort another cmd 3246 * 3247 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3248 * after it issues an abort on a previously issued command. This function 3249 * wakes up all functions waiting on the same wait queue. 3250 */ 3251 static void 3252 megasas_complete_abort(struct megasas_instance *instance, 3253 struct megasas_cmd *cmd) 3254 { 3255 if (cmd->sync_cmd) { 3256 cmd->sync_cmd = 0; 3257 cmd->cmd_status_drv = 0; 3258 wake_up(&instance->abort_cmd_wait_q); 3259 } 3260 } 3261 3262 /** 3263 * megasas_complete_cmd - Completes a command 3264 * @instance: Adapter soft state 3265 * @cmd: Command to be completed 3266 * @alt_status: If non-zero, use this value as status to 3267 * SCSI mid-layer instead of the value returned 3268 * by the FW. 
This should be used if caller wants 3269 * an alternate status (as in the case of aborted 3270 * commands) 3271 */ 3272 void 3273 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3274 u8 alt_status) 3275 { 3276 int exception = 0; 3277 struct megasas_header *hdr = &cmd->frame->hdr; 3278 unsigned long flags; 3279 struct fusion_context *fusion = instance->ctrl_context; 3280 u32 opcode, status; 3281 3282 /* flag for the retry reset */ 3283 cmd->retry_for_fw_reset = 0; 3284 3285 if (cmd->scmd) 3286 cmd->scmd->SCp.ptr = NULL; 3287 3288 switch (hdr->cmd) { 3289 case MFI_CMD_INVALID: 3290 /* Some older 1068 controller FW may keep a pended 3291 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3292 when booting the kdump kernel. Ignore this command to 3293 prevent a kernel panic on shutdown of the kdump kernel. */ 3294 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3295 "completed\n"); 3296 dev_warn(&instance->pdev->dev, "If you have a controller " 3297 "other than PERC5, please upgrade your firmware\n"); 3298 break; 3299 case MFI_CMD_PD_SCSI_IO: 3300 case MFI_CMD_LD_SCSI_IO: 3301 3302 /* 3303 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3304 * issued either through an IO path or an IOCTL path. If it 3305 * was via IOCTL, we will send it to internal completion. 3306 */ 3307 if (cmd->sync_cmd) { 3308 cmd->sync_cmd = 0; 3309 megasas_complete_int_cmd(instance, cmd); 3310 break; 3311 } 3312 /* fall through */ 3313 3314 case MFI_CMD_LD_READ: 3315 case MFI_CMD_LD_WRITE: 3316 3317 if (alt_status) { 3318 cmd->scmd->result = alt_status << 16; 3319 exception = 1; 3320 } 3321 3322 if (exception) { 3323 3324 atomic_dec(&instance->fw_outstanding); 3325 3326 scsi_dma_unmap(cmd->scmd); 3327 cmd->scmd->scsi_done(cmd->scmd); 3328 megasas_return_cmd(instance, cmd); 3329 3330 break; 3331 } 3332 3333 switch (hdr->cmd_status) { 3334 3335 case MFI_STAT_OK: 3336 cmd->scmd->result = DID_OK << 16; 3337 break; 3338 3339 case MFI_STAT_SCSI_IO_FAILED: 3340 case MFI_STAT_LD_INIT_IN_PROGRESS: 3341 cmd->scmd->result = 3342 (DID_ERROR << 16) | hdr->scsi_status; 3343 break; 3344 3345 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3346 3347 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3348 3349 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3350 memset(cmd->scmd->sense_buffer, 0, 3351 SCSI_SENSE_BUFFERSIZE); 3352 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3353 hdr->sense_len); 3354 3355 cmd->scmd->result |= DRIVER_SENSE << 24; 3356 } 3357 3358 break; 3359 3360 case MFI_STAT_LD_OFFLINE: 3361 case MFI_STAT_DEVICE_NOT_FOUND: 3362 cmd->scmd->result = DID_BAD_TARGET << 16; 3363 break; 3364 3365 default: 3366 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3367 hdr->cmd_status); 3368 cmd->scmd->result = DID_ERROR << 16; 3369 break; 3370 } 3371 3372 atomic_dec(&instance->fw_outstanding); 3373 3374 scsi_dma_unmap(cmd->scmd); 3375 cmd->scmd->scsi_done(cmd->scmd); 3376 megasas_return_cmd(instance, cmd); 3377 3378 break; 3379 3380 case MFI_CMD_SMP: 3381 case MFI_CMD_STP: 3382 case MFI_CMD_NVME: 3383 megasas_complete_int_cmd(instance, cmd); 3384 break; 3385 3386 case MFI_CMD_DCMD: 3387 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3388 /* Check for LD map update */ 3389 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3390 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3391 fusion->fast_path_io = 0; 3392 spin_lock_irqsave(instance->host->host_lock, flags); 3393 status = cmd->frame->hdr.cmd_status; 3394 instance->map_update_cmd = NULL; 3395 if (status != MFI_STAT_OK) { 
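				/*
				 * LD map sync failed: warn unless the DCMD is
				 * simply not supported (MFI_STAT_NOT_FOUND),
				 * in which case return the frame and bail out
				 * quietly.
				 */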
3396 if (status != MFI_STAT_NOT_FOUND) 3397 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3398 cmd->frame->hdr.cmd_status); 3399 else { 3400 megasas_return_cmd(instance, cmd); 3401 spin_unlock_irqrestore( 3402 instance->host->host_lock, 3403 flags); 3404 break; 3405 } 3406 } 3407 3408 megasas_return_cmd(instance, cmd); 3409 3410 /* 3411 * Set fast path IO to ZERO. 3412 * Validate Map will set proper value. 3413 * Meanwhile all IOs will go as LD IO. 3414 */ 3415 if (status == MFI_STAT_OK && 3416 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3417 instance->map_id++; 3418 fusion->fast_path_io = 1; 3419 } else { 3420 fusion->fast_path_io = 0; 3421 } 3422 3423 megasas_sync_map_info(instance); 3424 spin_unlock_irqrestore(instance->host->host_lock, 3425 flags); 3426 break; 3427 } 3428 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3429 opcode == MR_DCMD_CTRL_EVENT_GET) { 3430 spin_lock_irqsave(&poll_aen_lock, flags); 3431 megasas_poll_wait_aen = 0; 3432 spin_unlock_irqrestore(&poll_aen_lock, flags); 3433 } 3434 3435 /* FW has an updated PD sequence */ 3436 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3437 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3438 3439 spin_lock_irqsave(instance->host->host_lock, flags); 3440 status = cmd->frame->hdr.cmd_status; 3441 instance->jbod_seq_cmd = NULL; 3442 megasas_return_cmd(instance, cmd); 3443 3444 if (status == MFI_STAT_OK) { 3445 instance->pd_seq_map_id++; 3446 /* Re-register a pd sync seq num cmd */ 3447 if (megasas_sync_pd_seq_num(instance, true)) 3448 instance->use_seqnum_jbod_fp = false; 3449 } else 3450 instance->use_seqnum_jbod_fp = false; 3451 3452 spin_unlock_irqrestore(instance->host->host_lock, flags); 3453 break; 3454 } 3455 3456 /* 3457 * See if got an event notification 3458 */ 3459 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3460 megasas_service_aen(instance, cmd); 3461 else 3462 megasas_complete_int_cmd(instance, cmd); 3463 3464 break; 3465 3466 case MFI_CMD_ABORT: 3467 /* 3468 * Cmd issued to abort another cmd returned 3469 */ 3470 megasas_complete_abort(instance, cmd); 3471 break; 3472 3473 default: 3474 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3475 hdr->cmd); 3476 megasas_complete_int_cmd(instance, cmd); 3477 break; 3478 } 3479 } 3480 3481 /** 3482 * megasas_issue_pending_cmds_again - issue all pending cmds 3483 * in FW again because of the fw reset 3484 * @instance: Adapter soft state 3485 */ 3486 static inline void 3487 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3488 { 3489 struct megasas_cmd *cmd; 3490 struct list_head clist_local; 3491 union megasas_evt_class_locale class_locale; 3492 unsigned long flags; 3493 u32 seq_num; 3494 3495 INIT_LIST_HEAD(&clist_local); 3496 spin_lock_irqsave(&instance->hba_lock, flags); 3497 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3498 spin_unlock_irqrestore(&instance->hba_lock, flags); 3499 3500 while (!list_empty(&clist_local)) { 3501 cmd = list_entry((&clist_local)->next, 3502 struct megasas_cmd, list); 3503 list_del_init(&cmd->list); 3504 3505 if (cmd->sync_cmd || cmd->scmd) { 3506 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3507 "detected to be pending while HBA reset\n", 3508 cmd, cmd->scmd, cmd->sync_cmd); 3509 3510 cmd->retry_for_fw_reset++; 3511 3512 if (cmd->retry_for_fw_reset == 3) { 3513 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3514 "was tried multiple times during reset." 
3515 "Shutting down the HBA\n", 3516 cmd, cmd->scmd, cmd->sync_cmd); 3517 instance->instancet->disable_intr(instance); 3518 atomic_set(&instance->fw_reset_no_pci_access, 1); 3519 megaraid_sas_kill_hba(instance); 3520 return; 3521 } 3522 } 3523 3524 if (cmd->sync_cmd == 1) { 3525 if (cmd->scmd) { 3526 dev_notice(&instance->pdev->dev, "unexpected" 3527 "cmd attached to internal command!\n"); 3528 } 3529 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3530 "on the internal reset queue," 3531 "issue it again.\n", cmd); 3532 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3533 instance->instancet->fire_cmd(instance, 3534 cmd->frame_phys_addr, 3535 0, instance->reg_set); 3536 } else if (cmd->scmd) { 3537 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3538 "detected on the internal queue, issue again.\n", 3539 cmd, cmd->scmd->cmnd[0]); 3540 3541 atomic_inc(&instance->fw_outstanding); 3542 instance->instancet->fire_cmd(instance, 3543 cmd->frame_phys_addr, 3544 cmd->frame_count-1, instance->reg_set); 3545 } else { 3546 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3547 "internal reset defer list while re-issue!!\n", 3548 cmd); 3549 } 3550 } 3551 3552 if (instance->aen_cmd) { 3553 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3554 megasas_return_cmd(instance, instance->aen_cmd); 3555 3556 instance->aen_cmd = NULL; 3557 } 3558 3559 /* 3560 * Initiate AEN (Asynchronous Event Notification) 3561 */ 3562 seq_num = instance->last_seq_num; 3563 class_locale.members.reserved = 0; 3564 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3565 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3566 3567 megasas_register_aen(instance, seq_num, class_locale.word); 3568 } 3569 3570 /** 3571 * Move the internal reset pending commands to a deferred queue. 3572 * 3573 * We move the commands pending at internal reset time to a 3574 * pending queue. This queue would be flushed after successful 3575 * completion of the internal reset sequence. if the internal reset 3576 * did not complete in time, the kernel reset handler would flush 3577 * these commands. 
3578 **/ 3579 static void 3580 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3581 { 3582 struct megasas_cmd *cmd; 3583 int i; 3584 u16 max_cmd = instance->max_fw_cmds; 3585 u32 defer_index; 3586 unsigned long flags; 3587 3588 defer_index = 0; 3589 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3590 for (i = 0; i < max_cmd; i++) { 3591 cmd = instance->cmd_list[i]; 3592 if (cmd->sync_cmd == 1 || cmd->scmd) { 3593 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3594 "on the defer queue as internal\n", 3595 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3596 3597 if (!list_empty(&cmd->list)) { 3598 dev_notice(&instance->pdev->dev, "ERROR while" 3599 " moving this cmd:%p, %d %p, it was" 3600 "discovered on some list?\n", 3601 cmd, cmd->sync_cmd, cmd->scmd); 3602 3603 list_del_init(&cmd->list); 3604 } 3605 defer_index++; 3606 list_add_tail(&cmd->list, 3607 &instance->internal_reset_pending_q); 3608 } 3609 } 3610 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3611 } 3612 3613 3614 static void 3615 process_fw_state_change_wq(struct work_struct *work) 3616 { 3617 struct megasas_instance *instance = 3618 container_of(work, struct megasas_instance, work_init); 3619 u32 wait; 3620 unsigned long flags; 3621 3622 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3623 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3624 atomic_read(&instance->adprecovery)); 3625 return ; 3626 } 3627 3628 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3629 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3630 "state, restarting it...\n"); 3631 3632 instance->instancet->disable_intr(instance); 3633 atomic_set(&instance->fw_outstanding, 0); 3634 3635 atomic_set(&instance->fw_reset_no_pci_access, 1); 3636 instance->instancet->adp_reset(instance, instance->reg_set); 3637 atomic_set(&instance->fw_reset_no_pci_access, 0); 3638 3639 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3640 "initiating next stage...\n"); 3641 3642 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3643 "state 2 starting...\n"); 3644 3645 /* waiting for about 20 second before start the second init */ 3646 for (wait = 0; wait < 30; wait++) { 3647 msleep(1000); 3648 } 3649 3650 if (megasas_transition_to_ready(instance, 1)) { 3651 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3652 3653 atomic_set(&instance->fw_reset_no_pci_access, 1); 3654 megaraid_sas_kill_hba(instance); 3655 return ; 3656 } 3657 3658 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3659 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3660 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3661 ) { 3662 *instance->consumer = *instance->producer; 3663 } else { 3664 *instance->consumer = 0; 3665 *instance->producer = 0; 3666 } 3667 3668 megasas_issue_init_mfi(instance); 3669 3670 spin_lock_irqsave(&instance->hba_lock, flags); 3671 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3672 spin_unlock_irqrestore(&instance->hba_lock, flags); 3673 instance->instancet->enable_intr(instance); 3674 3675 megasas_issue_pending_cmds_again(instance); 3676 instance->issuepend_done = 1; 3677 } 3678 } 3679 3680 /** 3681 * megasas_deplete_reply_queue - Processes all completed commands 3682 * @instance: Adapter soft state 3683 * @alt_status: Alternate status to be returned to 3684 * SCSI mid-layer instead of the status 3685 * returned by the FW 3686 * Note: this must be called with hba lock held 3687 */ 3688 static 
int 3689 megasas_deplete_reply_queue(struct megasas_instance *instance, 3690 u8 alt_status) 3691 { 3692 u32 mfiStatus; 3693 u32 fw_state; 3694 3695 if ((mfiStatus = instance->instancet->check_reset(instance, 3696 instance->reg_set)) == 1) { 3697 return IRQ_HANDLED; 3698 } 3699 3700 mfiStatus = instance->instancet->clear_intr(instance); 3701 if (mfiStatus == 0) { 3702 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3703 if (!instance->msix_vectors) 3704 return IRQ_NONE; 3705 } 3706 3707 instance->mfiStatus = mfiStatus; 3708 3709 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3710 fw_state = instance->instancet->read_fw_status_reg( 3711 instance) & MFI_STATE_MASK; 3712 3713 if (fw_state != MFI_STATE_FAULT) { 3714 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3715 fw_state); 3716 } 3717 3718 if ((fw_state == MFI_STATE_FAULT) && 3719 (instance->disableOnlineCtrlReset == 0)) { 3720 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3721 3722 if ((instance->pdev->device == 3723 PCI_DEVICE_ID_LSI_SAS1064R) || 3724 (instance->pdev->device == 3725 PCI_DEVICE_ID_DELL_PERC5) || 3726 (instance->pdev->device == 3727 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3728 3729 *instance->consumer = 3730 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3731 } 3732 3733 3734 instance->instancet->disable_intr(instance); 3735 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3736 instance->issuepend_done = 0; 3737 3738 atomic_set(&instance->fw_outstanding, 0); 3739 megasas_internal_reset_defer_cmds(instance); 3740 3741 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3742 fw_state, atomic_read(&instance->adprecovery)); 3743 3744 schedule_work(&instance->work_init); 3745 return IRQ_HANDLED; 3746 3747 } else { 3748 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3749 fw_state, instance->disableOnlineCtrlReset); 3750 } 3751 } 3752 3753 tasklet_schedule(&instance->isr_tasklet); 3754 return IRQ_HANDLED; 3755 } 3756 /** 3757 * megasas_isr - isr entry point 3758 */ 3759 static irqreturn_t megasas_isr(int irq, void *devp) 3760 { 3761 struct megasas_irq_context *irq_context = devp; 3762 struct megasas_instance *instance = irq_context->instance; 3763 unsigned long flags; 3764 irqreturn_t rc; 3765 3766 if (atomic_read(&instance->fw_reset_no_pci_access)) 3767 return IRQ_HANDLED; 3768 3769 spin_lock_irqsave(&instance->hba_lock, flags); 3770 rc = megasas_deplete_reply_queue(instance, DID_OK); 3771 spin_unlock_irqrestore(&instance->hba_lock, flags); 3772 3773 return rc; 3774 } 3775 3776 /** 3777 * megasas_transition_to_ready - Move the FW to READY state 3778 * @instance: Adapter soft state 3779 * 3780 * During the initialization, FW passes can potentially be in any one of 3781 * several possible states. If the FW in operational, waiting-for-handshake 3782 * states, driver must take steps to bring it to ready state. Otherwise, it 3783 * has to wait for the ready state. 
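 *
 * Returns 0 once the firmware reaches MFI_STATE_READY, or -ENODEV if the
 * firmware is faulted (and @ocr is not set), sits in an unknown state, or
 * fails to leave its current state within the per-state wait time.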
3784 */ 3785 int 3786 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3787 { 3788 int i; 3789 u8 max_wait; 3790 u32 fw_state; 3791 u32 cur_state; 3792 u32 abs_state, curr_abs_state; 3793 3794 abs_state = instance->instancet->read_fw_status_reg(instance); 3795 fw_state = abs_state & MFI_STATE_MASK; 3796 3797 if (fw_state != MFI_STATE_READY) 3798 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3799 " state\n"); 3800 3801 while (fw_state != MFI_STATE_READY) { 3802 3803 switch (fw_state) { 3804 3805 case MFI_STATE_FAULT: 3806 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3807 if (ocr) { 3808 max_wait = MEGASAS_RESET_WAIT_TIME; 3809 cur_state = MFI_STATE_FAULT; 3810 break; 3811 } else 3812 return -ENODEV; 3813 3814 case MFI_STATE_WAIT_HANDSHAKE: 3815 /* 3816 * Set the CLR bit in inbound doorbell 3817 */ 3818 if ((instance->pdev->device == 3819 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3820 (instance->pdev->device == 3821 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3822 (instance->adapter_type != MFI_SERIES)) 3823 writel( 3824 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3825 &instance->reg_set->doorbell); 3826 else 3827 writel( 3828 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3829 &instance->reg_set->inbound_doorbell); 3830 3831 max_wait = MEGASAS_RESET_WAIT_TIME; 3832 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3833 break; 3834 3835 case MFI_STATE_BOOT_MESSAGE_PENDING: 3836 if ((instance->pdev->device == 3837 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3838 (instance->pdev->device == 3839 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3840 (instance->adapter_type != MFI_SERIES)) 3841 writel(MFI_INIT_HOTPLUG, 3842 &instance->reg_set->doorbell); 3843 else 3844 writel(MFI_INIT_HOTPLUG, 3845 &instance->reg_set->inbound_doorbell); 3846 3847 max_wait = MEGASAS_RESET_WAIT_TIME; 3848 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3849 break; 3850 3851 case MFI_STATE_OPERATIONAL: 3852 /* 3853 * Bring it to READY state; assuming max wait 10 secs 3854 */ 3855 instance->instancet->disable_intr(instance); 3856 if ((instance->pdev->device == 3857 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3858 (instance->pdev->device == 3859 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3860 (instance->adapter_type != MFI_SERIES)) { 3861 writel(MFI_RESET_FLAGS, 3862 &instance->reg_set->doorbell); 3863 3864 if (instance->adapter_type != MFI_SERIES) { 3865 for (i = 0; i < (10 * 1000); i += 20) { 3866 if (megasas_readl( 3867 instance, 3868 &instance-> 3869 reg_set-> 3870 doorbell) & 1) 3871 msleep(20); 3872 else 3873 break; 3874 } 3875 } 3876 } else 3877 writel(MFI_RESET_FLAGS, 3878 &instance->reg_set->inbound_doorbell); 3879 3880 max_wait = MEGASAS_RESET_WAIT_TIME; 3881 cur_state = MFI_STATE_OPERATIONAL; 3882 break; 3883 3884 case MFI_STATE_UNDEFINED: 3885 /* 3886 * This state should not last for more than 2 seconds 3887 */ 3888 max_wait = MEGASAS_RESET_WAIT_TIME; 3889 cur_state = MFI_STATE_UNDEFINED; 3890 break; 3891 3892 case MFI_STATE_BB_INIT: 3893 max_wait = MEGASAS_RESET_WAIT_TIME; 3894 cur_state = MFI_STATE_BB_INIT; 3895 break; 3896 3897 case MFI_STATE_FW_INIT: 3898 max_wait = MEGASAS_RESET_WAIT_TIME; 3899 cur_state = MFI_STATE_FW_INIT; 3900 break; 3901 3902 case MFI_STATE_FW_INIT_2: 3903 max_wait = MEGASAS_RESET_WAIT_TIME; 3904 cur_state = MFI_STATE_FW_INIT_2; 3905 break; 3906 3907 case MFI_STATE_DEVICE_SCAN: 3908 max_wait = MEGASAS_RESET_WAIT_TIME; 3909 cur_state = MFI_STATE_DEVICE_SCAN; 3910 break; 3911 3912 case MFI_STATE_FLUSH_CACHE: 3913 max_wait = MEGASAS_RESET_WAIT_TIME; 3914 cur_state = 
MFI_STATE_FLUSH_CACHE; 3915 break; 3916 3917 default: 3918 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 3919 fw_state); 3920 return -ENODEV; 3921 } 3922 3923 /* 3924 * The cur_state should not last for more than max_wait secs 3925 */ 3926 for (i = 0; i < max_wait * 50; i++) { 3927 curr_abs_state = instance->instancet-> 3928 read_fw_status_reg(instance); 3929 3930 if (abs_state == curr_abs_state) { 3931 msleep(20); 3932 } else 3933 break; 3934 } 3935 3936 /* 3937 * Return error if fw_state hasn't changed after max_wait 3938 */ 3939 if (curr_abs_state == abs_state) { 3940 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3941 "in %d secs\n", fw_state, max_wait); 3942 return -ENODEV; 3943 } 3944 3945 abs_state = curr_abs_state; 3946 fw_state = curr_abs_state & MFI_STATE_MASK; 3947 } 3948 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3949 3950 return 0; 3951 } 3952 3953 /** 3954 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3955 * @instance: Adapter soft state 3956 */ 3957 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3958 { 3959 int i; 3960 u16 max_cmd = instance->max_mfi_cmds; 3961 struct megasas_cmd *cmd; 3962 3963 if (!instance->frame_dma_pool) 3964 return; 3965 3966 /* 3967 * Return all frames to pool 3968 */ 3969 for (i = 0; i < max_cmd; i++) { 3970 3971 cmd = instance->cmd_list[i]; 3972 3973 if (cmd->frame) 3974 dma_pool_free(instance->frame_dma_pool, cmd->frame, 3975 cmd->frame_phys_addr); 3976 3977 if (cmd->sense) 3978 dma_pool_free(instance->sense_dma_pool, cmd->sense, 3979 cmd->sense_phys_addr); 3980 } 3981 3982 /* 3983 * Now destroy the pool itself 3984 */ 3985 dma_pool_destroy(instance->frame_dma_pool); 3986 dma_pool_destroy(instance->sense_dma_pool); 3987 3988 instance->frame_dma_pool = NULL; 3989 instance->sense_dma_pool = NULL; 3990 } 3991 3992 /** 3993 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3994 * @instance: Adapter soft state 3995 * 3996 * Each command packet has an embedded DMA memory buffer that is used for 3997 * filling MFI frame and the SG list that immediately follows the frame. This 3998 * function creates those DMA memory buffers for each command packet by using 3999 * PCI pool facility. 4000 */ 4001 static int megasas_create_frame_pool(struct megasas_instance *instance) 4002 { 4003 int i; 4004 u16 max_cmd; 4005 u32 sge_sz; 4006 u32 frame_count; 4007 struct megasas_cmd *cmd; 4008 4009 max_cmd = instance->max_mfi_cmds; 4010 4011 /* 4012 * Size of our frame is 64 bytes for MFI frame, followed by max SG 4013 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 4014 */ 4015 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 4016 sizeof(struct megasas_sge32); 4017 4018 if (instance->flag_ieee) 4019 sge_sz = sizeof(struct megasas_sge_skinny); 4020 4021 /* 4022 * For MFI controllers. 4023 * max_num_sge = 60 4024 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4025 * Total 960 byte (15 MFI frame of 64 byte) 4026 * 4027 * Fusion adapter require only 3 extra frame. 4028 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4029 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4030 * Total 192 byte (3 MFI frame of 64 byte) 4031 */ 4032 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4033 (15 + 1) : (3 + 1); 4034 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4035 /* 4036 * Use DMA pool facility provided by PCI layer 4037 */ 4038 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4039 &instance->pdev->dev, 4040 instance->mfi_frame_size, 256, 0); 4041 4042 if (!instance->frame_dma_pool) { 4043 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4044 return -ENOMEM; 4045 } 4046 4047 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4048 &instance->pdev->dev, 128, 4049 4, 0); 4050 4051 if (!instance->sense_dma_pool) { 4052 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4053 4054 dma_pool_destroy(instance->frame_dma_pool); 4055 instance->frame_dma_pool = NULL; 4056 4057 return -ENOMEM; 4058 } 4059 4060 /* 4061 * Allocate and attach a frame to each of the commands in cmd_list. 4062 * By making cmd->index as the context instead of the &cmd, we can 4063 * always use 32bit context regardless of the architecture 4064 */ 4065 for (i = 0; i < max_cmd; i++) { 4066 4067 cmd = instance->cmd_list[i]; 4068 4069 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4070 GFP_KERNEL, &cmd->frame_phys_addr); 4071 4072 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4073 GFP_KERNEL, &cmd->sense_phys_addr); 4074 4075 /* 4076 * megasas_teardown_frame_pool() takes care of freeing 4077 * whatever has been allocated 4078 */ 4079 if (!cmd->frame || !cmd->sense) { 4080 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4081 megasas_teardown_frame_pool(instance); 4082 return -ENOMEM; 4083 } 4084 4085 cmd->frame->io.context = cpu_to_le32(cmd->index); 4086 cmd->frame->io.pad_0 = 0; 4087 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4088 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4089 } 4090 4091 return 0; 4092 } 4093 4094 /** 4095 * megasas_free_cmds - Free all the cmds in the free cmd pool 4096 * @instance: Adapter soft state 4097 */ 4098 void megasas_free_cmds(struct megasas_instance *instance) 4099 { 4100 int i; 4101 4102 /* First free the MFI frame pool */ 4103 megasas_teardown_frame_pool(instance); 4104 4105 /* Free all the commands in the cmd_list */ 4106 for (i = 0; i < instance->max_mfi_cmds; i++) 4107 4108 kfree(instance->cmd_list[i]); 4109 4110 /* Free the cmd_list buffer itself */ 4111 kfree(instance->cmd_list); 4112 instance->cmd_list = NULL; 4113 4114 INIT_LIST_HEAD(&instance->cmd_pool); 4115 } 4116 4117 /** 4118 * megasas_alloc_cmds - Allocates the command packets 4119 * @instance: Adapter soft state 4120 * 4121 * Each command that is issued to the FW, whether IO commands from the OS or 4122 * internal commands like IOCTLs, are wrapped in local data structure called 4123 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4124 * the FW. 4125 * 4126 * Each frame has a 32-bit field called context (tag). This context is used 4127 * to get back the megasas_cmd from the frame when a frame gets completed in 4128 * the ISR. Typically the address of the megasas_cmd itself would be used as 4129 * the context. But we wanted to keep the differences between 32 and 64 bit 4130 * systems to the mininum. We always use 32 bit integers for the context. In 4131 * this driver, the 32 bit values are the indices into an array cmd_list. 4132 * This array is used only to look up the megasas_cmd given the context. The 4133 * free commands themselves are maintained in a linked list called cmd_pool. 
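 *
 * As an illustration (not a code path taken verbatim by this driver), a
 * completion handler can recover the owning command from the 32-bit
 * context with a plain array lookup:
 *
 *	cmd = instance->cmd_list[le32_to_cpu(frame->io.context)];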
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u16 max_cmd;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}

/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:			Adapter soft state
 *
 * OCR is initiated only for Fusion adapters, and only when the driver is
 * not loading/unloading and no OCR is already in progress.
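 *
 * Return: INITIATE_OCR when a controller reset should be triggered,
 * IGNORE_TIMEOUT when the driver is unloading or a reset is already in
 * progress, or KILL_ADAPTER for MFI series adapters, which cannot be
 * recovered by OCR here.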
 */
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
	if (instance->adapter_type == MFI_SERIES)
		return KILL_ADAPTER;
	else if (instance->unload ||
		 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
		return IGNORE_TIMEOUT;
	else
		return INITIATE_OCR;
}

static void
megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
{
	int ret;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	struct MR_PRIV_DEVICE *mr_device_priv_data;
	u16 device_id = 0;

	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
		return;
	}

	dcmd = &cmd->frame->dcmd;

	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.s[0] = cpu_to_le16(device_id);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);

	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
				 sizeof(struct MR_PD_INFO));

	if ((instance->adapter_type != MFI_SERIES) &&
	    !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_SUCCESS:
		mr_device_priv_data = sdev->hostdata;
		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
		mr_device_priv_data->interface_type =
			instance->pd_info->state.ddf.pdType.intf;
		break;

	case DCMD_TIMEOUT:

		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
					     MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				 __func__, __LINE__);
			break;
		}

		break;
	}

	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
}

/*
 * megasas_get_pd_list - Returns FW's pd_list structure
 * @instance:		 Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out the physical
 * drives (SYSTEM PDs) exposed to the host by the FW.
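 *
 * Return: 0 on success or when the DCMD is not supported by the FW,
 * -ENOMEM if no free command could be obtained, otherwise the non-zero
 * DCMD completion status (e.g. DCMD_FAILED or DCMD_TIMEOUT).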
4298 */ 4299 static int 4300 megasas_get_pd_list(struct megasas_instance *instance) 4301 { 4302 int ret = 0, pd_index = 0; 4303 struct megasas_cmd *cmd; 4304 struct megasas_dcmd_frame *dcmd; 4305 struct MR_PD_LIST *ci; 4306 struct MR_PD_ADDRESS *pd_addr; 4307 dma_addr_t ci_h = 0; 4308 4309 if (instance->pd_list_not_supported) { 4310 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4311 "not supported by firmware\n"); 4312 return ret; 4313 } 4314 4315 ci = instance->pd_list_buf; 4316 ci_h = instance->pd_list_buf_h; 4317 4318 cmd = megasas_get_cmd(instance); 4319 4320 if (!cmd) { 4321 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4322 return -ENOMEM; 4323 } 4324 4325 dcmd = &cmd->frame->dcmd; 4326 4327 memset(ci, 0, sizeof(*ci)); 4328 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4329 4330 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4331 dcmd->mbox.b[1] = 0; 4332 dcmd->cmd = MFI_CMD_DCMD; 4333 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4334 dcmd->sge_count = 1; 4335 dcmd->flags = MFI_FRAME_DIR_READ; 4336 dcmd->timeout = 0; 4337 dcmd->pad_0 = 0; 4338 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4339 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4340 4341 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4342 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4343 4344 if ((instance->adapter_type != MFI_SERIES) && 4345 !instance->mask_interrupts) 4346 ret = megasas_issue_blocked_cmd(instance, cmd, 4347 MFI_IO_TIMEOUT_SECS); 4348 else 4349 ret = megasas_issue_polled(instance, cmd); 4350 4351 switch (ret) { 4352 case DCMD_FAILED: 4353 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4354 "failed/not supported by firmware\n"); 4355 4356 if (instance->adapter_type != MFI_SERIES) 4357 megaraid_sas_kill_hba(instance); 4358 else 4359 instance->pd_list_not_supported = 1; 4360 break; 4361 case DCMD_TIMEOUT: 4362 4363 switch (dcmd_timeout_ocr_possible(instance)) { 4364 case INITIATE_OCR: 4365 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4366 /* 4367 * DCMD failed from AEN path. 4368 * AEN path already hold reset_mutex to avoid PCI access 4369 * while OCR is in progress. 
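			 * Release reset_mutex around megasas_reset_fusion()
			 * so the reset path can take it, then re-acquire it
			 * to keep the caller's lock/unlock balanced.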
4370 */ 4371 mutex_unlock(&instance->reset_mutex); 4372 megasas_reset_fusion(instance->host, 4373 MFI_IO_TIMEOUT_OCR); 4374 mutex_lock(&instance->reset_mutex); 4375 break; 4376 case KILL_ADAPTER: 4377 megaraid_sas_kill_hba(instance); 4378 break; 4379 case IGNORE_TIMEOUT: 4380 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4381 __func__, __LINE__); 4382 break; 4383 } 4384 4385 break; 4386 4387 case DCMD_SUCCESS: 4388 pd_addr = ci->addr; 4389 4390 if ((le32_to_cpu(ci->count) > 4391 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4392 break; 4393 4394 memset(instance->local_pd_list, 0, 4395 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4396 4397 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4398 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4399 le16_to_cpu(pd_addr->deviceId); 4400 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4401 pd_addr->scsiDevType; 4402 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4403 MR_PD_STATE_SYSTEM; 4404 pd_addr++; 4405 } 4406 4407 memcpy(instance->pd_list, instance->local_pd_list, 4408 sizeof(instance->pd_list)); 4409 break; 4410 4411 } 4412 4413 if (ret != DCMD_TIMEOUT) 4414 megasas_return_cmd(instance, cmd); 4415 4416 return ret; 4417 } 4418 4419 /* 4420 * megasas_get_ld_list_info - Returns FW's ld_list structure 4421 * @instance: Adapter soft state 4422 * @ld_list: ld_list structure 4423 * 4424 * Issues an internal command (DCMD) to get the FW's controller PD 4425 * list structure. This information is mainly used to find out SYSTEM 4426 * supported by the FW. 4427 */ 4428 static int 4429 megasas_get_ld_list(struct megasas_instance *instance) 4430 { 4431 int ret = 0, ld_index = 0, ids = 0; 4432 struct megasas_cmd *cmd; 4433 struct megasas_dcmd_frame *dcmd; 4434 struct MR_LD_LIST *ci; 4435 dma_addr_t ci_h = 0; 4436 u32 ld_count; 4437 4438 ci = instance->ld_list_buf; 4439 ci_h = instance->ld_list_buf_h; 4440 4441 cmd = megasas_get_cmd(instance); 4442 4443 if (!cmd) { 4444 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4445 return -ENOMEM; 4446 } 4447 4448 dcmd = &cmd->frame->dcmd; 4449 4450 memset(ci, 0, sizeof(*ci)); 4451 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4452 4453 if (instance->supportmax256vd) 4454 dcmd->mbox.b[0] = 1; 4455 dcmd->cmd = MFI_CMD_DCMD; 4456 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4457 dcmd->sge_count = 1; 4458 dcmd->flags = MFI_FRAME_DIR_READ; 4459 dcmd->timeout = 0; 4460 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4461 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4462 dcmd->pad_0 = 0; 4463 4464 megasas_set_dma_settings(instance, dcmd, ci_h, 4465 sizeof(struct MR_LD_LIST)); 4466 4467 if ((instance->adapter_type != MFI_SERIES) && 4468 !instance->mask_interrupts) 4469 ret = megasas_issue_blocked_cmd(instance, cmd, 4470 MFI_IO_TIMEOUT_SECS); 4471 else 4472 ret = megasas_issue_polled(instance, cmd); 4473 4474 ld_count = le32_to_cpu(ci->ldCount); 4475 4476 switch (ret) { 4477 case DCMD_FAILED: 4478 megaraid_sas_kill_hba(instance); 4479 break; 4480 case DCMD_TIMEOUT: 4481 4482 switch (dcmd_timeout_ocr_possible(instance)) { 4483 case INITIATE_OCR: 4484 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4485 /* 4486 * DCMD failed from AEN path. 4487 * AEN path already hold reset_mutex to avoid PCI access 4488 * while OCR is in progress. 
4489 */ 4490 mutex_unlock(&instance->reset_mutex); 4491 megasas_reset_fusion(instance->host, 4492 MFI_IO_TIMEOUT_OCR); 4493 mutex_lock(&instance->reset_mutex); 4494 break; 4495 case KILL_ADAPTER: 4496 megaraid_sas_kill_hba(instance); 4497 break; 4498 case IGNORE_TIMEOUT: 4499 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4500 __func__, __LINE__); 4501 break; 4502 } 4503 4504 break; 4505 4506 case DCMD_SUCCESS: 4507 if (ld_count > instance->fw_supported_vd_count) 4508 break; 4509 4510 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4511 4512 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4513 if (ci->ldList[ld_index].state != 0) { 4514 ids = ci->ldList[ld_index].ref.targetId; 4515 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4516 } 4517 } 4518 4519 break; 4520 } 4521 4522 if (ret != DCMD_TIMEOUT) 4523 megasas_return_cmd(instance, cmd); 4524 4525 return ret; 4526 } 4527 4528 /** 4529 * megasas_ld_list_query - Returns FW's ld_list structure 4530 * @instance: Adapter soft state 4531 * @ld_list: ld_list structure 4532 * 4533 * Issues an internal command (DCMD) to get the FW's controller PD 4534 * list structure. This information is mainly used to find out SYSTEM 4535 * supported by the FW. 4536 */ 4537 static int 4538 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4539 { 4540 int ret = 0, ld_index = 0, ids = 0; 4541 struct megasas_cmd *cmd; 4542 struct megasas_dcmd_frame *dcmd; 4543 struct MR_LD_TARGETID_LIST *ci; 4544 dma_addr_t ci_h = 0; 4545 u32 tgtid_count; 4546 4547 ci = instance->ld_targetid_list_buf; 4548 ci_h = instance->ld_targetid_list_buf_h; 4549 4550 cmd = megasas_get_cmd(instance); 4551 4552 if (!cmd) { 4553 dev_warn(&instance->pdev->dev, 4554 "megasas_ld_list_query: Failed to get cmd\n"); 4555 return -ENOMEM; 4556 } 4557 4558 dcmd = &cmd->frame->dcmd; 4559 4560 memset(ci, 0, sizeof(*ci)); 4561 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4562 4563 dcmd->mbox.b[0] = query_type; 4564 if (instance->supportmax256vd) 4565 dcmd->mbox.b[2] = 1; 4566 4567 dcmd->cmd = MFI_CMD_DCMD; 4568 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4569 dcmd->sge_count = 1; 4570 dcmd->flags = MFI_FRAME_DIR_READ; 4571 dcmd->timeout = 0; 4572 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4573 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4574 dcmd->pad_0 = 0; 4575 4576 megasas_set_dma_settings(instance, dcmd, ci_h, 4577 sizeof(struct MR_LD_TARGETID_LIST)); 4578 4579 if ((instance->adapter_type != MFI_SERIES) && 4580 !instance->mask_interrupts) 4581 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4582 else 4583 ret = megasas_issue_polled(instance, cmd); 4584 4585 switch (ret) { 4586 case DCMD_FAILED: 4587 dev_info(&instance->pdev->dev, 4588 "DCMD not supported by firmware - %s %d\n", 4589 __func__, __LINE__); 4590 ret = megasas_get_ld_list(instance); 4591 break; 4592 case DCMD_TIMEOUT: 4593 switch (dcmd_timeout_ocr_possible(instance)) { 4594 case INITIATE_OCR: 4595 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4596 /* 4597 * DCMD failed from AEN path. 4598 * AEN path already hold reset_mutex to avoid PCI access 4599 * while OCR is in progress. 
4600 */ 4601 mutex_unlock(&instance->reset_mutex); 4602 megasas_reset_fusion(instance->host, 4603 MFI_IO_TIMEOUT_OCR); 4604 mutex_lock(&instance->reset_mutex); 4605 break; 4606 case KILL_ADAPTER: 4607 megaraid_sas_kill_hba(instance); 4608 break; 4609 case IGNORE_TIMEOUT: 4610 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4611 __func__, __LINE__); 4612 break; 4613 } 4614 4615 break; 4616 case DCMD_SUCCESS: 4617 tgtid_count = le32_to_cpu(ci->count); 4618 4619 if ((tgtid_count > (instance->fw_supported_vd_count))) 4620 break; 4621 4622 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4623 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4624 ids = ci->targetId[ld_index]; 4625 instance->ld_ids[ids] = ci->targetId[ld_index]; 4626 } 4627 4628 break; 4629 } 4630 4631 if (ret != DCMD_TIMEOUT) 4632 megasas_return_cmd(instance, cmd); 4633 4634 return ret; 4635 } 4636 4637 /** 4638 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4639 * dcmd.mbox - reserved 4640 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4641 * Desc: This DCMD will return the combined device list 4642 * Status: MFI_STAT_OK - List returned successfully 4643 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4644 * disabled 4645 * @instance: Adapter soft state 4646 * @is_probe: Driver probe check 4647 * Return: 0 if DCMD succeeded 4648 * non-zero if failed 4649 */ 4650 static int 4651 megasas_host_device_list_query(struct megasas_instance *instance, 4652 bool is_probe) 4653 { 4654 int ret, i, target_id; 4655 struct megasas_cmd *cmd; 4656 struct megasas_dcmd_frame *dcmd; 4657 struct MR_HOST_DEVICE_LIST *ci; 4658 u32 count; 4659 dma_addr_t ci_h; 4660 4661 ci = instance->host_device_list_buf; 4662 ci_h = instance->host_device_list_buf_h; 4663 4664 cmd = megasas_get_cmd(instance); 4665 4666 if (!cmd) { 4667 dev_warn(&instance->pdev->dev, 4668 "%s: failed to get cmd\n", 4669 __func__); 4670 return -ENOMEM; 4671 } 4672 4673 dcmd = &cmd->frame->dcmd; 4674 4675 memset(ci, 0, sizeof(*ci)); 4676 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4677 4678 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 4679 dcmd->cmd = MFI_CMD_DCMD; 4680 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4681 dcmd->sge_count = 1; 4682 dcmd->flags = MFI_FRAME_DIR_READ; 4683 dcmd->timeout = 0; 4684 dcmd->pad_0 = 0; 4685 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 4686 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 4687 4688 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 4689 4690 if (!instance->mask_interrupts) { 4691 ret = megasas_issue_blocked_cmd(instance, cmd, 4692 MFI_IO_TIMEOUT_SECS); 4693 } else { 4694 ret = megasas_issue_polled(instance, cmd); 4695 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4696 } 4697 4698 switch (ret) { 4699 case DCMD_SUCCESS: 4700 /* Fill the internal pd_list and ld_ids array based on 4701 * targetIds returned by FW 4702 */ 4703 count = le32_to_cpu(ci->count); 4704 4705 memset(instance->local_pd_list, 0, 4706 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4707 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4708 for (i = 0; i < count; i++) { 4709 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 4710 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 4711 instance->local_pd_list[target_id].tid = target_id; 4712 instance->local_pd_list[target_id].driveType = 4713 ci->host_device_list[i].scsi_type; 4714 instance->local_pd_list[target_id].driveState = 4715 MR_PD_STATE_SYSTEM; 4716 } else { 4717 instance->ld_ids[target_id] = target_id; 4718 } 4719 } 4720 4721 memcpy(instance->pd_list, instance->local_pd_list, 4722 sizeof(instance->pd_list)); 4723 break; 4724 4725 case DCMD_TIMEOUT: 4726 switch (dcmd_timeout_ocr_possible(instance)) { 4727 case INITIATE_OCR: 4728 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4729 megasas_reset_fusion(instance->host, 4730 MFI_IO_TIMEOUT_OCR); 4731 break; 4732 case KILL_ADAPTER: 4733 megaraid_sas_kill_hba(instance); 4734 break; 4735 case IGNORE_TIMEOUT: 4736 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4737 __func__, __LINE__); 4738 break; 4739 } 4740 break; 4741 case DCMD_FAILED: 4742 dev_err(&instance->pdev->dev, 4743 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 4744 __func__); 4745 break; 4746 } 4747 4748 if (ret != DCMD_TIMEOUT) 4749 megasas_return_cmd(instance, cmd); 4750 4751 return ret; 4752 } 4753 4754 /* 4755 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4756 * instance : Controller's instance 4757 */ 4758 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4759 { 4760 struct fusion_context *fusion; 4761 u32 ventura_map_sz = 0; 4762 4763 fusion = instance->ctrl_context; 4764 /* For MFI based controllers return dummy success */ 4765 if (!fusion) 4766 return; 4767 4768 instance->supportmax256vd = 4769 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 4770 /* Below is additional check to address future FW enhancement */ 4771 if (instance->ctrl_info_buf->max_lds > 64) 4772 instance->supportmax256vd = 1; 4773 4774 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4775 * MEGASAS_MAX_DEV_PER_CHANNEL; 4776 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4777 * MEGASAS_MAX_DEV_PER_CHANNEL; 4778 if (instance->supportmax256vd) { 4779 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4780 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4781 } else { 4782 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4783 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4784 } 4785 4786 dev_info(&instance->pdev->dev, 4787 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 4788 
instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 4789 instance->ctrl_info_buf->max_lds); 4790 4791 if (instance->max_raid_mapsize) { 4792 ventura_map_sz = instance->max_raid_mapsize * 4793 MR_MIN_MAP_SIZE; /* 64k */ 4794 fusion->current_map_sz = ventura_map_sz; 4795 fusion->max_map_sz = ventura_map_sz; 4796 } else { 4797 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4798 (sizeof(struct MR_LD_SPAN_MAP) * 4799 (instance->fw_supported_vd_count - 1)); 4800 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4801 4802 fusion->max_map_sz = 4803 max(fusion->old_map_sz, fusion->new_map_sz); 4804 4805 if (instance->supportmax256vd) 4806 fusion->current_map_sz = fusion->new_map_sz; 4807 else 4808 fusion->current_map_sz = fusion->old_map_sz; 4809 } 4810 /* irrespective of FW raid maps, driver raid map is constant */ 4811 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4812 } 4813 4814 /* 4815 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 4816 * dcmd.hdr.length - number of bytes to read 4817 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 4818 * Desc: Fill in snapdump properties 4819 * Status: MFI_STAT_OK- Command successful 4820 */ 4821 void megasas_get_snapdump_properties(struct megasas_instance *instance) 4822 { 4823 int ret = 0; 4824 struct megasas_cmd *cmd; 4825 struct megasas_dcmd_frame *dcmd; 4826 struct MR_SNAPDUMP_PROPERTIES *ci; 4827 dma_addr_t ci_h = 0; 4828 4829 ci = instance->snapdump_prop; 4830 ci_h = instance->snapdump_prop_h; 4831 4832 if (!ci) 4833 return; 4834 4835 cmd = megasas_get_cmd(instance); 4836 4837 if (!cmd) { 4838 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 4839 return; 4840 } 4841 4842 dcmd = &cmd->frame->dcmd; 4843 4844 memset(ci, 0, sizeof(*ci)); 4845 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4846 4847 dcmd->cmd = MFI_CMD_DCMD; 4848 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4849 dcmd->sge_count = 1; 4850 dcmd->flags = MFI_FRAME_DIR_READ; 4851 dcmd->timeout = 0; 4852 dcmd->pad_0 = 0; 4853 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4854 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 4855 4856 megasas_set_dma_settings(instance, dcmd, ci_h, 4857 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4858 4859 if (!instance->mask_interrupts) { 4860 ret = megasas_issue_blocked_cmd(instance, cmd, 4861 MFI_IO_TIMEOUT_SECS); 4862 } else { 4863 ret = megasas_issue_polled(instance, cmd); 4864 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4865 } 4866 4867 switch (ret) { 4868 case DCMD_SUCCESS: 4869 instance->snapdump_wait_time = 4870 min_t(u8, ci->trigger_min_num_sec_before_ocr, 4871 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 4872 break; 4873 4874 case DCMD_TIMEOUT: 4875 switch (dcmd_timeout_ocr_possible(instance)) { 4876 case INITIATE_OCR: 4877 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4878 megasas_reset_fusion(instance->host, 4879 MFI_IO_TIMEOUT_OCR); 4880 break; 4881 case KILL_ADAPTER: 4882 megaraid_sas_kill_hba(instance); 4883 break; 4884 case IGNORE_TIMEOUT: 4885 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4886 __func__, __LINE__); 4887 break; 4888 } 4889 } 4890 4891 if (ret != DCMD_TIMEOUT) 4892 megasas_return_cmd(instance, cmd); 4893 } 4894 4895 /** 4896 * megasas_get_controller_info - Returns FW's controller structure 4897 * @instance: Adapter soft state 4898 * 4899 * Issues an internal command (DCMD) to get the FW's controller structure. 4900 * This information is mainly used to find out the maximum IO transfer per 4901 * command supported by the FW. 
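 *
 * On success the driver also caches controller capabilities reported in
 * the returned structure (Online Controller Reset, secure JBOD, NVMe
 * passthru, snapdump wait time, iMR vs. MR, etc.).
 *
 * Return: 0 on success, a negative errno or non-zero DCMD status otherwise.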
4902 */ 4903 int 4904 megasas_get_ctrl_info(struct megasas_instance *instance) 4905 { 4906 int ret = 0; 4907 struct megasas_cmd *cmd; 4908 struct megasas_dcmd_frame *dcmd; 4909 struct megasas_ctrl_info *ci; 4910 dma_addr_t ci_h = 0; 4911 4912 ci = instance->ctrl_info_buf; 4913 ci_h = instance->ctrl_info_buf_h; 4914 4915 cmd = megasas_get_cmd(instance); 4916 4917 if (!cmd) { 4918 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4919 return -ENOMEM; 4920 } 4921 4922 dcmd = &cmd->frame->dcmd; 4923 4924 memset(ci, 0, sizeof(*ci)); 4925 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4926 4927 dcmd->cmd = MFI_CMD_DCMD; 4928 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4929 dcmd->sge_count = 1; 4930 dcmd->flags = MFI_FRAME_DIR_READ; 4931 dcmd->timeout = 0; 4932 dcmd->pad_0 = 0; 4933 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4934 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4935 dcmd->mbox.b[0] = 1; 4936 4937 megasas_set_dma_settings(instance, dcmd, ci_h, 4938 sizeof(struct megasas_ctrl_info)); 4939 4940 if ((instance->adapter_type != MFI_SERIES) && 4941 !instance->mask_interrupts) { 4942 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4943 } else { 4944 ret = megasas_issue_polled(instance, cmd); 4945 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4946 } 4947 4948 switch (ret) { 4949 case DCMD_SUCCESS: 4950 /* Save required controller information in 4951 * CPU endianness format. 4952 */ 4953 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 4954 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 4955 le32_to_cpus((u32 *)&ci->adapterOperations2); 4956 le32_to_cpus((u32 *)&ci->adapterOperations3); 4957 le16_to_cpus((u16 *)&ci->adapter_operations4); 4958 4959 /* Update the latest Ext VD info. 4960 * From Init path, store current firmware details. 4961 * From OCR path, detect any firmware properties changes. 4962 * in case of Firmware upgrade without system reboot. 4963 */ 4964 megasas_update_ext_vd_details(instance); 4965 instance->use_seqnum_jbod_fp = 4966 ci->adapterOperations3.useSeqNumJbodFP; 4967 instance->support_morethan256jbod = 4968 ci->adapter_operations4.support_pd_map_target_id; 4969 instance->support_nvme_passthru = 4970 ci->adapter_operations4.support_nvme_passthru; 4971 instance->task_abort_tmo = ci->TaskAbortTO; 4972 instance->max_reset_tmo = ci->MaxResetTO; 4973 4974 /*Check whether controller is iMR or MR */ 4975 instance->is_imr = (ci->memory_size ? 0 : 1); 4976 4977 instance->snapdump_wait_time = 4978 (ci->properties.on_off_properties2.enable_snap_dump ? 4979 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 4980 4981 instance->enable_fw_dev_list = 4982 ci->properties.on_off_properties2.enable_fw_dev_list; 4983 4984 dev_info(&instance->pdev->dev, 4985 "controller type\t: %s(%dMB)\n", 4986 instance->is_imr ? "iMR" : "MR", 4987 le16_to_cpu(ci->memory_size)); 4988 4989 instance->disableOnlineCtrlReset = 4990 ci->properties.OnOffProperties.disableOnlineCtrlReset; 4991 instance->secure_jbod_support = 4992 ci->adapterOperations3.supportSecurityonJBOD; 4993 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4994 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4995 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4996 instance->secure_jbod_support ? "Yes" : "No"); 4997 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 4998 instance->support_nvme_passthru ? 
"Yes" : "No"); 4999 dev_info(&instance->pdev->dev, 5000 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5001 instance->task_abort_tmo, instance->max_reset_tmo); 5002 5003 break; 5004 5005 case DCMD_TIMEOUT: 5006 switch (dcmd_timeout_ocr_possible(instance)) { 5007 case INITIATE_OCR: 5008 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5009 megasas_reset_fusion(instance->host, 5010 MFI_IO_TIMEOUT_OCR); 5011 break; 5012 case KILL_ADAPTER: 5013 megaraid_sas_kill_hba(instance); 5014 break; 5015 case IGNORE_TIMEOUT: 5016 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5017 __func__, __LINE__); 5018 break; 5019 } 5020 break; 5021 case DCMD_FAILED: 5022 megaraid_sas_kill_hba(instance); 5023 break; 5024 5025 } 5026 5027 if (ret != DCMD_TIMEOUT) 5028 megasas_return_cmd(instance, cmd); 5029 5030 return ret; 5031 } 5032 5033 /* 5034 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5035 * to firmware 5036 * 5037 * @instance: Adapter soft state 5038 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5039 MR_CRASH_BUF_TURN_OFF = 0 5040 MR_CRASH_BUF_TURN_ON = 1 5041 * @return 0 on success non-zero on failure. 5042 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5043 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5044 * that driver supports crash dump feature. This DCMD will be sent only if 5045 * crash dump feature is supported by the FW. 5046 * 5047 */ 5048 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5049 u8 crash_buf_state) 5050 { 5051 int ret = 0; 5052 struct megasas_cmd *cmd; 5053 struct megasas_dcmd_frame *dcmd; 5054 5055 cmd = megasas_get_cmd(instance); 5056 5057 if (!cmd) { 5058 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5059 return -ENOMEM; 5060 } 5061 5062 5063 dcmd = &cmd->frame->dcmd; 5064 5065 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5066 dcmd->mbox.b[0] = crash_buf_state; 5067 dcmd->cmd = MFI_CMD_DCMD; 5068 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5069 dcmd->sge_count = 1; 5070 dcmd->flags = MFI_FRAME_DIR_NONE; 5071 dcmd->timeout = 0; 5072 dcmd->pad_0 = 0; 5073 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5074 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5075 5076 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5077 CRASH_DMA_BUF_SIZE); 5078 5079 if ((instance->adapter_type != MFI_SERIES) && 5080 !instance->mask_interrupts) 5081 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5082 else 5083 ret = megasas_issue_polled(instance, cmd); 5084 5085 if (ret == DCMD_TIMEOUT) { 5086 switch (dcmd_timeout_ocr_possible(instance)) { 5087 case INITIATE_OCR: 5088 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5089 megasas_reset_fusion(instance->host, 5090 MFI_IO_TIMEOUT_OCR); 5091 break; 5092 case KILL_ADAPTER: 5093 megaraid_sas_kill_hba(instance); 5094 break; 5095 case IGNORE_TIMEOUT: 5096 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5097 __func__, __LINE__); 5098 break; 5099 } 5100 } else 5101 megasas_return_cmd(instance, cmd); 5102 5103 return ret; 5104 } 5105 5106 /** 5107 * megasas_issue_init_mfi - Initializes the FW 5108 * @instance: Adapter soft state 5109 * 5110 * Issues the INIT MFI cmd 5111 */ 5112 static int 5113 megasas_issue_init_mfi(struct megasas_instance *instance) 5114 { 5115 __le32 context; 5116 struct megasas_cmd *cmd; 5117 struct megasas_init_frame *init_frame; 5118 struct megasas_init_queue_info *initq_info; 5119 dma_addr_t init_frame_h; 5120 dma_addr_t 
initq_info_h; 5121 5122 /* 5123 * Prepare a init frame. Note the init frame points to queue info 5124 * structure. Each frame has SGL allocated after first 64 bytes. For 5125 * this frame - since we don't need any SGL - we use SGL's space as 5126 * queue info structure 5127 * 5128 * We will not get a NULL command below. We just created the pool. 5129 */ 5130 cmd = megasas_get_cmd(instance); 5131 5132 init_frame = (struct megasas_init_frame *)cmd->frame; 5133 initq_info = (struct megasas_init_queue_info *) 5134 ((unsigned long)init_frame + 64); 5135 5136 init_frame_h = cmd->frame_phys_addr; 5137 initq_info_h = init_frame_h + 64; 5138 5139 context = init_frame->context; 5140 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5141 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5142 init_frame->context = context; 5143 5144 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5145 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5146 5147 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5148 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5149 5150 init_frame->cmd = MFI_CMD_INIT; 5151 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5152 init_frame->queue_info_new_phys_addr_lo = 5153 cpu_to_le32(lower_32_bits(initq_info_h)); 5154 init_frame->queue_info_new_phys_addr_hi = 5155 cpu_to_le32(upper_32_bits(initq_info_h)); 5156 5157 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5158 5159 /* 5160 * disable the intr before firing the init frame to FW 5161 */ 5162 instance->instancet->disable_intr(instance); 5163 5164 /* 5165 * Issue the init frame in polled mode 5166 */ 5167 5168 if (megasas_issue_polled(instance, cmd)) { 5169 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5170 megasas_return_cmd(instance, cmd); 5171 goto fail_fw_init; 5172 } 5173 5174 megasas_return_cmd(instance, cmd); 5175 5176 return 0; 5177 5178 fail_fw_init: 5179 return -EINVAL; 5180 } 5181 5182 static u32 5183 megasas_init_adapter_mfi(struct megasas_instance *instance) 5184 { 5185 u32 context_sz; 5186 u32 reply_q_sz; 5187 5188 /* 5189 * Get various operational parameters from status register 5190 */ 5191 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5192 /* 5193 * Reduce the max supported cmds by 1. This is to ensure that the 5194 * reply_q_sz (1 more than the max cmd that driver may send) 5195 * does not exceed max cmds that the FW can support 5196 */ 5197 instance->max_fw_cmds = instance->max_fw_cmds-1; 5198 instance->max_mfi_cmds = instance->max_fw_cmds; 5199 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5200 0x10; 5201 /* 5202 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5203 * are reserved for IOCTL + driver's internal DCMDs. 
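	 * All other MFI adapters reserve MEGASAS_INT_CMDS commands, with the
	 * ioctl semaphore initialized to MEGASAS_MFI_IOCTL_CMDS.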
 */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
		instance->max_scsi_cmds = (instance->max_fw_cmds -
			MEGASAS_SKINNY_INT_CMDS);
		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
	} else {
		instance->max_scsi_cmds = (instance->max_fw_cmds -
			MEGASAS_INT_CMDS);
		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
	}

	instance->cur_can_queue = instance->max_scsi_cmds;
	/*
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_cmds;

	/*
	 * Allocate memory for reply queue. Length of reply queue should
	 * be _one_ more than the maximum commands handled by the firmware.
	 *
	 * Note: When FW completes commands, it places corresponding context
	 * values in this circular reply queue. This circular queue is a
	 * fairly typical producer-consumer queue. FW is the producer (of
	 * completed commands) and the driver is the consumer.
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);

	if (!instance->reply_queue) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	if (megasas_issue_init_mfi(instance))
		goto fail_fw_init;

	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev,
			"(%d): Could not get controller info. Fail from %s %d\n",
			instance->unique_id, __func__, __LINE__);
		goto fail_fw_init;
	}

	instance->fw_support_ieee = 0;
	instance->fw_support_ieee =
		(instance->instancet->read_fw_status_reg(instance) &
		0x04000000);

	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
		instance->fw_support_ieee);

	if (instance->fw_support_ieee)
		instance->flag_ieee = 1;

	return 0;

fail_fw_init:

	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
			  instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
	megasas_free_cmds(instance);

fail_alloc_cmds:
	return 1;
}

/*
 * megasas_setup_irqs_ioapic -		register legacy interrupts.
 * @instance:				Adapter soft state
 *
 * Do not enable interrupts, only set up the ISR.
 *
 * Return 0 on success.
 */
static int
megasas_setup_irqs_ioapic(struct megasas_instance *instance)
{
	struct pci_dev *pdev;

	pdev = instance->pdev;
	instance->irq_context[0].instance = instance;
	instance->irq_context[0].MSIxIndex = 0;
	if (request_irq(pci_irq_vector(pdev, 0),
			instance->instancet->service_isr, IRQF_SHARED,
			"megasas", &instance->irq_context[0])) {
		dev_err(&instance->pdev->dev,
			"Failed to register IRQ from %s %d\n",
			__func__, __LINE__);
		return -1;
	}
	return 0;
}

/**
 * megasas_setup_irqs_msix -		register MSI-X interrupts.
 * @instance:				Adapter soft state
 * @is_probe:				Driver probe check
 *
 * Do not enable interrupts, only set up the ISRs.
 *
 * Return 0 on success.
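 *
 * If registration of any MSI-X vector fails, the vectors registered so
 * far are freed; during probe the driver then falls back to a single
 * legacy IO-APIC interrupt, otherwise -1 is returned.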
5312 */ 5313 static int 5314 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5315 { 5316 int i, j; 5317 struct pci_dev *pdev; 5318 5319 pdev = instance->pdev; 5320 5321 /* Try MSI-x */ 5322 for (i = 0; i < instance->msix_vectors; i++) { 5323 instance->irq_context[i].instance = instance; 5324 instance->irq_context[i].MSIxIndex = i; 5325 if (request_irq(pci_irq_vector(pdev, i), 5326 instance->instancet->service_isr, 0, "megasas", 5327 &instance->irq_context[i])) { 5328 dev_err(&instance->pdev->dev, 5329 "Failed to register IRQ for vector %d.\n", i); 5330 for (j = 0; j < i; j++) 5331 free_irq(pci_irq_vector(pdev, j), 5332 &instance->irq_context[j]); 5333 /* Retry irq register for IO_APIC*/ 5334 instance->msix_vectors = 0; 5335 if (is_probe) { 5336 pci_free_irq_vectors(instance->pdev); 5337 return megasas_setup_irqs_ioapic(instance); 5338 } else { 5339 return -1; 5340 } 5341 } 5342 } 5343 return 0; 5344 } 5345 5346 /* 5347 * megasas_destroy_irqs- unregister interrupts. 5348 * @instance: Adapter soft state 5349 * return: void 5350 */ 5351 static void 5352 megasas_destroy_irqs(struct megasas_instance *instance) { 5353 5354 int i; 5355 5356 if (instance->msix_vectors) 5357 for (i = 0; i < instance->msix_vectors; i++) { 5358 free_irq(pci_irq_vector(instance->pdev, i), 5359 &instance->irq_context[i]); 5360 } 5361 else 5362 free_irq(pci_irq_vector(instance->pdev, 0), 5363 &instance->irq_context[0]); 5364 } 5365 5366 /** 5367 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5368 * @instance: Adapter soft state 5369 * @is_probe: Driver probe check 5370 * 5371 * Return 0 on success. 5372 */ 5373 void 5374 megasas_setup_jbod_map(struct megasas_instance *instance) 5375 { 5376 int i; 5377 struct fusion_context *fusion = instance->ctrl_context; 5378 u32 pd_seq_map_sz; 5379 5380 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5381 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5382 5383 if (reset_devices || !fusion || 5384 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) { 5385 dev_info(&instance->pdev->dev, 5386 "Jbod map is not supported %s %d\n", 5387 __func__, __LINE__); 5388 instance->use_seqnum_jbod_fp = false; 5389 return; 5390 } 5391 5392 if (fusion->pd_seq_sync[0]) 5393 goto skip_alloc; 5394 5395 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5396 fusion->pd_seq_sync[i] = dma_alloc_coherent 5397 (&instance->pdev->dev, pd_seq_map_sz, 5398 &fusion->pd_seq_phys[i], GFP_KERNEL); 5399 if (!fusion->pd_seq_sync[i]) { 5400 dev_err(&instance->pdev->dev, 5401 "Failed to allocate memory from %s %d\n", 5402 __func__, __LINE__); 5403 if (i == 1) { 5404 dma_free_coherent(&instance->pdev->dev, 5405 pd_seq_map_sz, fusion->pd_seq_sync[0], 5406 fusion->pd_seq_phys[0]); 5407 fusion->pd_seq_sync[0] = NULL; 5408 } 5409 instance->use_seqnum_jbod_fp = false; 5410 return; 5411 } 5412 } 5413 5414 skip_alloc: 5415 if (!megasas_sync_pd_seq_num(instance, false) && 5416 !megasas_sync_pd_seq_num(instance, true)) 5417 instance->use_seqnum_jbod_fp = true; 5418 else 5419 instance->use_seqnum_jbod_fp = false; 5420 } 5421 5422 static void megasas_setup_reply_map(struct megasas_instance *instance) 5423 { 5424 const struct cpumask *mask; 5425 unsigned int queue, cpu; 5426 5427 for (queue = 0; queue < instance->msix_vectors; queue++) { 5428 mask = pci_irq_get_affinity(instance->pdev, queue); 5429 if (!mask) 5430 goto fallback; 5431 5432 for_each_cpu(cpu, mask) 5433 instance->reply_map[cpu] = queue; 5434 } 5435 return; 5436 5437 fallback: 5438 for_each_possible_cpu(cpu) 
5439 instance->reply_map[cpu] = cpu % instance->msix_vectors; 5440 } 5441 5442 /** 5443 * megasas_get_device_list - Get the PD and LD device list from FW. 5444 * @instance: Adapter soft state 5445 * @return: Success or failure 5446 * 5447 * Issue DCMDs to Firmware to get the PD and LD list. 5448 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5449 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 5450 */ 5451 static 5452 int megasas_get_device_list(struct megasas_instance *instance) 5453 { 5454 memset(instance->pd_list, 0, 5455 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5456 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5457 5458 if (instance->enable_fw_dev_list) { 5459 if (megasas_host_device_list_query(instance, true)) 5460 return FAILED; 5461 } else { 5462 if (megasas_get_pd_list(instance) < 0) { 5463 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5464 return FAILED; 5465 } 5466 5467 if (megasas_ld_list_query(instance, 5468 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5469 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5470 return FAILED; 5471 } 5472 } 5473 5474 return SUCCESS; 5475 } 5476 /** 5477 * megasas_init_fw - Initializes the FW 5478 * @instance: Adapter soft state 5479 * 5480 * This is the main function for initializing firmware 5481 */ 5482 5483 static int megasas_init_fw(struct megasas_instance *instance) 5484 { 5485 u32 max_sectors_1; 5486 u32 max_sectors_2, tmp_sectors, msix_enable; 5487 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 5488 resource_size_t base_addr; 5489 struct megasas_ctrl_info *ctrl_info = NULL; 5490 unsigned long bar_list; 5491 int i, j, loop, fw_msix_count = 0; 5492 struct IOV_111 *iovPtr; 5493 struct fusion_context *fusion; 5494 bool do_adp_reset = true; 5495 5496 fusion = instance->ctrl_context; 5497 5498 /* Find first memory bar */ 5499 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5500 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5501 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5502 "megasas: LSI")) { 5503 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5504 return -EBUSY; 5505 } 5506 5507 base_addr = pci_resource_start(instance->pdev, instance->bar); 5508 instance->reg_set = ioremap_nocache(base_addr, 8192); 5509 5510 if (!instance->reg_set) { 5511 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5512 goto fail_ioremap; 5513 } 5514 5515 if (instance->adapter_type != MFI_SERIES) 5516 instance->instancet = &megasas_instance_template_fusion; 5517 else { 5518 switch (instance->pdev->device) { 5519 case PCI_DEVICE_ID_LSI_SAS1078R: 5520 case PCI_DEVICE_ID_LSI_SAS1078DE: 5521 instance->instancet = &megasas_instance_template_ppc; 5522 break; 5523 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5524 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5525 instance->instancet = &megasas_instance_template_gen2; 5526 break; 5527 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5528 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5529 instance->instancet = &megasas_instance_template_skinny; 5530 break; 5531 case PCI_DEVICE_ID_LSI_SAS1064R: 5532 case PCI_DEVICE_ID_DELL_PERC5: 5533 default: 5534 instance->instancet = &megasas_instance_template_xscale; 5535 instance->pd_list_not_supported = 1; 5536 break; 5537 } 5538 } 5539 5540 if (megasas_transition_to_ready(instance, 0)) { 5541 if (instance->adapter_type >= INVADER_SERIES) { 5542 status_reg = instance->instancet->read_fw_status_reg( 5543 instance); 5544 do_adp_reset = status_reg & 
MFI_RESET_ADAPTER; 5545 } 5546 5547 if (do_adp_reset) { 5548 atomic_set(&instance->fw_reset_no_pci_access, 1); 5549 instance->instancet->adp_reset 5550 (instance, instance->reg_set); 5551 atomic_set(&instance->fw_reset_no_pci_access, 0); 5552 dev_info(&instance->pdev->dev, 5553 "FW restarted successfully from %s!\n", 5554 __func__); 5555 5556 /*waiting for about 30 second before retry*/ 5557 ssleep(30); 5558 5559 if (megasas_transition_to_ready(instance, 0)) 5560 goto fail_ready_state; 5561 } else { 5562 goto fail_ready_state; 5563 } 5564 } 5565 5566 megasas_init_ctrl_params(instance); 5567 5568 if (megasas_set_dma_mask(instance)) 5569 goto fail_ready_state; 5570 5571 if (megasas_alloc_ctrl_mem(instance)) 5572 goto fail_alloc_dma_buf; 5573 5574 if (megasas_alloc_ctrl_dma_buffers(instance)) 5575 goto fail_alloc_dma_buf; 5576 5577 fusion = instance->ctrl_context; 5578 5579 if (instance->adapter_type >= VENTURA_SERIES) { 5580 scratch_pad_2 = 5581 megasas_readl(instance, 5582 &instance->reg_set->outbound_scratch_pad_2); 5583 instance->max_raid_mapsize = ((scratch_pad_2 >> 5584 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5585 MR_MAX_RAID_MAP_SIZE_MASK); 5586 } 5587 5588 /* Check if MSI-X is supported while in ready state */ 5589 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 5590 0x4000000) >> 0x1a; 5591 if (msix_enable && !msix_disable) { 5592 int irq_flags = PCI_IRQ_MSIX; 5593 5594 scratch_pad_1 = megasas_readl 5595 (instance, &instance->reg_set->outbound_scratch_pad_1); 5596 /* Check max MSI-X vectors */ 5597 if (fusion) { 5598 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5599 /* Thunderbolt Series*/ 5600 instance->msix_vectors = (scratch_pad_1 5601 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5602 fw_msix_count = instance->msix_vectors; 5603 } else { 5604 instance->msix_vectors = ((scratch_pad_1 5605 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5606 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5607 5608 /* 5609 * For Invader series, > 8 MSI-x vectors 5610 * supported by FW/HW implies combined 5611 * reply queue mode is enabled. 5612 * For Ventura series, > 16 MSI-x vectors 5613 * supported by FW/HW implies combined 5614 * reply queue mode is enabled. 5615 */ 5616 switch (instance->adapter_type) { 5617 case INVADER_SERIES: 5618 if (instance->msix_vectors > 8) 5619 instance->msix_combined = true; 5620 break; 5621 case AERO_SERIES: 5622 case VENTURA_SERIES: 5623 if (instance->msix_vectors > 16) 5624 instance->msix_combined = true; 5625 break; 5626 } 5627 5628 if (rdpq_enable) 5629 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
5630 1 : 0; 5631 fw_msix_count = instance->msix_vectors; 5632 /* Save 1-15 reply post index address to local memory 5633 * Index 0 is already saved from reg offset 5634 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5635 */ 5636 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5637 instance->reply_post_host_index_addr[loop] = 5638 (u32 __iomem *) 5639 ((u8 __iomem *)instance->reg_set + 5640 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5641 + (loop * 0x10)); 5642 } 5643 } 5644 if (msix_vectors) 5645 instance->msix_vectors = min(msix_vectors, 5646 instance->msix_vectors); 5647 } else /* MFI adapters */ 5648 instance->msix_vectors = 1; 5649 /* Don't bother allocating more MSI-X vectors than cpus */ 5650 instance->msix_vectors = min(instance->msix_vectors, 5651 (unsigned int)num_online_cpus()); 5652 if (smp_affinity_enable) 5653 irq_flags |= PCI_IRQ_AFFINITY; 5654 i = pci_alloc_irq_vectors(instance->pdev, 1, 5655 instance->msix_vectors, irq_flags); 5656 if (i > 0) 5657 instance->msix_vectors = i; 5658 else 5659 instance->msix_vectors = 0; 5660 } 5661 /* 5662 * MSI-X host index 0 is common for all adapter. 5663 * It is used for all MPT based Adapters. 5664 */ 5665 if (instance->msix_combined) { 5666 instance->reply_post_host_index_addr[0] = 5667 (u32 *)((u8 *)instance->reg_set + 5668 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 5669 } else { 5670 instance->reply_post_host_index_addr[0] = 5671 (u32 *)((u8 *)instance->reg_set + 5672 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5673 } 5674 5675 if (!instance->msix_vectors) { 5676 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5677 if (i < 0) 5678 goto fail_init_adapter; 5679 } 5680 5681 megasas_setup_reply_map(instance); 5682 5683 dev_info(&instance->pdev->dev, 5684 "firmware supports msix\t: (%d)", fw_msix_count); 5685 dev_info(&instance->pdev->dev, 5686 "current msix/online cpus\t: (%d/%d)\n", 5687 instance->msix_vectors, (unsigned int)num_online_cpus()); 5688 dev_info(&instance->pdev->dev, 5689 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5690 5691 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5692 (unsigned long)instance); 5693 5694 /* 5695 * Below are default value for legacy Firmware. 5696 * non-fusion based controllers 5697 */ 5698 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5699 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5700 /* Get operational params, sge flags, send init cmd to controller */ 5701 if (instance->instancet->init_adapter(instance)) 5702 goto fail_init_adapter; 5703 5704 if (instance->adapter_type >= VENTURA_SERIES) { 5705 scratch_pad_3 = 5706 megasas_readl(instance, 5707 &instance->reg_set->outbound_scratch_pad_3); 5708 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 5709 MR_DEFAULT_NVME_PAGE_SHIFT) 5710 instance->nvme_page_size = 5711 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 5712 5713 dev_info(&instance->pdev->dev, 5714 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5715 } 5716 5717 if (instance->msix_vectors ? 
5718 megasas_setup_irqs_msix(instance, 1) : 5719 megasas_setup_irqs_ioapic(instance)) 5720 goto fail_init_adapter; 5721 5722 instance->instancet->enable_intr(instance); 5723 5724 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5725 5726 megasas_setup_jbod_map(instance); 5727 5728 if (megasas_get_device_list(instance) != SUCCESS) { 5729 dev_err(&instance->pdev->dev, 5730 "%s: megasas_get_device_list failed\n", 5731 __func__); 5732 goto fail_get_ld_pd_list; 5733 } 5734 5735 /* stream detection initialization */ 5736 if (instance->adapter_type >= VENTURA_SERIES) { 5737 fusion->stream_detect_by_ld = 5738 kcalloc(MAX_LOGICAL_DRIVES_EXT, 5739 sizeof(struct LD_STREAM_DETECT *), 5740 GFP_KERNEL); 5741 if (!fusion->stream_detect_by_ld) { 5742 dev_err(&instance->pdev->dev, 5743 "unable to allocate stream detection for pool of LDs\n"); 5744 goto fail_get_ld_pd_list; 5745 } 5746 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5747 fusion->stream_detect_by_ld[i] = 5748 kzalloc(sizeof(struct LD_STREAM_DETECT), 5749 GFP_KERNEL); 5750 if (!fusion->stream_detect_by_ld[i]) { 5751 dev_err(&instance->pdev->dev, 5752 "unable to allocate stream detect by LD\n "); 5753 for (j = 0; j < i; ++j) 5754 kfree(fusion->stream_detect_by_ld[j]); 5755 kfree(fusion->stream_detect_by_ld); 5756 fusion->stream_detect_by_ld = NULL; 5757 goto fail_get_ld_pd_list; 5758 } 5759 fusion->stream_detect_by_ld[i]->mru_bit_map 5760 = MR_STREAM_BITMAP; 5761 } 5762 } 5763 5764 /* 5765 * Compute the max allowed sectors per IO: The controller info has two 5766 * limits on max sectors. Driver should use the minimum of these two. 5767 * 5768 * 1 << stripe_sz_ops.min = max sectors per strip 5769 * 5770 * Note that older firmwares ( < FW ver 30) didn't report information 5771 * to calculate max_sectors_1. So the number ended up as zero always. 5772 */ 5773 tmp_sectors = 0; 5774 ctrl_info = instance->ctrl_info_buf; 5775 5776 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5777 le16_to_cpu(ctrl_info->max_strips_per_io); 5778 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5779 5780 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5781 5782 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5783 instance->passive = ctrl_info->cluster.passive; 5784 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5785 instance->UnevenSpanSupport = 5786 ctrl_info->adapterOperations2.supportUnevenSpans; 5787 if (instance->UnevenSpanSupport) { 5788 struct fusion_context *fusion = instance->ctrl_context; 5789 if (MR_ValidateMapInfo(instance, instance->map_id)) 5790 fusion->fast_path_io = 1; 5791 else 5792 fusion->fast_path_io = 0; 5793 5794 } 5795 if (ctrl_info->host_interface.SRIOV) { 5796 instance->requestorId = ctrl_info->iov.requestorId; 5797 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5798 if (!ctrl_info->adapterOperations2.activePassive) 5799 instance->PlasmaFW111 = 1; 5800 5801 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5802 instance->PlasmaFW111 ? 
"1.11" : "new"); 5803 5804 if (instance->PlasmaFW111) { 5805 iovPtr = (struct IOV_111 *) 5806 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5807 instance->requestorId = iovPtr->requestorId; 5808 } 5809 } 5810 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5811 instance->requestorId); 5812 } 5813 5814 instance->crash_dump_fw_support = 5815 ctrl_info->adapterOperations3.supportCrashDump; 5816 instance->crash_dump_drv_support = 5817 (instance->crash_dump_fw_support && 5818 instance->crash_dump_buf); 5819 if (instance->crash_dump_drv_support) 5820 megasas_set_crash_dump_params(instance, 5821 MR_CRASH_BUF_TURN_OFF); 5822 5823 else { 5824 if (instance->crash_dump_buf) 5825 dma_free_coherent(&instance->pdev->dev, 5826 CRASH_DMA_BUF_SIZE, 5827 instance->crash_dump_buf, 5828 instance->crash_dump_h); 5829 instance->crash_dump_buf = NULL; 5830 } 5831 5832 if (instance->snapdump_wait_time) { 5833 megasas_get_snapdump_properties(instance); 5834 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 5835 instance->snapdump_wait_time); 5836 } 5837 5838 dev_info(&instance->pdev->dev, 5839 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5840 le16_to_cpu(ctrl_info->pci.vendor_id), 5841 le16_to_cpu(ctrl_info->pci.device_id), 5842 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5843 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5844 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5845 instance->UnevenSpanSupport ? "yes" : "no"); 5846 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5847 instance->crash_dump_drv_support ? "yes" : "no"); 5848 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5849 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5850 5851 instance->max_sectors_per_req = instance->max_num_sge * 5852 SGE_BUFFER_SIZE / 512; 5853 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5854 instance->max_sectors_per_req = tmp_sectors; 5855 5856 /* Check for valid throttlequeuedepth module parameter */ 5857 if (throttlequeuedepth && 5858 throttlequeuedepth <= instance->max_scsi_cmds) 5859 instance->throttlequeuedepth = throttlequeuedepth; 5860 else 5861 instance->throttlequeuedepth = 5862 MEGASAS_THROTTLE_QUEUE_DEPTH; 5863 5864 if ((resetwaittime < 1) || 5865 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 5866 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5867 5868 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5869 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5870 5871 /* Launch SR-IOV heartbeat timer */ 5872 if (instance->requestorId) { 5873 if (!megasas_sriov_start_heartbeat(instance, 1)) { 5874 megasas_start_timer(instance); 5875 } else { 5876 instance->skip_heartbeat_timer_del = 1; 5877 goto fail_get_ld_pd_list; 5878 } 5879 } 5880 5881 /* 5882 * Create and start watchdog thread which will monitor 5883 * controller state every 1 sec and trigger OCR when 5884 * it enters fault state 5885 */ 5886 if (instance->adapter_type != MFI_SERIES) 5887 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 5888 goto fail_start_watchdog; 5889 5890 return 0; 5891 5892 fail_start_watchdog: 5893 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 5894 del_timer_sync(&instance->sriov_heartbeat_timer); 5895 fail_get_ld_pd_list: 5896 instance->instancet->disable_intr(instance); 5897 megasas_destroy_irqs(instance); 5898 fail_init_adapter: 5899 if (instance->msix_vectors) 5900 pci_free_irq_vectors(instance->pdev); 5901 instance->msix_vectors = 0; 5902 fail_alloc_dma_buf: 5903 megasas_free_ctrl_dma_buffers(instance); 5904 
megasas_free_ctrl_mem(instance); 5905 fail_ready_state: 5906 iounmap(instance->reg_set); 5907 5908 fail_ioremap: 5909 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5910 5911 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5912 __func__, __LINE__); 5913 return -EINVAL; 5914 } 5915 5916 /** 5917 * megasas_release_mfi - Reverses the FW initialization 5918 * @instance: Adapter soft state 5919 */ 5920 static void megasas_release_mfi(struct megasas_instance *instance) 5921 { 5922 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5923 5924 if (instance->reply_queue) 5925 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5926 instance->reply_queue, instance->reply_queue_h); 5927 5928 megasas_free_cmds(instance); 5929 5930 iounmap(instance->reg_set); 5931 5932 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5933 } 5934 5935 /** 5936 * megasas_get_seq_num - Gets latest event sequence numbers 5937 * @instance: Adapter soft state 5938 * @eli: FW event log sequence numbers information 5939 * 5940 * FW maintains a log of all events in a non-volatile area. Upper layers would 5941 * usually find out the latest sequence number of the events, the seq number at 5942 * the boot etc. They would "read" all the events below the latest seq number 5943 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5944 * number), they would subsribe to AEN (asynchronous event notification) and 5945 * wait for the events to happen. 5946 */ 5947 static int 5948 megasas_get_seq_num(struct megasas_instance *instance, 5949 struct megasas_evt_log_info *eli) 5950 { 5951 struct megasas_cmd *cmd; 5952 struct megasas_dcmd_frame *dcmd; 5953 struct megasas_evt_log_info *el_info; 5954 dma_addr_t el_info_h = 0; 5955 int ret; 5956 5957 cmd = megasas_get_cmd(instance); 5958 5959 if (!cmd) { 5960 return -ENOMEM; 5961 } 5962 5963 dcmd = &cmd->frame->dcmd; 5964 el_info = dma_alloc_coherent(&instance->pdev->dev, 5965 sizeof(struct megasas_evt_log_info), 5966 &el_info_h, GFP_KERNEL); 5967 if (!el_info) { 5968 megasas_return_cmd(instance, cmd); 5969 return -ENOMEM; 5970 } 5971 5972 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5973 5974 dcmd->cmd = MFI_CMD_DCMD; 5975 dcmd->cmd_status = 0x0; 5976 dcmd->sge_count = 1; 5977 dcmd->flags = MFI_FRAME_DIR_READ; 5978 dcmd->timeout = 0; 5979 dcmd->pad_0 = 0; 5980 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5981 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5982 5983 megasas_set_dma_settings(instance, dcmd, el_info_h, 5984 sizeof(struct megasas_evt_log_info)); 5985 5986 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5987 if (ret != DCMD_SUCCESS) { 5988 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5989 __func__, __LINE__); 5990 goto dcmd_failed; 5991 } 5992 5993 /* 5994 * Copy the data back into callers buffer 5995 */ 5996 eli->newest_seq_num = el_info->newest_seq_num; 5997 eli->oldest_seq_num = el_info->oldest_seq_num; 5998 eli->clear_seq_num = el_info->clear_seq_num; 5999 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6000 eli->boot_seq_num = el_info->boot_seq_num; 6001 6002 dcmd_failed: 6003 dma_free_coherent(&instance->pdev->dev, 6004 sizeof(struct megasas_evt_log_info), 6005 el_info, el_info_h); 6006 6007 megasas_return_cmd(instance, cmd); 6008 6009 return ret; 6010 } 6011 6012 /** 6013 * megasas_register_aen - Registers for asynchronous event notification 6014 * @instance: Adapter soft state 6015 * @seq_num: The starting sequence number 6016 * @class_locale: 
Class of the event 6017 * 6018 * This function subscribes for AEN for events beyond the @seq_num. It requests 6019 * to be notified if and only if the event is of type @class_locale 6020 */ 6021 static int 6022 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 6023 u32 class_locale_word) 6024 { 6025 int ret_val; 6026 struct megasas_cmd *cmd; 6027 struct megasas_dcmd_frame *dcmd; 6028 union megasas_evt_class_locale curr_aen; 6029 union megasas_evt_class_locale prev_aen; 6030 6031 /* 6032 * If there is an AEN pending already (aen_cmd), check if the 6033 * class_locale of that pending AEN is inclusive of the new 6034 * AEN request we currently have. If it is, then we don't have 6035 * to do anything. In other words, whichever events the current 6036 * AEN request is subscribing to have already been subscribed 6037 * to. 6038 * 6039 * If the old_cmd is _not_ inclusive, then we have to abort 6040 * that command, form a class_locale that is a superset of both 6041 * old and current, and re-issue it to the FW 6042 */ 6043 6044 curr_aen.word = class_locale_word; 6045 6046 if (instance->aen_cmd) { 6047 6048 prev_aen.word = 6049 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 6050 6051 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 6052 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 6053 dev_info(&instance->pdev->dev, 6054 "%s %d out of range class %d sent by application\n", 6055 __func__, __LINE__, curr_aen.members.class); 6056 return 0; 6057 } 6058 6059 /* 6060 * A class whose enum value is smaller is inclusive of all 6061 * higher values. If a PROGRESS (= -1) was previously 6062 * registered, then new registration requests for higher 6063 * classes need not be sent to FW. They are automatically 6064 * included. 6065 * 6066 * Locale numbers don't have such a hierarchy. They are bitmap 6067 * values. 6068 */ 6069 if ((prev_aen.members.class <= curr_aen.members.class) && 6070 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6071 curr_aen.members.locale)) { 6072 /* 6073 * Previously issued event registration includes 6074 * current request. Nothing to do.
6075 */ 6076 return 0; 6077 } else { 6078 curr_aen.members.locale |= prev_aen.members.locale; 6079 6080 if (prev_aen.members.class < curr_aen.members.class) 6081 curr_aen.members.class = prev_aen.members.class; 6082 6083 instance->aen_cmd->abort_aen = 1; 6084 ret_val = megasas_issue_blocked_abort_cmd(instance, 6085 instance-> 6086 aen_cmd, 30); 6087 6088 if (ret_val) { 6089 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6090 "previous AEN command\n"); 6091 return ret_val; 6092 } 6093 } 6094 } 6095 6096 cmd = megasas_get_cmd(instance); 6097 6098 if (!cmd) 6099 return -ENOMEM; 6100 6101 dcmd = &cmd->frame->dcmd; 6102 6103 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6104 6105 /* 6106 * Prepare DCMD for AEN registration 6107 */ 6108 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6109 6110 dcmd->cmd = MFI_CMD_DCMD; 6111 dcmd->cmd_status = 0x0; 6112 dcmd->sge_count = 1; 6113 dcmd->flags = MFI_FRAME_DIR_READ; 6114 dcmd->timeout = 0; 6115 dcmd->pad_0 = 0; 6116 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6117 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6118 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6119 instance->last_seq_num = seq_num; 6120 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6121 6122 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6123 sizeof(struct megasas_evt_detail)); 6124 6125 if (instance->aen_cmd != NULL) { 6126 megasas_return_cmd(instance, cmd); 6127 return 0; 6128 } 6129 6130 /* 6131 * Store a reference to the cmd used to register for AEN. When an 6132 * application wants us to register for AEN, we have to abort this 6133 * cmd and re-register with a new EVENT LOCALE supplied by that app 6134 */ 6135 instance->aen_cmd = cmd; 6136 6137 /* 6138 * Issue the aen registration frame 6139 */ 6140 instance->instancet->issue_dcmd(instance, cmd); 6141 6142 return 0; 6143 } 6144 6145 /* megasas_get_target_prop - Send DCMD with the below details to firmware. 6146 * 6147 * This DCMD will fetch a few properties of the LD/system PD defined 6148 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value. 6149 * 6150 * This DCMD is sent by the driver whenever a new target is added to the OS. 6151 * 6152 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6153 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6154 * 0 = system PD, 1 = LD. 6155 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6156 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6157 * 6158 * @instance: Adapter soft state 6159 * @sdev: OS provided scsi device 6160 * 6161 * Returns 0 on success, non-zero on failure.
6162 */ 6163 int 6164 megasas_get_target_prop(struct megasas_instance *instance, 6165 struct scsi_device *sdev) 6166 { 6167 int ret; 6168 struct megasas_cmd *cmd; 6169 struct megasas_dcmd_frame *dcmd; 6170 u16 targetId = (sdev->channel % 2) + sdev->id; 6171 6172 cmd = megasas_get_cmd(instance); 6173 6174 if (!cmd) { 6175 dev_err(&instance->pdev->dev, 6176 "Failed to get cmd %s\n", __func__); 6177 return -ENOMEM; 6178 } 6179 6180 dcmd = &cmd->frame->dcmd; 6181 6182 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6183 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6184 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6185 6186 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6187 dcmd->cmd = MFI_CMD_DCMD; 6188 dcmd->cmd_status = 0xFF; 6189 dcmd->sge_count = 1; 6190 dcmd->flags = MFI_FRAME_DIR_READ; 6191 dcmd->timeout = 0; 6192 dcmd->pad_0 = 0; 6193 dcmd->data_xfer_len = 6194 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6195 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6196 6197 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6198 sizeof(struct MR_TARGET_PROPERTIES)); 6199 6200 if ((instance->adapter_type != MFI_SERIES) && 6201 !instance->mask_interrupts) 6202 ret = megasas_issue_blocked_cmd(instance, 6203 cmd, MFI_IO_TIMEOUT_SECS); 6204 else 6205 ret = megasas_issue_polled(instance, cmd); 6206 6207 switch (ret) { 6208 case DCMD_TIMEOUT: 6209 switch (dcmd_timeout_ocr_possible(instance)) { 6210 case INITIATE_OCR: 6211 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6212 megasas_reset_fusion(instance->host, 6213 MFI_IO_TIMEOUT_OCR); 6214 break; 6215 case KILL_ADAPTER: 6216 megaraid_sas_kill_hba(instance); 6217 break; 6218 case IGNORE_TIMEOUT: 6219 dev_info(&instance->pdev->dev, 6220 "Ignore DCMD timeout: %s %d\n", 6221 __func__, __LINE__); 6222 break; 6223 } 6224 break; 6225 6226 default: 6227 megasas_return_cmd(instance, cmd); 6228 } 6229 if (ret != DCMD_SUCCESS) 6230 dev_err(&instance->pdev->dev, 6231 "return from %s %d return value %d\n", 6232 __func__, __LINE__, ret); 6233 6234 return ret; 6235 } 6236 6237 /** 6238 * megasas_start_aen - Subscribes to AEN during driver load time 6239 * @instance: Adapter soft state 6240 */ 6241 static int megasas_start_aen(struct megasas_instance *instance) 6242 { 6243 struct megasas_evt_log_info eli; 6244 union megasas_evt_class_locale class_locale; 6245 6246 /* 6247 * Get the latest sequence number from FW 6248 */ 6249 memset(&eli, 0, sizeof(eli)); 6250 6251 if (megasas_get_seq_num(instance, &eli)) 6252 return -1; 6253 6254 /* 6255 * Register AEN with FW for latest sequence number plus 1 6256 */ 6257 class_locale.members.reserved = 0; 6258 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6259 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6260 6261 return megasas_register_aen(instance, 6262 le32_to_cpu(eli.newest_seq_num) + 1, 6263 class_locale.word); 6264 } 6265 6266 /** 6267 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6268 * @instance: Adapter soft state 6269 */ 6270 static int megasas_io_attach(struct megasas_instance *instance) 6271 { 6272 struct Scsi_Host *host = instance->host; 6273 6274 /* 6275 * Export parameters required by SCSI mid-layer 6276 */ 6277 host->unique_id = instance->unique_id; 6278 host->can_queue = instance->max_scsi_cmds; 6279 host->this_id = instance->init_id; 6280 host->sg_tablesize = instance->max_num_sge; 6281 6282 if (instance->fw_support_ieee) 6283 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 6284 6285 /* 6286 * Check if the module parameter value for max_sectors can be used 6287 */ 
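/*
 * A non-zero max_sectors module parameter may only lower the limit derived
 * from the controller capabilities. Larger values are honoured only for the
 * GEN2 controllers checked below, and only up to MEGASAS_MAX_SECTORS;
 * anything else is rejected with an informational message.
 */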
6288 if (max_sectors && max_sectors < instance->max_sectors_per_req) 6289 instance->max_sectors_per_req = max_sectors; 6290 else { 6291 if (max_sectors) { 6292 if (((instance->pdev->device == 6293 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6294 (instance->pdev->device == 6295 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6296 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6297 instance->max_sectors_per_req = max_sectors; 6298 } else { 6299 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 6300 "and <= %d (or < 1MB for GEN2 controller)\n", 6301 instance->max_sectors_per_req); 6302 } 6303 } 6304 } 6305 6306 host->max_sectors = instance->max_sectors_per_req; 6307 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6308 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6309 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6310 host->max_lun = MEGASAS_MAX_LUN; 6311 host->max_cmd_len = 16; 6312 6313 /* 6314 * Notify the mid-layer about the new controller 6315 */ 6316 if (scsi_add_host(host, &instance->pdev->dev)) { 6317 dev_err(&instance->pdev->dev, 6318 "Failed to add host from %s %d\n", 6319 __func__, __LINE__); 6320 return -ENODEV; 6321 } 6322 6323 return 0; 6324 } 6325 6326 /** 6327 * megasas_set_dma_mask - Set DMA mask for supported controllers 6328 * 6329 * @instance: Adapter soft state 6330 * Description: 6331 * 6332 * For Ventura, driver/FW will operate in 63bit DMA addresses. 6333 * 6334 * For invader- 6335 * By default, driver/FW will operate in 32bit DMA addresses 6336 * for consistent DMA mapping but if 32 bit consistent 6337 * DMA mask fails, driver will try with 63 bit consistent 6338 * mask provided FW is true 63bit DMA capable 6339 * 6340 * For older controllers(Thunderbolt and MFI based adapters)- 6341 * driver/FW will operate in 32 bit consistent DMA addresses. 6342 */ 6343 static int 6344 megasas_set_dma_mask(struct megasas_instance *instance) 6345 { 6346 u64 consistent_mask; 6347 struct pci_dev *pdev; 6348 u32 scratch_pad_1; 6349 6350 pdev = instance->pdev; 6351 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? 6352 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 6353 6354 if (IS_DMA64) { 6355 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 6356 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6357 goto fail_set_dma_mask; 6358 6359 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 6360 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 6361 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 6362 /* 6363 * If 32 bit DMA mask fails, then try for 64 bit mask 6364 * for FW capable of handling 64 bit DMA. 6365 */ 6366 scratch_pad_1 = megasas_readl 6367 (instance, &instance->reg_set->outbound_scratch_pad_1); 6368 6369 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6370 goto fail_set_dma_mask; 6371 else if (dma_set_mask_and_coherent(&pdev->dev, 6372 DMA_BIT_MASK(63))) 6373 goto fail_set_dma_mask; 6374 } 6375 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6376 goto fail_set_dma_mask; 6377 6378 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6379 instance->consistent_mask_64bit = false; 6380 else 6381 instance->consistent_mask_64bit = true; 6382 6383 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6384 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 6385 (instance->consistent_mask_64bit ? "63" : "32")); 6386 6387 return 0; 6388 6389 fail_set_dma_mask: 6390 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6391 return -1; 6392 6393 } 6394 6395 /* 6396 * megasas_set_adapter_type - Set adapter type. 
6397 * Supported controllers can be divided in 6398 * different categories- 6399 * enum MR_ADAPTER_TYPE { 6400 * MFI_SERIES = 1, 6401 * THUNDERBOLT_SERIES = 2, 6402 * INVADER_SERIES = 3, 6403 * VENTURA_SERIES = 4, 6404 * AERO_SERIES = 5, 6405 * }; 6406 * @instance: Adapter soft state 6407 * return: void 6408 */ 6409 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 6410 { 6411 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 6412 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 6413 instance->adapter_type = MFI_SERIES; 6414 } else { 6415 switch (instance->pdev->device) { 6416 case PCI_DEVICE_ID_LSI_AERO_10E1: 6417 case PCI_DEVICE_ID_LSI_AERO_10E2: 6418 case PCI_DEVICE_ID_LSI_AERO_10E5: 6419 case PCI_DEVICE_ID_LSI_AERO_10E6: 6420 instance->adapter_type = AERO_SERIES; 6421 break; 6422 case PCI_DEVICE_ID_LSI_VENTURA: 6423 case PCI_DEVICE_ID_LSI_CRUSADER: 6424 case PCI_DEVICE_ID_LSI_HARPOON: 6425 case PCI_DEVICE_ID_LSI_TOMCAT: 6426 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6427 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6428 instance->adapter_type = VENTURA_SERIES; 6429 break; 6430 case PCI_DEVICE_ID_LSI_FUSION: 6431 case PCI_DEVICE_ID_LSI_PLASMA: 6432 instance->adapter_type = THUNDERBOLT_SERIES; 6433 break; 6434 case PCI_DEVICE_ID_LSI_INVADER: 6435 case PCI_DEVICE_ID_LSI_INTRUDER: 6436 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6437 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6438 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6439 case PCI_DEVICE_ID_LSI_FURY: 6440 instance->adapter_type = INVADER_SERIES; 6441 break; 6442 default: /* For all other supported controllers */ 6443 instance->adapter_type = MFI_SERIES; 6444 break; 6445 } 6446 } 6447 } 6448 6449 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6450 { 6451 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 6452 sizeof(u32), &instance->producer_h, GFP_KERNEL); 6453 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 6454 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 6455 6456 if (!instance->producer || !instance->consumer) { 6457 dev_err(&instance->pdev->dev, 6458 "Failed to allocate memory for producer, consumer\n"); 6459 return -1; 6460 } 6461 6462 *instance->producer = 0; 6463 *instance->consumer = 0; 6464 return 0; 6465 } 6466 6467 /** 6468 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 6469 * structures which are not common across MFI 6470 * adapters and fusion adapters. 6471 * For MFI based adapters, allocate producer and 6472 * consumer buffers. For fusion adapters, allocate 6473 * memory for fusion context. 
6474 * @instance: Adapter soft state 6475 * return: 0 for SUCCESS 6476 */ 6477 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 6478 { 6479 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 6480 GFP_KERNEL); 6481 if (!instance->reply_map) 6482 return -ENOMEM; 6483 6484 switch (instance->adapter_type) { 6485 case MFI_SERIES: 6486 if (megasas_alloc_mfi_ctrl_mem(instance)) 6487 goto fail; 6488 break; 6489 case AERO_SERIES: 6490 case VENTURA_SERIES: 6491 case THUNDERBOLT_SERIES: 6492 case INVADER_SERIES: 6493 if (megasas_alloc_fusion_context(instance)) 6494 goto fail; 6495 break; 6496 } 6497 6498 return 0; 6499 fail: 6500 kfree(instance->reply_map); 6501 instance->reply_map = NULL; 6502 return -ENOMEM; 6503 } 6504 6505 /* 6506 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 6507 * producer, consumer buffers for MFI adapters 6508 * 6509 * @instance - Adapter soft instance 6510 * 6511 */ 6512 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 6513 { 6514 kfree(instance->reply_map); 6515 if (instance->adapter_type == MFI_SERIES) { 6516 if (instance->producer) 6517 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6518 instance->producer, 6519 instance->producer_h); 6520 if (instance->consumer) 6521 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6522 instance->consumer, 6523 instance->consumer_h); 6524 } else { 6525 megasas_free_fusion_context(instance); 6526 } 6527 } 6528 6529 /** 6530 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 6531 * driver load time 6532 * 6533 * @instance- Adapter soft instance 6534 * @return- O for SUCCESS 6535 */ 6536 static inline 6537 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 6538 { 6539 struct pci_dev *pdev = instance->pdev; 6540 struct fusion_context *fusion = instance->ctrl_context; 6541 6542 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 6543 sizeof(struct megasas_evt_detail), 6544 &instance->evt_detail_h, GFP_KERNEL); 6545 6546 if (!instance->evt_detail) { 6547 dev_err(&instance->pdev->dev, 6548 "Failed to allocate event detail buffer\n"); 6549 return -ENOMEM; 6550 } 6551 6552 if (fusion) { 6553 fusion->ioc_init_request = 6554 dma_alloc_coherent(&pdev->dev, 6555 sizeof(struct MPI2_IOC_INIT_REQUEST), 6556 &fusion->ioc_init_request_phys, 6557 GFP_KERNEL); 6558 6559 if (!fusion->ioc_init_request) { 6560 dev_err(&pdev->dev, 6561 "Failed to allocate PD list buffer\n"); 6562 return -ENOMEM; 6563 } 6564 6565 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 6566 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6567 &instance->snapdump_prop_h, GFP_KERNEL); 6568 6569 if (!instance->snapdump_prop) 6570 dev_err(&pdev->dev, 6571 "Failed to allocate snapdump properties buffer\n"); 6572 6573 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 6574 HOST_DEVICE_LIST_SZ, 6575 &instance->host_device_list_buf_h, 6576 GFP_KERNEL); 6577 6578 if (!instance->host_device_list_buf) { 6579 dev_err(&pdev->dev, 6580 "Failed to allocate targetid list buffer\n"); 6581 return -ENOMEM; 6582 } 6583 6584 } 6585 6586 instance->pd_list_buf = 6587 dma_alloc_coherent(&pdev->dev, 6588 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6589 &instance->pd_list_buf_h, GFP_KERNEL); 6590 6591 if (!instance->pd_list_buf) { 6592 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 6593 return -ENOMEM; 6594 } 6595 6596 instance->ctrl_info_buf = 6597 dma_alloc_coherent(&pdev->dev, 6598 sizeof(struct megasas_ctrl_info), 6599 &instance->ctrl_info_buf_h, 
GFP_KERNEL); 6600 6601 if (!instance->ctrl_info_buf) { 6602 dev_err(&pdev->dev, 6603 "Failed to allocate controller info buffer\n"); 6604 return -ENOMEM; 6605 } 6606 6607 instance->ld_list_buf = 6608 dma_alloc_coherent(&pdev->dev, 6609 sizeof(struct MR_LD_LIST), 6610 &instance->ld_list_buf_h, GFP_KERNEL); 6611 6612 if (!instance->ld_list_buf) { 6613 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 6614 return -ENOMEM; 6615 } 6616 6617 instance->ld_targetid_list_buf = 6618 dma_alloc_coherent(&pdev->dev, 6619 sizeof(struct MR_LD_TARGETID_LIST), 6620 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 6621 6622 if (!instance->ld_targetid_list_buf) { 6623 dev_err(&pdev->dev, 6624 "Failed to allocate LD targetid list buffer\n"); 6625 return -ENOMEM; 6626 } 6627 6628 if (!reset_devices) { 6629 instance->system_info_buf = 6630 dma_alloc_coherent(&pdev->dev, 6631 sizeof(struct MR_DRV_SYSTEM_INFO), 6632 &instance->system_info_h, GFP_KERNEL); 6633 instance->pd_info = 6634 dma_alloc_coherent(&pdev->dev, 6635 sizeof(struct MR_PD_INFO), 6636 &instance->pd_info_h, GFP_KERNEL); 6637 instance->tgt_prop = 6638 dma_alloc_coherent(&pdev->dev, 6639 sizeof(struct MR_TARGET_PROPERTIES), 6640 &instance->tgt_prop_h, GFP_KERNEL); 6641 instance->crash_dump_buf = 6642 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6643 &instance->crash_dump_h, GFP_KERNEL); 6644 6645 if (!instance->system_info_buf) 6646 dev_err(&instance->pdev->dev, 6647 "Failed to allocate system info buffer\n"); 6648 6649 if (!instance->pd_info) 6650 dev_err(&instance->pdev->dev, 6651 "Failed to allocate pd_info buffer\n"); 6652 6653 if (!instance->tgt_prop) 6654 dev_err(&instance->pdev->dev, 6655 "Failed to allocate tgt_prop buffer\n"); 6656 6657 if (!instance->crash_dump_buf) 6658 dev_err(&instance->pdev->dev, 6659 "Failed to allocate crash dump buffer\n"); 6660 } 6661 6662 return 0; 6663 } 6664 6665 /* 6666 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 6667 * during driver load time 6668 * 6669 * @instance- Adapter soft instance 6670 * 6671 */ 6672 static inline 6673 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 6674 { 6675 struct pci_dev *pdev = instance->pdev; 6676 struct fusion_context *fusion = instance->ctrl_context; 6677 6678 if (instance->evt_detail) 6679 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 6680 instance->evt_detail, 6681 instance->evt_detail_h); 6682 6683 if (fusion && fusion->ioc_init_request) 6684 dma_free_coherent(&pdev->dev, 6685 sizeof(struct MPI2_IOC_INIT_REQUEST), 6686 fusion->ioc_init_request, 6687 fusion->ioc_init_request_phys); 6688 6689 if (instance->pd_list_buf) 6690 dma_free_coherent(&pdev->dev, 6691 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6692 instance->pd_list_buf, 6693 instance->pd_list_buf_h); 6694 6695 if (instance->ld_list_buf) 6696 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 6697 instance->ld_list_buf, 6698 instance->ld_list_buf_h); 6699 6700 if (instance->ld_targetid_list_buf) 6701 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 6702 instance->ld_targetid_list_buf, 6703 instance->ld_targetid_list_buf_h); 6704 6705 if (instance->ctrl_info_buf) 6706 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 6707 instance->ctrl_info_buf, 6708 instance->ctrl_info_buf_h); 6709 6710 if (instance->system_info_buf) 6711 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 6712 instance->system_info_buf, 6713 instance->system_info_h); 6714 6715 if (instance->pd_info) 6716 
dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 6717 instance->pd_info, instance->pd_info_h); 6718 6719 if (instance->tgt_prop) 6720 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 6721 instance->tgt_prop, instance->tgt_prop_h); 6722 6723 if (instance->crash_dump_buf) 6724 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6725 instance->crash_dump_buf, 6726 instance->crash_dump_h); 6727 6728 if (instance->snapdump_prop) 6729 dma_free_coherent(&pdev->dev, 6730 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6731 instance->snapdump_prop, 6732 instance->snapdump_prop_h); 6733 6734 if (instance->host_device_list_buf) 6735 dma_free_coherent(&pdev->dev, 6736 HOST_DEVICE_LIST_SZ, 6737 instance->host_device_list_buf, 6738 instance->host_device_list_buf_h); 6739 6740 } 6741 6742 /* 6743 * megasas_init_ctrl_params - Initialize controller's instance 6744 * parameters before FW init 6745 * @instance - Adapter soft instance 6746 * @return - void 6747 */ 6748 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 6749 { 6750 instance->fw_crash_state = UNAVAILABLE; 6751 6752 megasas_poll_wait_aen = 0; 6753 instance->issuepend_done = 1; 6754 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 6755 6756 /* 6757 * Initialize locks and queues 6758 */ 6759 INIT_LIST_HEAD(&instance->cmd_pool); 6760 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 6761 6762 atomic_set(&instance->fw_outstanding, 0); 6763 6764 init_waitqueue_head(&instance->int_cmd_wait_q); 6765 init_waitqueue_head(&instance->abort_cmd_wait_q); 6766 6767 spin_lock_init(&instance->crashdump_lock); 6768 spin_lock_init(&instance->mfi_pool_lock); 6769 spin_lock_init(&instance->hba_lock); 6770 spin_lock_init(&instance->stream_lock); 6771 spin_lock_init(&instance->completion_lock); 6772 6773 mutex_init(&instance->reset_mutex); 6774 6775 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6776 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 6777 instance->flag_ieee = 1; 6778 6779 megasas_dbg_lvl = 0; 6780 instance->flag = 0; 6781 instance->unload = 1; 6782 instance->last_time = 0; 6783 instance->disableOnlineCtrlReset = 1; 6784 instance->UnevenSpanSupport = 0; 6785 6786 if (instance->adapter_type != MFI_SERIES) 6787 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6788 else 6789 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6790 } 6791 6792 /** 6793 * megasas_probe_one - PCI hotplug entry point 6794 * @pdev: PCI device structure 6795 * @id: PCI ids of supported hotplugged adapter 6796 */ 6797 static int megasas_probe_one(struct pci_dev *pdev, 6798 const struct pci_device_id *id) 6799 { 6800 int rval, pos; 6801 struct Scsi_Host *host; 6802 struct megasas_instance *instance; 6803 u16 control = 0; 6804 6805 switch (pdev->device) { 6806 case PCI_DEVICE_ID_LSI_AERO_10E1: 6807 case PCI_DEVICE_ID_LSI_AERO_10E5: 6808 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 6809 break; 6810 } 6811 6812 /* Reset MSI-X in the kdump kernel */ 6813 if (reset_devices) { 6814 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6815 if (pos) { 6816 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 6817 &control); 6818 if (control & PCI_MSIX_FLAGS_ENABLE) { 6819 dev_info(&pdev->dev, "resetting MSI-X\n"); 6820 pci_write_config_word(pdev, 6821 pos + PCI_MSIX_FLAGS, 6822 control & 6823 ~PCI_MSIX_FLAGS_ENABLE); 6824 } 6825 } 6826 } 6827 6828 /* 6829 * PCI prepping: enable device set bus mastering and dma mask 6830 */ 6831 rval = pci_enable_device_mem(pdev); 6832 
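/*
 * Nothing has been allocated for this adapter yet, so on failure the
 * error code from the PCI core can simply be propagated to the caller.
 */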
6833 if (rval) { 6834 return rval; 6835 } 6836 6837 pci_set_master(pdev); 6838 6839 host = scsi_host_alloc(&megasas_template, 6840 sizeof(struct megasas_instance)); 6841 6842 if (!host) { 6843 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6844 goto fail_alloc_instance; 6845 } 6846 6847 instance = (struct megasas_instance *)host->hostdata; 6848 memset(instance, 0, sizeof(*instance)); 6849 atomic_set(&instance->fw_reset_no_pci_access, 0); 6850 6851 /* 6852 * Initialize PCI related and misc parameters 6853 */ 6854 instance->pdev = pdev; 6855 instance->host = host; 6856 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 6857 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 6858 6859 megasas_set_adapter_type(instance); 6860 6861 /* 6862 * Initialize MFI Firmware 6863 */ 6864 if (megasas_init_fw(instance)) 6865 goto fail_init_mfi; 6866 6867 if (instance->requestorId) { 6868 if (instance->PlasmaFW111) { 6869 instance->vf_affiliation_111 = 6870 dma_alloc_coherent(&pdev->dev, 6871 sizeof(struct MR_LD_VF_AFFILIATION_111), 6872 &instance->vf_affiliation_111_h, 6873 GFP_KERNEL); 6874 if (!instance->vf_affiliation_111) 6875 dev_warn(&pdev->dev, "Can't allocate " 6876 "memory for VF affiliation buffer\n"); 6877 } else { 6878 instance->vf_affiliation = 6879 dma_alloc_coherent(&pdev->dev, 6880 (MAX_LOGICAL_DRIVES + 1) * 6881 sizeof(struct MR_LD_VF_AFFILIATION), 6882 &instance->vf_affiliation_h, 6883 GFP_KERNEL); 6884 if (!instance->vf_affiliation) 6885 dev_warn(&pdev->dev, "Can't allocate " 6886 "memory for VF affiliation buffer\n"); 6887 } 6888 } 6889 6890 /* 6891 * Store instance in PCI softstate 6892 */ 6893 pci_set_drvdata(pdev, instance); 6894 6895 /* 6896 * Add this controller to megasas_mgmt_info structure so that it 6897 * can be exported to management applications 6898 */ 6899 megasas_mgmt_info.count++; 6900 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 6901 megasas_mgmt_info.max_index++; 6902 6903 /* 6904 * Register with SCSI mid-layer 6905 */ 6906 if (megasas_io_attach(instance)) 6907 goto fail_io_attach; 6908 6909 instance->unload = 0; 6910 /* 6911 * Trigger SCSI to scan our drives 6912 */ 6913 if (!instance->enable_fw_dev_list || 6914 (instance->host_device_list_buf->count > 0)) 6915 scsi_scan_host(host); 6916 6917 /* 6918 * Initiate AEN (Asynchronous Event Notification) 6919 */ 6920 if (megasas_start_aen(instance)) { 6921 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 6922 goto fail_start_aen; 6923 } 6924 6925 /* Get current SR-IOV LD/VF affiliation */ 6926 if (instance->requestorId) 6927 megasas_get_ld_vf_affiliation(instance, 1); 6928 6929 return 0; 6930 6931 fail_start_aen: 6932 fail_io_attach: 6933 megasas_mgmt_info.count--; 6934 megasas_mgmt_info.max_index--; 6935 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6936 6937 instance->instancet->disable_intr(instance); 6938 megasas_destroy_irqs(instance); 6939 6940 if (instance->adapter_type != MFI_SERIES) 6941 megasas_release_fusion(instance); 6942 else 6943 megasas_release_mfi(instance); 6944 if (instance->msix_vectors) 6945 pci_free_irq_vectors(instance->pdev); 6946 fail_init_mfi: 6947 scsi_host_put(host); 6948 fail_alloc_instance: 6949 pci_disable_device(pdev); 6950 6951 return -ENODEV; 6952 } 6953 6954 /** 6955 * megasas_flush_cache - Requests FW to flush all its caches 6956 * @instance: Adapter soft state 6957 */ 6958 static void megasas_flush_cache(struct megasas_instance *instance) 6959 { 6960 struct megasas_cmd *cmd; 6961 struct megasas_dcmd_frame *dcmd; 6962 
6963 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6964 return; 6965 6966 cmd = megasas_get_cmd(instance); 6967 6968 if (!cmd) 6969 return; 6970 6971 dcmd = &cmd->frame->dcmd; 6972 6973 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6974 6975 dcmd->cmd = MFI_CMD_DCMD; 6976 dcmd->cmd_status = 0x0; 6977 dcmd->sge_count = 0; 6978 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6979 dcmd->timeout = 0; 6980 dcmd->pad_0 = 0; 6981 dcmd->data_xfer_len = 0; 6982 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6983 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6984 6985 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6986 != DCMD_SUCCESS) { 6987 dev_err(&instance->pdev->dev, 6988 "return from %s %d\n", __func__, __LINE__); 6989 return; 6990 } 6991 6992 megasas_return_cmd(instance, cmd); 6993 } 6994 6995 /** 6996 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6997 * @instance: Adapter soft state 6998 * @opcode: Shutdown/Hibernate 6999 */ 7000 static void megasas_shutdown_controller(struct megasas_instance *instance, 7001 u32 opcode) 7002 { 7003 struct megasas_cmd *cmd; 7004 struct megasas_dcmd_frame *dcmd; 7005 7006 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7007 return; 7008 7009 cmd = megasas_get_cmd(instance); 7010 7011 if (!cmd) 7012 return; 7013 7014 if (instance->aen_cmd) 7015 megasas_issue_blocked_abort_cmd(instance, 7016 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7017 if (instance->map_update_cmd) 7018 megasas_issue_blocked_abort_cmd(instance, 7019 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7020 if (instance->jbod_seq_cmd) 7021 megasas_issue_blocked_abort_cmd(instance, 7022 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7023 7024 dcmd = &cmd->frame->dcmd; 7025 7026 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7027 7028 dcmd->cmd = MFI_CMD_DCMD; 7029 dcmd->cmd_status = 0x0; 7030 dcmd->sge_count = 0; 7031 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7032 dcmd->timeout = 0; 7033 dcmd->pad_0 = 0; 7034 dcmd->data_xfer_len = 0; 7035 dcmd->opcode = cpu_to_le32(opcode); 7036 7037 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7038 != DCMD_SUCCESS) { 7039 dev_err(&instance->pdev->dev, 7040 "return from %s %d\n", __func__, __LINE__); 7041 return; 7042 } 7043 7044 megasas_return_cmd(instance, cmd); 7045 } 7046 7047 #ifdef CONFIG_PM 7048 /** 7049 * megasas_suspend - driver suspend entry point 7050 * @pdev: PCI device structure 7051 * @state: PCI power state to suspend routine 7052 */ 7053 static int 7054 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 7055 { 7056 struct Scsi_Host *host; 7057 struct megasas_instance *instance; 7058 7059 instance = pci_get_drvdata(pdev); 7060 host = instance->host; 7061 instance->unload = 1; 7062 7063 /* Shutdown SR-IOV heartbeat timer */ 7064 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7065 del_timer_sync(&instance->sriov_heartbeat_timer); 7066 7067 /* Stop the FW fault detection watchdog */ 7068 if (instance->adapter_type != MFI_SERIES) 7069 megasas_fusion_stop_watchdog(instance); 7070 7071 megasas_flush_cache(instance); 7072 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7073 7074 /* cancel the delayed work if this work still in queue */ 7075 if (instance->ev != NULL) { 7076 struct megasas_aen_event *ev = instance->ev; 7077 cancel_delayed_work_sync(&ev->hotplug_work); 7078 instance->ev = NULL; 7079 } 7080 7081 tasklet_kill(&instance->isr_tasklet); 7082 7083 pci_set_drvdata(instance->pdev, 
instance); 7084 instance->instancet->disable_intr(instance); 7085 7086 megasas_destroy_irqs(instance); 7087 7088 if (instance->msix_vectors) 7089 pci_free_irq_vectors(instance->pdev); 7090 7091 pci_save_state(pdev); 7092 pci_disable_device(pdev); 7093 7094 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 7095 7096 return 0; 7097 } 7098 7099 /** 7100 * megasas_resume- driver resume entry point 7101 * @pdev: PCI device structure 7102 */ 7103 static int 7104 megasas_resume(struct pci_dev *pdev) 7105 { 7106 int rval; 7107 struct Scsi_Host *host; 7108 struct megasas_instance *instance; 7109 int irq_flags = PCI_IRQ_LEGACY; 7110 7111 instance = pci_get_drvdata(pdev); 7112 host = instance->host; 7113 pci_set_power_state(pdev, PCI_D0); 7114 pci_enable_wake(pdev, PCI_D0, 0); 7115 pci_restore_state(pdev); 7116 7117 /* 7118 * PCI prepping: enable device set bus mastering and dma mask 7119 */ 7120 rval = pci_enable_device_mem(pdev); 7121 7122 if (rval) { 7123 dev_err(&pdev->dev, "Enable device failed\n"); 7124 return rval; 7125 } 7126 7127 pci_set_master(pdev); 7128 7129 /* 7130 * We expect the FW state to be READY 7131 */ 7132 if (megasas_transition_to_ready(instance, 0)) 7133 goto fail_ready_state; 7134 7135 if (megasas_set_dma_mask(instance)) 7136 goto fail_set_dma_mask; 7137 7138 /* 7139 * Initialize MFI Firmware 7140 */ 7141 7142 atomic_set(&instance->fw_outstanding, 0); 7143 atomic_set(&instance->ldio_outstanding, 0); 7144 7145 /* Now re-enable MSI-X */ 7146 if (instance->msix_vectors) { 7147 irq_flags = PCI_IRQ_MSIX; 7148 if (smp_affinity_enable) 7149 irq_flags |= PCI_IRQ_AFFINITY; 7150 } 7151 rval = pci_alloc_irq_vectors(instance->pdev, 1, 7152 instance->msix_vectors ? 7153 instance->msix_vectors : 1, irq_flags); 7154 if (rval < 0) 7155 goto fail_reenable_msix; 7156 7157 megasas_setup_reply_map(instance); 7158 7159 if (instance->adapter_type != MFI_SERIES) { 7160 megasas_reset_reply_desc(instance); 7161 if (megasas_ioc_init_fusion(instance)) { 7162 megasas_free_cmds(instance); 7163 megasas_free_cmds_fusion(instance); 7164 goto fail_init_mfi; 7165 } 7166 if (!megasas_get_map_info(instance)) 7167 megasas_sync_map_info(instance); 7168 } else { 7169 *instance->producer = 0; 7170 *instance->consumer = 0; 7171 if (megasas_issue_init_mfi(instance)) 7172 goto fail_init_mfi; 7173 } 7174 7175 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7176 goto fail_init_mfi; 7177 7178 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7179 (unsigned long)instance); 7180 7181 if (instance->msix_vectors ? 
7182 megasas_setup_irqs_msix(instance, 0) : 7183 megasas_setup_irqs_ioapic(instance)) 7184 goto fail_init_mfi; 7185 7186 /* Re-launch SR-IOV heartbeat timer */ 7187 if (instance->requestorId) { 7188 if (!megasas_sriov_start_heartbeat(instance, 0)) 7189 megasas_start_timer(instance); 7190 else { 7191 instance->skip_heartbeat_timer_del = 1; 7192 goto fail_init_mfi; 7193 } 7194 } 7195 7196 instance->instancet->enable_intr(instance); 7197 megasas_setup_jbod_map(instance); 7198 instance->unload = 0; 7199 7200 /* 7201 * Initiate AEN (Asynchronous Event Notification) 7202 */ 7203 if (megasas_start_aen(instance)) 7204 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7205 7206 /* Re-launch FW fault watchdog */ 7207 if (instance->adapter_type != MFI_SERIES) 7208 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7209 goto fail_start_watchdog; 7210 7211 return 0; 7212 7213 fail_start_watchdog: 7214 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7215 del_timer_sync(&instance->sriov_heartbeat_timer); 7216 fail_init_mfi: 7217 megasas_free_ctrl_dma_buffers(instance); 7218 megasas_free_ctrl_mem(instance); 7219 scsi_host_put(host); 7220 7221 fail_reenable_msix: 7222 fail_set_dma_mask: 7223 fail_ready_state: 7224 7225 pci_disable_device(pdev); 7226 7227 return -ENODEV; 7228 } 7229 #else 7230 #define megasas_suspend NULL 7231 #define megasas_resume NULL 7232 #endif 7233 7234 static inline int 7235 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7236 { 7237 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7238 int i; 7239 u8 adp_state; 7240 7241 for (i = 0; i < wait_time; i++) { 7242 adp_state = atomic_read(&instance->adprecovery); 7243 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7244 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7245 break; 7246 7247 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7248 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7249 7250 msleep(1000); 7251 } 7252 7253 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7254 dev_info(&instance->pdev->dev, 7255 "%s HBA failed to become operational, adp_state %d\n", 7256 __func__, adp_state); 7257 return 1; 7258 } 7259 7260 return 0; 7261 } 7262 7263 /** 7264 * megasas_detach_one - PCI hot"un"plug entry point 7265 * @pdev: PCI device structure 7266 */ 7267 static void megasas_detach_one(struct pci_dev *pdev) 7268 { 7269 int i; 7270 struct Scsi_Host *host; 7271 struct megasas_instance *instance; 7272 struct fusion_context *fusion; 7273 u32 pd_seq_map_sz; 7274 7275 instance = pci_get_drvdata(pdev); 7276 host = instance->host; 7277 fusion = instance->ctrl_context; 7278 7279 /* Shutdown SR-IOV heartbeat timer */ 7280 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7281 del_timer_sync(&instance->sriov_heartbeat_timer); 7282 7283 /* Stop the FW fault detection watchdog */ 7284 if (instance->adapter_type != MFI_SERIES) 7285 megasas_fusion_stop_watchdog(instance); 7286 7287 if (instance->fw_crash_state != UNAVAILABLE) 7288 megasas_free_host_crash_buffer(instance); 7289 scsi_remove_host(instance->host); 7290 instance->unload = 1; 7291 7292 if (megasas_wait_for_adapter_operational(instance)) 7293 goto skip_firing_dcmds; 7294 7295 megasas_flush_cache(instance); 7296 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7297 7298 skip_firing_dcmds: 7299 /* cancel the delayed work if this work still in queue*/ 7300 if (instance->ev != NULL) { 7301 struct megasas_aen_event *ev = instance->ev; 7302 cancel_delayed_work_sync(&ev->hotplug_work); 7303 instance->ev = NULL; 
7304 } 7305 7306 /* cancel all wait events */ 7307 wake_up_all(&instance->int_cmd_wait_q); 7308 7309 tasklet_kill(&instance->isr_tasklet); 7310 7311 /* 7312 * Take the instance off the instance array. Note that we will not 7313 * decrement the max_index. We let this array be sparse array 7314 */ 7315 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7316 if (megasas_mgmt_info.instance[i] == instance) { 7317 megasas_mgmt_info.count--; 7318 megasas_mgmt_info.instance[i] = NULL; 7319 7320 break; 7321 } 7322 } 7323 7324 instance->instancet->disable_intr(instance); 7325 7326 megasas_destroy_irqs(instance); 7327 7328 if (instance->msix_vectors) 7329 pci_free_irq_vectors(instance->pdev); 7330 7331 if (instance->adapter_type >= VENTURA_SERIES) { 7332 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7333 kfree(fusion->stream_detect_by_ld[i]); 7334 kfree(fusion->stream_detect_by_ld); 7335 fusion->stream_detect_by_ld = NULL; 7336 } 7337 7338 7339 if (instance->adapter_type != MFI_SERIES) { 7340 megasas_release_fusion(instance); 7341 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 7342 (sizeof(struct MR_PD_CFG_SEQ) * 7343 (MAX_PHYSICAL_DEVICES - 1)); 7344 for (i = 0; i < 2 ; i++) { 7345 if (fusion->ld_map[i]) 7346 dma_free_coherent(&instance->pdev->dev, 7347 fusion->max_map_sz, 7348 fusion->ld_map[i], 7349 fusion->ld_map_phys[i]); 7350 if (fusion->ld_drv_map[i]) { 7351 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 7352 vfree(fusion->ld_drv_map[i]); 7353 else 7354 free_pages((ulong)fusion->ld_drv_map[i], 7355 fusion->drv_map_pages); 7356 } 7357 7358 if (fusion->pd_seq_sync[i]) 7359 dma_free_coherent(&instance->pdev->dev, 7360 pd_seq_map_sz, 7361 fusion->pd_seq_sync[i], 7362 fusion->pd_seq_phys[i]); 7363 } 7364 } else { 7365 megasas_release_mfi(instance); 7366 } 7367 7368 if (instance->vf_affiliation) 7369 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 7370 sizeof(struct MR_LD_VF_AFFILIATION), 7371 instance->vf_affiliation, 7372 instance->vf_affiliation_h); 7373 7374 if (instance->vf_affiliation_111) 7375 dma_free_coherent(&pdev->dev, 7376 sizeof(struct MR_LD_VF_AFFILIATION_111), 7377 instance->vf_affiliation_111, 7378 instance->vf_affiliation_111_h); 7379 7380 if (instance->hb_host_mem) 7381 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 7382 instance->hb_host_mem, 7383 instance->hb_host_mem_h); 7384 7385 megasas_free_ctrl_dma_buffers(instance); 7386 7387 megasas_free_ctrl_mem(instance); 7388 7389 scsi_host_put(host); 7390 7391 pci_disable_device(pdev); 7392 } 7393 7394 /** 7395 * megasas_shutdown - Shutdown entry point 7396 * @device: Generic device structure 7397 */ 7398 static void megasas_shutdown(struct pci_dev *pdev) 7399 { 7400 struct megasas_instance *instance = pci_get_drvdata(pdev); 7401 7402 instance->unload = 1; 7403 7404 if (megasas_wait_for_adapter_operational(instance)) 7405 goto skip_firing_dcmds; 7406 7407 megasas_flush_cache(instance); 7408 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7409 7410 skip_firing_dcmds: 7411 instance->instancet->disable_intr(instance); 7412 megasas_destroy_irqs(instance); 7413 7414 if (instance->msix_vectors) 7415 pci_free_irq_vectors(instance->pdev); 7416 } 7417 7418 /** 7419 * megasas_mgmt_open - char node "open" entry point 7420 */ 7421 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 7422 { 7423 /* 7424 * Allow only those users with admin rights 7425 */ 7426 if (!capable(CAP_SYS_ADMIN)) 7427 return -EACCES; 7428 7429 return 0; 7430 } 7431 7432 /** 7433 * megasas_mgmt_fasync - Async 
notifier registration from applications 7434 * 7435 * This function adds the calling process to a driver global queue. When an 7436 * event occurs, SIGIO will be sent to all processes in this queue. 7437 */ 7438 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 7439 { 7440 int rc; 7441 7442 mutex_lock(&megasas_async_queue_mutex); 7443 7444 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 7445 7446 mutex_unlock(&megasas_async_queue_mutex); 7447 7448 if (rc >= 0) { 7449 /* For sanity check when we get ioctl */ 7450 filep->private_data = filep; 7451 return 0; 7452 } 7453 7454 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 7455 7456 return rc; 7457 } 7458 7459 /** 7460 * megasas_mgmt_poll - char node "poll" entry point 7461 * */ 7462 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 7463 { 7464 __poll_t mask; 7465 unsigned long flags; 7466 7467 poll_wait(file, &megasas_poll_wait, wait); 7468 spin_lock_irqsave(&poll_aen_lock, flags); 7469 if (megasas_poll_wait_aen) 7470 mask = (EPOLLIN | EPOLLRDNORM); 7471 else 7472 mask = 0; 7473 megasas_poll_wait_aen = 0; 7474 spin_unlock_irqrestore(&poll_aen_lock, flags); 7475 return mask; 7476 } 7477 7478 /* 7479 * megasas_set_crash_dump_params_ioctl: 7480 * Send CRASH_DUMP_MODE DCMD to all controllers 7481 * @cmd: MFI command frame 7482 */ 7483 7484 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 7485 { 7486 struct megasas_instance *local_instance; 7487 int i, error = 0; 7488 int crash_support; 7489 7490 crash_support = cmd->frame->dcmd.mbox.w[0]; 7491 7492 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7493 local_instance = megasas_mgmt_info.instance[i]; 7494 if (local_instance && local_instance->crash_dump_drv_support) { 7495 if ((atomic_read(&local_instance->adprecovery) == 7496 MEGASAS_HBA_OPERATIONAL) && 7497 !megasas_set_crash_dump_params(local_instance, 7498 crash_support)) { 7499 local_instance->crash_dump_app_support = 7500 crash_support; 7501 dev_info(&local_instance->pdev->dev, 7502 "Application firmware crash " 7503 "dump mode set success\n"); 7504 error = 0; 7505 } else { 7506 dev_info(&local_instance->pdev->dev, 7507 "Application firmware crash " 7508 "dump mode set failed\n"); 7509 error = -1; 7510 } 7511 } 7512 } 7513 return error; 7514 } 7515 7516 /** 7517 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 7518 * @instance: Adapter soft state 7519 * @argp: User's ioctl packet 7520 */ 7521 static int 7522 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 7523 struct megasas_iocpacket __user * user_ioc, 7524 struct megasas_iocpacket *ioc) 7525 { 7526 struct megasas_sge64 *kern_sge64 = NULL; 7527 struct megasas_sge32 *kern_sge32 = NULL; 7528 struct megasas_cmd *cmd; 7529 void *kbuff_arr[MAX_IOCTL_SGE]; 7530 dma_addr_t buf_handle = 0; 7531 int error = 0, i; 7532 void *sense = NULL; 7533 dma_addr_t sense_handle; 7534 unsigned long *sense_ptr; 7535 u32 opcode = 0; 7536 7537 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 7538 7539 if (ioc->sge_count > MAX_IOCTL_SGE) { 7540 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 7541 ioc->sge_count, MAX_IOCTL_SGE); 7542 return -EINVAL; 7543 } 7544 7545 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 7546 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 7547 !instance->support_nvme_passthru)) { 7548 dev_err(&instance->pdev->dev, 7549 "Received invalid ioctl command 0x%x\n", 7550 ioc->frame.hdr.cmd); 7551 return -ENOTSUPP; 7552 } 7553 7554 cmd = megasas_get_cmd(instance); 7555 if 
(!cmd) { 7556 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 7557 return -ENOMEM; 7558 } 7559 7560 /* 7561 * User's IOCTL packet has 2 frames (maximum). Copy those two 7562 * frames into our cmd's frames. cmd->frame's context will get 7563 * overwritten when we copy from user's frames. So set that value 7564 * alone separately 7565 */ 7566 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 7567 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 7568 cmd->frame->hdr.pad_0 = 0; 7569 7570 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 7571 7572 if (instance->consistent_mask_64bit) 7573 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 7574 MFI_FRAME_SENSE64)); 7575 else 7576 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 7577 MFI_FRAME_SENSE64)); 7578 7579 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 7580 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 7581 7582 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 7583 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 7584 megasas_return_cmd(instance, cmd); 7585 return -1; 7586 } 7587 } 7588 7589 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 7590 error = megasas_set_crash_dump_params_ioctl(cmd); 7591 megasas_return_cmd(instance, cmd); 7592 return error; 7593 } 7594 7595 /* 7596 * The management interface between applications and the fw uses 7597 * MFI frames. E.g, RAID configuration changes, LD property changes 7598 * etc are accomplishes through different kinds of MFI frames. The 7599 * driver needs to care only about substituting user buffers with 7600 * kernel buffers in SGLs. The location of SGL is embedded in the 7601 * struct iocpacket itself. 7602 */ 7603 if (instance->consistent_mask_64bit) 7604 kern_sge64 = (struct megasas_sge64 *) 7605 ((unsigned long)cmd->frame + ioc->sgl_off); 7606 else 7607 kern_sge32 = (struct megasas_sge32 *) 7608 ((unsigned long)cmd->frame + ioc->sgl_off); 7609 7610 /* 7611 * For each user buffer, create a mirror buffer and copy in 7612 */ 7613 for (i = 0; i < ioc->sge_count; i++) { 7614 if (!ioc->sgl[i].iov_len) 7615 continue; 7616 7617 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 7618 ioc->sgl[i].iov_len, 7619 &buf_handle, GFP_KERNEL); 7620 if (!kbuff_arr[i]) { 7621 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 7622 "kernel SGL buffer for IOCTL\n"); 7623 error = -ENOMEM; 7624 goto out; 7625 } 7626 7627 /* 7628 * We don't change the dma_coherent_mask, so 7629 * dma_alloc_coherent only returns 32bit addresses 7630 */ 7631 if (instance->consistent_mask_64bit) { 7632 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 7633 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7634 } else { 7635 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 7636 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7637 } 7638 7639 /* 7640 * We created a kernel buffer corresponding to the 7641 * user buffer. 
Now copy in from the user buffer 7642 */ 7643 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 7644 (u32) (ioc->sgl[i].iov_len))) { 7645 error = -EFAULT; 7646 goto out; 7647 } 7648 } 7649 7650 if (ioc->sense_len) { 7651 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 7652 &sense_handle, GFP_KERNEL); 7653 if (!sense) { 7654 error = -ENOMEM; 7655 goto out; 7656 } 7657 7658 sense_ptr = 7659 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 7660 if (instance->consistent_mask_64bit) 7661 *sense_ptr = cpu_to_le64(sense_handle); 7662 else 7663 *sense_ptr = cpu_to_le32(sense_handle); 7664 } 7665 7666 /* 7667 * Set the sync_cmd flag so that the ISR knows not to complete this 7668 * cmd to the SCSI mid-layer 7669 */ 7670 cmd->sync_cmd = 1; 7671 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 7672 cmd->sync_cmd = 0; 7673 dev_err(&instance->pdev->dev, 7674 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 7675 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 7676 cmd->cmd_status_drv); 7677 return -EBUSY; 7678 } 7679 7680 cmd->sync_cmd = 0; 7681 7682 if (instance->unload == 1) { 7683 dev_info(&instance->pdev->dev, "Driver unload is in progress " 7684 "don't submit data to application\n"); 7685 goto out; 7686 } 7687 /* 7688 * copy out the kernel buffers to user buffers 7689 */ 7690 for (i = 0; i < ioc->sge_count; i++) { 7691 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 7692 ioc->sgl[i].iov_len)) { 7693 error = -EFAULT; 7694 goto out; 7695 } 7696 } 7697 7698 /* 7699 * copy out the sense 7700 */ 7701 if (ioc->sense_len) { 7702 /* 7703 * sense_ptr points to the location that has the user 7704 * sense buffer address 7705 */ 7706 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 7707 ioc->sense_off); 7708 7709 if (copy_to_user((void __user *)((unsigned long) 7710 get_unaligned((unsigned long *)sense_ptr)), 7711 sense, ioc->sense_len)) { 7712 dev_err(&instance->pdev->dev, "Failed to copy out to user " 7713 "sense data\n"); 7714 error = -EFAULT; 7715 goto out; 7716 } 7717 } 7718 7719 /* 7720 * copy the status codes returned by the fw 7721 */ 7722 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 7723 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 7724 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 7725 error = -EFAULT; 7726 } 7727 7728 out: 7729 if (sense) { 7730 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 7731 sense, sense_handle); 7732 } 7733 7734 for (i = 0; i < ioc->sge_count; i++) { 7735 if (kbuff_arr[i]) { 7736 if (instance->consistent_mask_64bit) 7737 dma_free_coherent(&instance->pdev->dev, 7738 le32_to_cpu(kern_sge64[i].length), 7739 kbuff_arr[i], 7740 le64_to_cpu(kern_sge64[i].phys_addr)); 7741 else 7742 dma_free_coherent(&instance->pdev->dev, 7743 le32_to_cpu(kern_sge32[i].length), 7744 kbuff_arr[i], 7745 le32_to_cpu(kern_sge32[i].phys_addr)); 7746 kbuff_arr[i] = NULL; 7747 } 7748 } 7749 7750 megasas_return_cmd(instance, cmd); 7751 return error; 7752 } 7753 7754 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 7755 { 7756 struct megasas_iocpacket __user *user_ioc = 7757 (struct megasas_iocpacket __user *)arg; 7758 struct megasas_iocpacket *ioc; 7759 struct megasas_instance *instance; 7760 int error; 7761 7762 ioc = memdup_user(user_ioc, sizeof(*ioc)); 7763 if (IS_ERR(ioc)) 7764 return PTR_ERR(ioc); 7765 7766 instance = megasas_lookup_instance(ioc->host_no); 7767 if (!instance) { 7768 error = -ENODEV; 7769 goto out_kfree_ioc; 7770 } 
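/*
 * Everything below is gatekeeping: the ioctl is rejected in SR-IOV VF
 * mode (unless allow_vf_ioctls is set), while the controller is in a HW
 * critical error state, and while the driver is unloading.  ioctl_sem is
 * then taken and the adapter is given a chance to become operational
 * before the packet is handed to megasas_mgmt_fw_ioctl().
 */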
7771 7772 /* Block ioctls in VF mode */ 7773 if (instance->requestorId && !allow_vf_ioctls) { 7774 error = -ENODEV; 7775 goto out_kfree_ioc; 7776 } 7777 7778 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7779 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 7780 error = -ENODEV; 7781 goto out_kfree_ioc; 7782 } 7783 7784 if (instance->unload == 1) { 7785 error = -ENODEV; 7786 goto out_kfree_ioc; 7787 } 7788 7789 if (down_interruptible(&instance->ioctl_sem)) { 7790 error = -ERESTARTSYS; 7791 goto out_kfree_ioc; 7792 } 7793 7794 if (megasas_wait_for_adapter_operational(instance)) { 7795 error = -ENODEV; 7796 goto out_up; 7797 } 7798 7799 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 7800 out_up: 7801 up(&instance->ioctl_sem); 7802 7803 out_kfree_ioc: 7804 kfree(ioc); 7805 return error; 7806 } 7807 7808 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 7809 { 7810 struct megasas_instance *instance; 7811 struct megasas_aen aen; 7812 int error; 7813 7814 if (file->private_data != file) { 7815 printk(KERN_DEBUG "megasas: fasync_helper was not " 7816 "called first\n"); 7817 return -EINVAL; 7818 } 7819 7820 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 7821 return -EFAULT; 7822 7823 instance = megasas_lookup_instance(aen.host_no); 7824 7825 if (!instance) 7826 return -ENODEV; 7827 7828 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7829 return -ENODEV; 7830 } 7831 7832 if (instance->unload == 1) { 7833 return -ENODEV; 7834 } 7835 7836 if (megasas_wait_for_adapter_operational(instance)) 7837 return -ENODEV; 7838 7839 mutex_lock(&instance->reset_mutex); 7840 error = megasas_register_aen(instance, aen.seq_num, 7841 aen.class_locale_word); 7842 mutex_unlock(&instance->reset_mutex); 7843 return error; 7844 } 7845 7846 /** 7847 * megasas_mgmt_ioctl - char node ioctl entry point 7848 */ 7849 static long 7850 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 7851 { 7852 switch (cmd) { 7853 case MEGASAS_IOC_FIRMWARE: 7854 return megasas_mgmt_ioctl_fw(file, arg); 7855 7856 case MEGASAS_IOC_GET_AEN: 7857 return megasas_mgmt_ioctl_aen(file, arg); 7858 } 7859 7860 return -ENOTTY; 7861 } 7862 7863 #ifdef CONFIG_COMPAT 7864 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 7865 { 7866 struct compat_megasas_iocpacket __user *cioc = 7867 (struct compat_megasas_iocpacket __user *)arg; 7868 struct megasas_iocpacket __user *ioc = 7869 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 7870 int i; 7871 int error = 0; 7872 compat_uptr_t ptr; 7873 u32 local_sense_off; 7874 u32 local_sense_len; 7875 u32 user_sense_off; 7876 7877 if (clear_user(ioc, sizeof(*ioc))) 7878 return -EFAULT; 7879 7880 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 7881 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 7882 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 7883 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 7884 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 7885 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 7886 return -EFAULT; 7887 7888 /* 7889 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 7890 * sense_len is not null, so prepare the 64bit value under 7891 * the same condition. 
7892 */ 7893 if (get_user(local_sense_off, &ioc->sense_off) || 7894 get_user(local_sense_len, &ioc->sense_len) || 7895 get_user(user_sense_off, &cioc->sense_off)) 7896 return -EFAULT; 7897 7898 if (local_sense_off != user_sense_off) 7899 return -EINVAL; 7900 7901 if (local_sense_len) { 7902 void __user **sense_ioc_ptr = 7903 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 7904 compat_uptr_t *sense_cioc_ptr = 7905 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 7906 if (get_user(ptr, sense_cioc_ptr) || 7907 put_user(compat_ptr(ptr), sense_ioc_ptr)) 7908 return -EFAULT; 7909 } 7910 7911 for (i = 0; i < MAX_IOCTL_SGE; i++) { 7912 if (get_user(ptr, &cioc->sgl[i].iov_base) || 7913 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 7914 copy_in_user(&ioc->sgl[i].iov_len, 7915 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 7916 return -EFAULT; 7917 } 7918 7919 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 7920 7921 if (copy_in_user(&cioc->frame.hdr.cmd_status, 7922 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 7923 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 7924 return -EFAULT; 7925 } 7926 return error; 7927 } 7928 7929 static long 7930 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 7931 unsigned long arg) 7932 { 7933 switch (cmd) { 7934 case MEGASAS_IOC_FIRMWARE32: 7935 return megasas_mgmt_compat_ioctl_fw(file, arg); 7936 case MEGASAS_IOC_GET_AEN: 7937 return megasas_mgmt_ioctl_aen(file, arg); 7938 } 7939 7940 return -ENOTTY; 7941 } 7942 #endif 7943 7944 /* 7945 * File operations structure for management interface 7946 */ 7947 static const struct file_operations megasas_mgmt_fops = { 7948 .owner = THIS_MODULE, 7949 .open = megasas_mgmt_open, 7950 .fasync = megasas_mgmt_fasync, 7951 .unlocked_ioctl = megasas_mgmt_ioctl, 7952 .poll = megasas_mgmt_poll, 7953 #ifdef CONFIG_COMPAT 7954 .compat_ioctl = megasas_mgmt_compat_ioctl, 7955 #endif 7956 .llseek = noop_llseek, 7957 }; 7958 7959 /* 7960 * PCI hotplug support registration structure 7961 */ 7962 static struct pci_driver megasas_pci_driver = { 7963 7964 .name = "megaraid_sas", 7965 .id_table = megasas_pci_table, 7966 .probe = megasas_probe_one, 7967 .remove = megasas_detach_one, 7968 .suspend = megasas_suspend, 7969 .resume = megasas_resume, 7970 .shutdown = megasas_shutdown, 7971 }; 7972 7973 /* 7974 * Sysfs driver attributes 7975 */ 7976 static ssize_t version_show(struct device_driver *dd, char *buf) 7977 { 7978 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 7979 MEGASAS_VERSION); 7980 } 7981 static DRIVER_ATTR_RO(version); 7982 7983 static ssize_t release_date_show(struct device_driver *dd, char *buf) 7984 { 7985 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 7986 MEGASAS_RELDATE); 7987 } 7988 static DRIVER_ATTR_RO(release_date); 7989 7990 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 7991 { 7992 return sprintf(buf, "%u\n", support_poll_for_event); 7993 } 7994 static DRIVER_ATTR_RO(support_poll_for_event); 7995 7996 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 7997 { 7998 return sprintf(buf, "%u\n", support_device_change); 7999 } 8000 static DRIVER_ATTR_RO(support_device_change); 8001 8002 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8003 { 8004 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8005 } 8006 8007 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8008 size_t count) 8009 { 8010 int retval = count; 8011 8012 if 
(sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8013 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8014 retval = -EINVAL; 8015 } 8016 return retval; 8017 } 8018 static DRIVER_ATTR_RW(dbg_lvl); 8019 8020 static ssize_t 8021 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8022 { 8023 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8024 } 8025 8026 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8027 8028 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8029 { 8030 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8031 scsi_remove_device(sdev); 8032 scsi_device_put(sdev); 8033 } 8034 8035 /** 8036 * megasas_update_device_list - Update the PD and LD device list from FW 8037 * after an AEN event notification 8038 * @instance: Adapter soft state 8039 * @event_type: Indicates type of event (PD or LD event) 8040 * 8041 * @return: Success or failure 8042 * 8043 * Issue DCMDs to Firmware to update the internal device list in driver. 8044 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8045 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8046 */ 8047 static 8048 int megasas_update_device_list(struct megasas_instance *instance, 8049 int event_type) 8050 { 8051 int dcmd_ret = DCMD_SUCCESS; 8052 8053 if (instance->enable_fw_dev_list) { 8054 dcmd_ret = megasas_host_device_list_query(instance, false); 8055 if (dcmd_ret != DCMD_SUCCESS) 8056 goto out; 8057 } else { 8058 if (event_type & SCAN_PD_CHANNEL) { 8059 dcmd_ret = megasas_get_pd_list(instance); 8060 8061 if (dcmd_ret != DCMD_SUCCESS) 8062 goto out; 8063 } 8064 8065 if (event_type & SCAN_VD_CHANNEL) { 8066 if (!instance->requestorId || 8067 (instance->requestorId && 8068 megasas_get_ld_vf_affiliation(instance, 0))) { 8069 dcmd_ret = megasas_ld_list_query(instance, 8070 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8071 if (dcmd_ret != DCMD_SUCCESS) 8072 goto out; 8073 } 8074 } 8075 } 8076 8077 out: 8078 return dcmd_ret; 8079 } 8080 8081 /** 8082 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8083 * after an AEN event notification 8084 * @instance: Adapter soft state 8085 * @scan_type: Indicates type of devices (PD/LD) to add 8086 * @return void 8087 */ 8088 static 8089 void megasas_add_remove_devices(struct megasas_instance *instance, 8090 int scan_type) 8091 { 8092 int i, j; 8093 u16 pd_index = 0; 8094 u16 ld_index = 0; 8095 u16 channel = 0, id = 0; 8096 struct Scsi_Host *host; 8097 struct scsi_device *sdev1; 8098 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8099 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8100 8101 host = instance->host; 8102 8103 if (instance->enable_fw_dev_list) { 8104 targetid_list = instance->host_device_list_buf; 8105 for (i = 0; i < targetid_list->count; i++) { 8106 targetid_entry = &targetid_list->host_device_list[i]; 8107 if (targetid_entry->flags.u.bits.is_sys_pd) { 8108 channel = le16_to_cpu(targetid_entry->target_id) / 8109 MEGASAS_MAX_DEV_PER_CHANNEL; 8110 id = le16_to_cpu(targetid_entry->target_id) % 8111 MEGASAS_MAX_DEV_PER_CHANNEL; 8112 } else { 8113 channel = MEGASAS_MAX_PD_CHANNELS + 8114 (le16_to_cpu(targetid_entry->target_id) / 8115 MEGASAS_MAX_DEV_PER_CHANNEL); 8116 id = le16_to_cpu(targetid_entry->target_id) % 8117 MEGASAS_MAX_DEV_PER_CHANNEL; 8118 } 8119 sdev1 = scsi_device_lookup(host, channel, id, 0); 8120 if (!sdev1) { 8121 scsi_add_device(host, channel, id, 0); 8122 } else { 8123 scsi_device_put(sdev1); 8124 } 8125 } 8126 } 8127 8128 if (scan_type & SCAN_PD_CHANNEL) { 8129 for 
(i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8130 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8131 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8132 sdev1 = scsi_device_lookup(host, i, j, 0); 8133 if (instance->pd_list[pd_index].driveState == 8134 MR_PD_STATE_SYSTEM) { 8135 if (!sdev1) 8136 scsi_add_device(host, i, j, 0); 8137 else 8138 scsi_device_put(sdev1); 8139 } else { 8140 if (sdev1) 8141 megasas_remove_scsi_device(sdev1); 8142 } 8143 } 8144 } 8145 } 8146 8147 if (scan_type & SCAN_VD_CHANNEL) { 8148 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8149 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8150 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8151 sdev1 = scsi_device_lookup(host, 8152 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8153 if (instance->ld_ids[ld_index] != 0xff) { 8154 if (!sdev1) 8155 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8156 else 8157 scsi_device_put(sdev1); 8158 } else { 8159 if (sdev1) 8160 megasas_remove_scsi_device(sdev1); 8161 } 8162 } 8163 } 8164 } 8165 8166 } 8167 8168 static void 8169 megasas_aen_polling(struct work_struct *work) 8170 { 8171 struct megasas_aen_event *ev = 8172 container_of(work, struct megasas_aen_event, hotplug_work.work); 8173 struct megasas_instance *instance = ev->instance; 8174 union megasas_evt_class_locale class_locale; 8175 int event_type = 0; 8176 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 8177 int error; 8178 u8 dcmd_ret = DCMD_SUCCESS; 8179 8180 if (!instance) { 8181 printk(KERN_ERR "invalid instance!\n"); 8182 kfree(ev); 8183 return; 8184 } 8185 8186 /* Adjust event workqueue thread wait time for VF mode */ 8187 if (instance->requestorId) 8188 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 8189 8190 /* Don't run the event workqueue thread if OCR is running */ 8191 mutex_lock(&instance->reset_mutex); 8192 8193 instance->ev = NULL; 8194 if (instance->evt_detail) { 8195 megasas_decode_evt(instance); 8196 8197 switch (le32_to_cpu(instance->evt_detail->code)) { 8198 8199 case MR_EVT_PD_INSERTED: 8200 case MR_EVT_PD_REMOVED: 8201 event_type = SCAN_PD_CHANNEL; 8202 break; 8203 8204 case MR_EVT_LD_OFFLINE: 8205 case MR_EVT_CFG_CLEARED: 8206 case MR_EVT_LD_DELETED: 8207 case MR_EVT_LD_CREATED: 8208 event_type = SCAN_VD_CHANNEL; 8209 break; 8210 8211 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8212 case MR_EVT_FOREIGN_CFG_IMPORTED: 8213 case MR_EVT_LD_STATE_CHANGE: 8214 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8215 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8216 instance->host->host_no); 8217 break; 8218 8219 case MR_EVT_CTRL_PROP_CHANGED: 8220 dcmd_ret = megasas_get_ctrl_info(instance); 8221 if (dcmd_ret == DCMD_SUCCESS && 8222 instance->snapdump_wait_time) { 8223 megasas_get_snapdump_properties(instance); 8224 dev_info(&instance->pdev->dev, 8225 "Snap dump wait time\t: %d\n", 8226 instance->snapdump_wait_time); 8227 } 8228 break; 8229 default: 8230 event_type = 0; 8231 break; 8232 } 8233 } else { 8234 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8235 mutex_unlock(&instance->reset_mutex); 8236 kfree(ev); 8237 return; 8238 } 8239 8240 if (event_type) 8241 dcmd_ret = megasas_update_device_list(instance, event_type); 8242 8243 mutex_unlock(&instance->reset_mutex); 8244 8245 if (event_type && dcmd_ret == DCMD_SUCCESS) 8246 megasas_add_remove_devices(instance, event_type); 8247 8248 if (dcmd_ret == DCMD_SUCCESS) 8249 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8250 else 8251 seq_num = instance->last_seq_num; 8252 8253 /* Register AEN with FW for latest sequence 
number plus 1 */ 8254 class_locale.members.reserved = 0; 8255 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8256 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8257 8258 if (instance->aen_cmd != NULL) { 8259 kfree(ev); 8260 return; 8261 } 8262 8263 mutex_lock(&instance->reset_mutex); 8264 error = megasas_register_aen(instance, seq_num, 8265 class_locale.word); 8266 if (error) 8267 dev_err(&instance->pdev->dev, 8268 "register aen failed error %x\n", error); 8269 8270 mutex_unlock(&instance->reset_mutex); 8271 kfree(ev); 8272 } 8273 8274 /** 8275 * megasas_init - Driver load entry point 8276 */ 8277 static int __init megasas_init(void) 8278 { 8279 int rval; 8280 8281 /* 8282 * Booted in a kdump kernel; minimize the memory footprint by 8283 * disabling a few features 8284 */ 8285 if (reset_devices) { 8286 msix_vectors = 1; 8287 rdpq_enable = 0; 8288 dual_qdepth_disable = 1; 8289 } 8290 8291 /* 8292 * Announce driver version and other information 8293 */ 8294 pr_info("megasas: %s\n", MEGASAS_VERSION); 8295 8296 spin_lock_init(&poll_aen_lock); 8297 8298 support_poll_for_event = 2; 8299 support_device_change = 1; 8300 support_nvme_encapsulation = true; 8301 8302 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 8303 8304 /* 8305 * Register character device node 8306 */ 8307 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 8308 8309 if (rval < 0) { 8310 printk(KERN_DEBUG "megasas: failed to register device node\n"); 8311 return rval; 8312 } 8313 8314 megasas_mgmt_majorno = rval; 8315 8316 /* 8317 * Register ourselves as PCI hotplug module 8318 */ 8319 rval = pci_register_driver(&megasas_pci_driver); 8320 8321 if (rval) { 8322 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n"); 8323 goto err_pcidrv; 8324 } 8325 8326 rval = driver_create_file(&megasas_pci_driver.driver, 8327 &driver_attr_version); 8328 if (rval) 8329 goto err_dcf_attr_ver; 8330 8331 rval = driver_create_file(&megasas_pci_driver.driver, 8332 &driver_attr_release_date); 8333 if (rval) 8334 goto err_dcf_rel_date; 8335 8336 rval = driver_create_file(&megasas_pci_driver.driver, 8337 &driver_attr_support_poll_for_event); 8338 if (rval) 8339 goto err_dcf_support_poll_for_event; 8340 8341 rval = driver_create_file(&megasas_pci_driver.driver, 8342 &driver_attr_dbg_lvl); 8343 if (rval) 8344 goto err_dcf_dbg_lvl; 8345 rval = driver_create_file(&megasas_pci_driver.driver, 8346 &driver_attr_support_device_change); 8347 if (rval) 8348 goto err_dcf_support_device_change; 8349 8350 rval = driver_create_file(&megasas_pci_driver.driver, 8351 &driver_attr_support_nvme_encapsulation); 8352 if (rval) 8353 goto err_dcf_support_nvme_encapsulation; 8354 8355 return rval; 8356 8357 err_dcf_support_nvme_encapsulation: 8358 driver_remove_file(&megasas_pci_driver.driver, 8359 &driver_attr_support_device_change); 8360 8361 err_dcf_support_device_change: 8362 driver_remove_file(&megasas_pci_driver.driver, 8363 &driver_attr_dbg_lvl); 8364 err_dcf_dbg_lvl: 8365 driver_remove_file(&megasas_pci_driver.driver, 8366 &driver_attr_support_poll_for_event); 8367 err_dcf_support_poll_for_event: 8368 driver_remove_file(&megasas_pci_driver.driver, 8369 &driver_attr_release_date); 8370 err_dcf_rel_date: 8371 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 8372 err_dcf_attr_ver: 8373 pci_unregister_driver(&megasas_pci_driver); 8374 err_pcidrv: 8375 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 8376 return rval; 8377 } 8378 8379 /** 8380 * megasas_exit - Driver unload entry point 8381 */ 8382 static
void __exit megasas_exit(void) 8383 { 8384 driver_remove_file(&megasas_pci_driver.driver, 8385 &driver_attr_dbg_lvl); 8386 driver_remove_file(&megasas_pci_driver.driver, 8387 &driver_attr_support_poll_for_event); 8388 driver_remove_file(&megasas_pci_driver.driver, 8389 &driver_attr_support_device_change); 8390 driver_remove_file(&megasas_pci_driver.driver, 8391 &driver_attr_release_date); 8392 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 8393 driver_remove_file(&megasas_pci_driver.driver, 8394 &driver_attr_support_nvme_encapsulation); 8395 8396 pci_unregister_driver(&megasas_pci_driver); 8397 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 8398 } 8399 8400 module_init(megasas_init); 8401 module_exit(megasas_exit); 8402
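/*
 * Usage sketch (illustrative only, not compiled with the driver): the
 * "megaraid_sas_ioctl" character device registered in megasas_init() is
 * what management applications talk to.  The snippet below is a minimal,
 * untested example of the AEN notification flow implemented above by
 * megasas_mgmt_fasync(), megasas_mgmt_poll() and MEGASAS_IOC_GET_AEN.
 * It assumes the application carries its own copies of struct
 * megasas_aen and the MEGASAS_IOC_* ioctl numbers (represented here by a
 * hypothetical local header "megaraid_sas_ioctl.h"), and that a device
 * node such as /dev/megaraid_sas_ioctl has been created from the dynamic
 * major reported in /proc/devices.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include "megaraid_sas_ioctl.h"	// local copies of struct megasas_aen
 *					// and the MEGASAS_IOC_* definitions
 *
 *	int main(void)
 *	{
 *		struct megasas_aen aen = { 0 };
 *		struct pollfd pfd;
 *		int fd = open("/dev/megaraid_sas_ioctl", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		// Arm SIGIO delivery.  This runs megasas_mgmt_fasync() and
 *		// satisfies the "fasync_helper was not called first" check
 *		// in megasas_mgmt_ioctl_aen().
 *		fcntl(fd, F_SETOWN, getpid());
 *		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 *		aen.host_no = 0;		// SCSI host number of the adapter
 *		aen.seq_num = 0;		// last event sequence number seen
 *		aen.class_locale_word = 0;	// class/locale mask of interest
 *		if (ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) < 0)
 *			return 1;
 *
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *			printf("AEN fired\n");
 *
 *		close(fd);
 *		return 0;
 *	}
 *
 * A real management tool would loop: after POLLIN it re-registers with
 * the next sequence number and issues MEGASAS_IOC_FIRMWARE (struct
 * megasas_iocpacket) requests, serviced by megasas_mgmt_fw_ioctl(), to
 * fetch the event details.
 */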