1 /* 2 * Linux MegaRAID driver for SAS based RAID controllers 3 * 4 * Copyright (c) 2003-2013 LSI Corporation 5 * Copyright (c) 2013-2016 Avago Technologies 6 * Copyright (c) 2016-2018 Broadcom Inc. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 2 11 * of the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program. If not, see <http://www.gnu.org/licenses/>. 20 * 21 * Authors: Broadcom Inc. 22 * Sreenivas Bagalkote 23 * Sumant Patro 24 * Bo Yang 25 * Adam Radford 26 * Kashyap Desai <kashyap.desai@broadcom.com> 27 * Sumit Saxena <sumit.saxena@broadcom.com> 28 * 29 * Send feedback to: megaraidlinux.pdl@broadcom.com 30 */ 31 32 #include <linux/kernel.h> 33 #include <linux/types.h> 34 #include <linux/pci.h> 35 #include <linux/list.h> 36 #include <linux/moduleparam.h> 37 #include <linux/module.h> 38 #include <linux/spinlock.h> 39 #include <linux/interrupt.h> 40 #include <linux/delay.h> 41 #include <linux/uio.h> 42 #include <linux/slab.h> 43 #include <linux/uaccess.h> 44 #include <asm/unaligned.h> 45 #include <linux/fs.h> 46 #include <linux/compat.h> 47 #include <linux/blkdev.h> 48 #include <linux/mutex.h> 49 #include <linux/poll.h> 50 #include <linux/vmalloc.h> 51 52 #include <scsi/scsi.h> 53 #include <scsi/scsi_cmnd.h> 54 #include <scsi/scsi_device.h> 55 #include <scsi/scsi_host.h> 56 #include <scsi/scsi_tcq.h> 57 #include "megaraid_sas_fusion.h" 58 #include "megaraid_sas.h" 59 60 /* 61 * Number of sectors per IO command 62 * Will be set in megasas_init_mfi if user does not provide 63 */ 64 static unsigned int max_sectors; 65 module_param_named(max_sectors, max_sectors, int, 0); 66 MODULE_PARM_DESC(max_sectors, 67 "Maximum number of sectors per IO command"); 68 69 static int msix_disable; 70 module_param(msix_disable, int, S_IRUGO); 71 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 72 73 static unsigned int msix_vectors; 74 module_param(msix_vectors, int, S_IRUGO); 75 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); 76 77 static int allow_vf_ioctls; 78 module_param(allow_vf_ioctls, int, S_IRUGO); 79 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); 80 81 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 82 module_param(throttlequeuedepth, int, S_IRUGO); 83 MODULE_PARM_DESC(throttlequeuedepth, 84 "Adapter queue depth when throttled due to I/O timeout. Default: 16"); 85 86 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; 87 module_param(resetwaittime, int, S_IRUGO); 88 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. 
Default: 180s"); 89 90 int smp_affinity_enable = 1; 91 module_param(smp_affinity_enable, int, S_IRUGO); 92 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); 93 94 int rdpq_enable = 1; 95 module_param(rdpq_enable, int, S_IRUGO); 96 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); 97 98 unsigned int dual_qdepth_disable; 99 module_param(dual_qdepth_disable, int, S_IRUGO); 100 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); 101 102 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 103 module_param(scmd_timeout, int, S_IRUGO); 104 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer."); 105 106 MODULE_LICENSE("GPL"); 107 MODULE_VERSION(MEGASAS_VERSION); 108 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); 109 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); 110 111 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 112 static int megasas_get_pd_list(struct megasas_instance *instance); 113 static int megasas_ld_list_query(struct megasas_instance *instance, 114 u8 query_type); 115 static int megasas_issue_init_mfi(struct megasas_instance *instance); 116 static int megasas_register_aen(struct megasas_instance *instance, 117 u32 seq_num, u32 class_locale_word); 118 static void megasas_get_pd_info(struct megasas_instance *instance, 119 struct scsi_device *sdev); 120 121 /* 122 * PCI ID table for all supported controllers 123 */ 124 static struct pci_device_id megasas_pci_table[] = { 125 126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, 127 /* xscale IOP */ 128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, 129 /* ppc IOP */ 130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 131 /* ppc IOP */ 132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 133 /* gen2*/ 134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 135 /* gen2*/ 136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, 137 /* skinny*/ 138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, 139 /* skinny*/ 140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 141 /* xscale IOP, vega */ 142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 143 /* xscale IOP */ 144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 145 /* Fusion */ 146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, 147 /* Plasma */ 148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, 149 /* Invader */ 150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 151 /* Fury */ 152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 153 /* Intruder */ 154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 155 /* Intruder 24 port*/ 156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 158 /* VENTURA */ 159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to three times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 3);
		return ret_val;
	} else {
		return readl(addr);
	}
}
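/*
 * Illustrative use of the wrapper above (editorial sketch, not from the
 * original source): register reads that are subject to the Aero errata are
 * expected to go through megasas_readl() rather than a bare readl(), e.g.
 *
 *	fw_state = megasas_readl(instance,
 *			&instance->reg_set->outbound_scratch_pad_0);
 *
 * where fw_state is a hypothetical local variable.
 */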
/**
 * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
 * @instance:			Adapter soft state
 * @dcmd:			DCMD frame inside MFI command
 * @dma_addr:			DMA address of buffer to be passed to FW
 * @dma_len:			Length of DMA buffer to be passed to FW
 * @return:			void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}
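/*
 * Minimal usage sketch for the helper above (illustrative only; the buffer
 * names ci/ci_h are hypothetical, not taken from this file):
 *
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->flags = MFI_FRAME_DIR_READ;
 *	megasas_set_dma_settings(instance, dcmd, ci_h, sizeof(*ci));
 *	megasas_issue_polled(instance, cmd);
 *
 * i.e. the caller fills the rest of the DCMD frame, lets this helper
 * populate SGE0 (and the SGL64 flag when 64-bit DMA is in use), then
 * issues the command.
 */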
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}

/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:			Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}
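/*
 * For illustration (editorial note, values are made up): a critical AEN
 * decoded by the function above is logged roughly as
 *
 *	megaraid_sas 0000:02:00.0: 1234 (boot + 67s/0x0002/CRIT) - <description>
 *
 * i.e. sequence number, timestamp, locale, class and event description.
 */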
/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale -Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}

/**
 * megasas_clear_interrupt_xscale -	Check & clear interrupt
 * @regs:				MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}
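/*
 * Editorial note: the per-controller-family hooks above are collected into
 * a megasas_instance_template (below) and dispatched through
 * instance->instancet; e.g. megasas_issue_dcmd() earlier in this file simply
 * calls instance->instancet->fire_cmd(). A rough sketch of the pattern:
 *
 *	instance->instancet = &megasas_instance_template_xscale;
 *	...
 *	instance->instancet->enable_intr(instance);
 */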
static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_ppc -	Check & clear interrupt
 * @regs:				MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}
static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @regs:				MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_skinny -	Check & clear interrupt
 * @regs:				MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}
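/*
 * Editorial note (not from the original source): each MFI fire_cmd variant
 * posts a single word to the inbound queue port in which the frame's bus
 * address is OR-ed with the frame count in the low bits, e.g.
 * (frame_phys_addr | (frame_count << 1)) | 1 for ppc/gen2/skinny above.
 * This packing appears to rely on the frame address being sufficiently
 * aligned that those low bits are otherwise zero.
 */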
/**
 * megasas_check_reset_skinny -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_gen2 -	Check & clear interrupt
 * @regs:				MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}
/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;
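/*
 * Editorial note: the next three helpers are the MFI issue paths used for
 * internal (non-SCSI) commands - megasas_issue_polled() busy-waits via
 * wait_and_poll(), megasas_issue_blocked_cmd() sleeps on int_cmd_wait_q
 * until the ISR completes the frame, and megasas_issue_blocked_abort_cmd()
 * posts an MFI_CMD_ABORT for a previously issued frame (typically the
 * registered AEN command).
 */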
/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to
 * MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}
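/*
 * Illustrative caller pattern for the blocked-command wrapper above
 * (sketch only; the case bodies are left open):
 *
 *	switch (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)) {
 *	case DCMD_SUCCESS:   ...; break;
 *	case DCMD_TIMEOUT:   ...; break;  // callers often treat this as an OCR trigger
 *	case DCMD_FAILED:
 *	case DCMD_NOT_FIRED: ...; break;
 *	}
 */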
/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame- io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}
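/*
 * Worked example for the helper above (editorial, not from the original
 * source): an LDIO with a 64-bit SGL and sge_count = 10 keeps 2 SGEs in the
 * main frame, so num_cnt = 8 and the remaining SGEs need
 * DIV_ROUND_UP(8 * sizeof(struct megasas_sge64), MEGAMFI_FRAME_SIZE)
 * additional frames, plus 1 for the main frame; the total is capped at 8.
 */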
/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
1350 */ 1351 static int 1352 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1353 struct megasas_cmd *cmd) 1354 { 1355 u32 is_logical; 1356 u32 device_id; 1357 u16 flags = 0; 1358 struct megasas_pthru_frame *pthru; 1359 1360 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1361 device_id = MEGASAS_DEV_INDEX(scp); 1362 pthru = (struct megasas_pthru_frame *)cmd->frame; 1363 1364 if (scp->sc_data_direction == DMA_TO_DEVICE) 1365 flags = MFI_FRAME_DIR_WRITE; 1366 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1367 flags = MFI_FRAME_DIR_READ; 1368 else if (scp->sc_data_direction == DMA_NONE) 1369 flags = MFI_FRAME_DIR_NONE; 1370 1371 if (instance->flag_ieee == 1) { 1372 flags |= MFI_FRAME_IEEE; 1373 } 1374 1375 /* 1376 * Prepare the DCDB frame 1377 */ 1378 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1379 pthru->cmd_status = 0x0; 1380 pthru->scsi_status = 0x0; 1381 pthru->target_id = device_id; 1382 pthru->lun = scp->device->lun; 1383 pthru->cdb_len = scp->cmd_len; 1384 pthru->timeout = 0; 1385 pthru->pad_0 = 0; 1386 pthru->flags = cpu_to_le16(flags); 1387 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1388 1389 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1390 1391 /* 1392 * If the command is for the tape device, set the 1393 * pthru timeout to the os layer timeout value. 1394 */ 1395 if (scp->device->type == TYPE_TAPE) { 1396 if ((scp->request->timeout / HZ) > 0xFFFF) 1397 pthru->timeout = cpu_to_le16(0xFFFF); 1398 else 1399 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1400 } 1401 1402 /* 1403 * Construct SGL 1404 */ 1405 if (instance->flag_ieee == 1) { 1406 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1407 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1408 &pthru->sgl); 1409 } else if (IS_DMA64) { 1410 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1411 pthru->sge_count = megasas_make_sgl64(instance, scp, 1412 &pthru->sgl); 1413 } else 1414 pthru->sge_count = megasas_make_sgl32(instance, scp, 1415 &pthru->sgl); 1416 1417 if (pthru->sge_count > instance->max_num_sge) { 1418 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1419 pthru->sge_count); 1420 return 0; 1421 } 1422 1423 /* 1424 * Sense info specific 1425 */ 1426 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1427 pthru->sense_buf_phys_addr_hi = 1428 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1429 pthru->sense_buf_phys_addr_lo = 1430 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1431 1432 /* 1433 * Compute the total number of frames this command consumes. FW uses 1434 * this number to pull sufficient number of frames from host memory. 1435 */ 1436 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1437 PTHRU_FRAME); 1438 1439 return cmd->frame_count; 1440 } 1441 1442 /** 1443 * megasas_build_ldio - Prepares IOs to logical devices 1444 * @instance: Adapter soft state 1445 * @scp: SCSI command 1446 * @cmd: Command to be prepared 1447 * 1448 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
1449 */ 1450 static int 1451 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1452 struct megasas_cmd *cmd) 1453 { 1454 u32 device_id; 1455 u8 sc = scp->cmnd[0]; 1456 u16 flags = 0; 1457 struct megasas_io_frame *ldio; 1458 1459 device_id = MEGASAS_DEV_INDEX(scp); 1460 ldio = (struct megasas_io_frame *)cmd->frame; 1461 1462 if (scp->sc_data_direction == DMA_TO_DEVICE) 1463 flags = MFI_FRAME_DIR_WRITE; 1464 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1465 flags = MFI_FRAME_DIR_READ; 1466 1467 if (instance->flag_ieee == 1) { 1468 flags |= MFI_FRAME_IEEE; 1469 } 1470 1471 /* 1472 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1473 */ 1474 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1475 ldio->cmd_status = 0x0; 1476 ldio->scsi_status = 0x0; 1477 ldio->target_id = device_id; 1478 ldio->timeout = 0; 1479 ldio->reserved_0 = 0; 1480 ldio->pad_0 = 0; 1481 ldio->flags = cpu_to_le16(flags); 1482 ldio->start_lba_hi = 0; 1483 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1484 1485 /* 1486 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1487 */ 1488 if (scp->cmd_len == 6) { 1489 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1490 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1491 ((u32) scp->cmnd[2] << 8) | 1492 (u32) scp->cmnd[3]); 1493 1494 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1495 } 1496 1497 /* 1498 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1499 */ 1500 else if (scp->cmd_len == 10) { 1501 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1502 ((u32) scp->cmnd[7] << 8)); 1503 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1504 ((u32) scp->cmnd[3] << 16) | 1505 ((u32) scp->cmnd[4] << 8) | 1506 (u32) scp->cmnd[5]); 1507 } 1508 1509 /* 1510 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1511 */ 1512 else if (scp->cmd_len == 12) { 1513 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1514 ((u32) scp->cmnd[7] << 16) | 1515 ((u32) scp->cmnd[8] << 8) | 1516 (u32) scp->cmnd[9]); 1517 1518 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1519 ((u32) scp->cmnd[3] << 16) | 1520 ((u32) scp->cmnd[4] << 8) | 1521 (u32) scp->cmnd[5]); 1522 } 1523 1524 /* 1525 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1526 */ 1527 else if (scp->cmd_len == 16) { 1528 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1529 ((u32) scp->cmnd[11] << 16) | 1530 ((u32) scp->cmnd[12] << 8) | 1531 (u32) scp->cmnd[13]); 1532 1533 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1534 ((u32) scp->cmnd[7] << 16) | 1535 ((u32) scp->cmnd[8] << 8) | 1536 (u32) scp->cmnd[9]); 1537 1538 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1539 ((u32) scp->cmnd[3] << 16) | 1540 ((u32) scp->cmnd[4] << 8) | 1541 (u32) scp->cmnd[5]); 1542 1543 } 1544 1545 /* 1546 * Construct SGL 1547 */ 1548 if (instance->flag_ieee) { 1549 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1550 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1551 &ldio->sgl); 1552 } else if (IS_DMA64) { 1553 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1554 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1555 } else 1556 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1557 1558 if (ldio->sge_count > instance->max_num_sge) { 1559 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1560 ldio->sge_count); 1561 return 0; 1562 } 1563 1564 /* 1565 * Sense info specific 1566 */ 1567 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1568 ldio->sense_buf_phys_addr_hi = 0; 1569 
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1570 1571 /* 1572 * Compute the total number of frames this command consumes. FW uses 1573 * this number to pull sufficient number of frames from host memory. 1574 */ 1575 cmd->frame_count = megasas_get_frame_count(instance, 1576 ldio->sge_count, IO_FRAME); 1577 1578 return cmd->frame_count; 1579 } 1580 1581 /** 1582 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1583 * and whether it's RW or non RW 1584 * @scmd: SCSI command 1585 * 1586 */ 1587 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1588 { 1589 int ret; 1590 1591 switch (cmd->cmnd[0]) { 1592 case READ_10: 1593 case WRITE_10: 1594 case READ_12: 1595 case WRITE_12: 1596 case READ_6: 1597 case WRITE_6: 1598 case READ_16: 1599 case WRITE_16: 1600 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1601 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1602 break; 1603 default: 1604 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1605 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1606 } 1607 return ret; 1608 } 1609 1610 /** 1611 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1612 * in FW 1613 * @instance: Adapter soft state 1614 */ 1615 static inline void 1616 megasas_dump_pending_frames(struct megasas_instance *instance) 1617 { 1618 struct megasas_cmd *cmd; 1619 int i,n; 1620 union megasas_sgl *mfi_sgl; 1621 struct megasas_io_frame *ldio; 1622 struct megasas_pthru_frame *pthru; 1623 u32 sgcount; 1624 u16 max_cmd = instance->max_fw_cmds; 1625 1626 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1627 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1628 if (IS_DMA64) 1629 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1630 else 1631 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1632 1633 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1634 for (i = 0; i < max_cmd; i++) { 1635 cmd = instance->cmd_list[i]; 1636 if (!cmd->scmd) 1637 continue; 1638 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1639 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1640 ldio = (struct megasas_io_frame *)cmd->frame; 1641 mfi_sgl = &ldio->sgl; 1642 sgcount = ldio->sge_count; 1643 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1644 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1645 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1646 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1647 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1648 } else { 1649 pthru = (struct megasas_pthru_frame *) cmd->frame; 1650 mfi_sgl = &pthru->sgl; 1651 sgcount = pthru->sge_count; 1652 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1653 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1654 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1655 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1656 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1657 } 1658 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1659 for (n = 0; n < sgcount; n++) { 1660 if (IS_DMA64) 1661 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1662 le32_to_cpu(mfi_sgl->sge64[n].length), 1663 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1664 else 1665 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1666 le32_to_cpu(mfi_sgl->sge32[n].length), 1667 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1668 } 1669 } 1670 } /*for max_cmd*/ 1671 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1672 for (i = 0; i < max_cmd; i++) { 1673 1674 cmd = instance->cmd_list[i]; 1675 1676 if (cmd->sync_cmd == 1) 1677 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1678 } 1679 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1680 } 1681 1682 u32 1683 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1684 struct scsi_cmnd *scmd) 1685 { 1686 struct megasas_cmd *cmd; 1687 u32 frame_count; 1688 1689 cmd = megasas_get_cmd(instance); 1690 if (!cmd) 1691 return SCSI_MLQUEUE_HOST_BUSY; 1692 1693 /* 1694 * Logical drive command 1695 */ 1696 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1697 frame_count = megasas_build_ldio(instance, scmd, cmd); 1698 else 1699 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1700 1701 if (!frame_count) 1702 goto out_return_cmd; 1703 1704 cmd->scmd = scmd; 1705 scmd->SCp.ptr = (char *)cmd; 1706 1707 /* 1708 * Issue the command to the FW 1709 */ 1710 atomic_inc(&instance->fw_outstanding); 1711 1712 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1713 cmd->frame_count-1, instance->reg_set); 1714 1715 return 0; 1716 out_return_cmd: 1717 megasas_return_cmd(instance, cmd); 1718 return SCSI_MLQUEUE_HOST_BUSY; 1719 } 1720 1721 1722 /** 1723 * megasas_queue_command - Queue entry point 1724 * @scmd: SCSI command to be queued 1725 * @done: Callback entry point 1726 */ 1727 static int 1728 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1729 { 1730 struct megasas_instance *instance; 1731 struct MR_PRIV_DEVICE *mr_device_priv_data; 1732 1733 instance = (struct megasas_instance *) 1734 scmd->device->host->hostdata; 1735 1736 if (instance->unload == 1) { 1737 scmd->result = DID_NO_CONNECT << 16; 1738 scmd->scsi_done(scmd); 1739 return 0; 1740 } 1741 1742 if (instance->issuepend_done == 0) 1743 return SCSI_MLQUEUE_HOST_BUSY; 1744 1745 1746 /* Check for an mpio path and adjust behavior */ 1747 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1748 if (megasas_check_mpio_paths(instance, scmd) == 1749 (DID_REQUEUE << 16)) { 1750 return SCSI_MLQUEUE_HOST_BUSY; 1751 } else { 1752 scmd->result = DID_NO_CONNECT << 16; 1753 scmd->scsi_done(scmd); 1754 return 0; 1755 } 1756 } 1757 1758 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1759 scmd->result = DID_NO_CONNECT << 16; 1760 scmd->scsi_done(scmd); 1761 return 0; 1762 } 1763 1764 mr_device_priv_data = scmd->device->hostdata; 1765 if (!mr_device_priv_data) { 1766 scmd->result = DID_NO_CONNECT << 16; 1767 scmd->scsi_done(scmd); 1768 return 0; 1769 } 1770 1771 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1772 return SCSI_MLQUEUE_HOST_BUSY; 1773 1774 if (mr_device_priv_data->tm_busy) 1775 return SCSI_MLQUEUE_DEVICE_BUSY; 1776 1777 1778 scmd->result = 0; 1779 1780 if (MEGASAS_IS_LOGICAL(scmd->device) && 1781 (scmd->device->id >= instance->fw_supported_vd_count || 1782 scmd->device->lun)) { 1783 scmd->result = DID_BAD_TARGET << 16; 1784 goto out_done; 1785 } 1786 1787 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1788 
	    MEGASAS_IS_LOGICAL(scmd->device) &&
	    (!instance->fw_sync_cache_support)) {
		scmd->result = DID_OK << 16;
		goto out_done;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

 out_done:
	scmd->scsi_done(scmd);
	return 0;
}

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
 * megasas_set_dynamic_target_properties -
 * Device property set by driver may not be static and it is required to be
 * updated after OCR
 *
 * set tm_capable.
 * set dma alignment (only for eedp protection enable vd).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
					   bool is_target_prop)
{
	u16 pd_index = 0, ld;
	u32 device_id;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	if (!fusion || !mr_device_priv_data)
		return;

	if (MEGASAS_IS_LOGICAL(sdev)) {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		if (ld >= instance->fw_supported_vd_count)
			return;
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	} else if (instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	}

	if (is_target_prop && instance->tgt_prop->reset_tmo) {
		/*
		 * If FW provides a target reset timeout value, driver will use
		 * it. If not set, fall back to default values.
		 */
		mr_device_priv_data->target_reset_tmo =
			min_t(u8, instance->max_reset_tmo,
			      instance->tgt_prop->reset_tmo);
		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
	} else {
		mr_device_priv_data->target_reset_tmo =
						MEGASAS_DEFAULT_TM_TIMEOUT;
		mr_device_priv_data->task_abort_tmo =
						MEGASAS_DEFAULT_TM_TIMEOUT;
	}
}

/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
 *
 * MR firmware provides value in KB. Caller of this function converts
 * KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * nvme page size (in case of a 4K page size,
 * MR firmware provides the value 128, i.e. 32 * 4K = 128K).
1897 * 1898 * @sdev: scsi device 1899 * @max_io_size: maximum io transfer size 1900 * 1901 */ 1902 static inline void 1903 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1904 { 1905 struct megasas_instance *instance; 1906 u32 mr_nvme_pg_size; 1907 1908 instance = (struct megasas_instance *)sdev->host->hostdata; 1909 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1910 MR_DEFAULT_NVME_PAGE_SIZE); 1911 1912 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1913 1914 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1915 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1916 } 1917 1918 1919 /* 1920 * megasas_set_static_target_properties - 1921 * Device property set by driver are static and it is not required to be 1922 * updated after OCR. 1923 * 1924 * set io timeout 1925 * set device queue depth 1926 * set nvme device properties. see - megasas_set_nvme_device_properties 1927 * 1928 * @sdev: scsi device 1929 * @is_target_prop true, if fw provided target properties. 1930 */ 1931 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1932 bool is_target_prop) 1933 { 1934 u16 target_index = 0; 1935 u8 interface_type; 1936 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1937 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1938 u32 tgt_device_qd; 1939 struct megasas_instance *instance; 1940 struct MR_PRIV_DEVICE *mr_device_priv_data; 1941 1942 instance = megasas_lookup_instance(sdev->host->host_no); 1943 mr_device_priv_data = sdev->hostdata; 1944 interface_type = mr_device_priv_data->interface_type; 1945 1946 /* 1947 * The RAID firmware may require extended timeouts. 1948 */ 1949 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1950 1951 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 1952 1953 switch (interface_type) { 1954 case SAS_PD: 1955 device_qd = MEGASAS_SAS_QD; 1956 break; 1957 case SATA_PD: 1958 device_qd = MEGASAS_SATA_QD; 1959 break; 1960 case NVME_PD: 1961 device_qd = MEGASAS_NVME_QD; 1962 break; 1963 } 1964 1965 if (is_target_prop) { 1966 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1967 if (tgt_device_qd && 1968 (tgt_device_qd <= instance->host->can_queue)) 1969 device_qd = tgt_device_qd; 1970 1971 /* max_io_size_kb will be set to non zero for 1972 * nvme based vd and syspd. 1973 */ 1974 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1975 } 1976 1977 if (instance->nvme_page_size && max_io_size_kb) 1978 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 1979 1980 scsi_change_queue_depth(sdev, device_qd); 1981 1982 } 1983 1984 1985 static int megasas_slave_configure(struct scsi_device *sdev) 1986 { 1987 u16 pd_index = 0; 1988 struct megasas_instance *instance; 1989 int ret_target_prop = DCMD_FAILED; 1990 bool is_target_prop = false; 1991 1992 instance = megasas_lookup_instance(sdev->host->host_no); 1993 if (instance->pd_list_not_supported) { 1994 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 1995 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1996 sdev->id; 1997 if (instance->pd_list[pd_index].driveState != 1998 MR_PD_STATE_SYSTEM) 1999 return -ENXIO; 2000 } 2001 } 2002 2003 mutex_lock(&instance->reset_mutex); 2004 /* Send DCMD to Firmware and cache the information */ 2005 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2006 megasas_get_pd_info(instance, sdev); 2007 2008 /* Some ventura firmware may not have instance->nvme_page_size set. 
2009 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2010 */ 2011 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2012 ret_target_prop = megasas_get_target_prop(instance, sdev); 2013 2014 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2015 megasas_set_static_target_properties(sdev, is_target_prop); 2016 2017 /* This sdev property may change post OCR */ 2018 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2019 2020 mutex_unlock(&instance->reset_mutex); 2021 2022 return 0; 2023 } 2024 2025 static int megasas_slave_alloc(struct scsi_device *sdev) 2026 { 2027 u16 pd_index = 0; 2028 struct megasas_instance *instance ; 2029 struct MR_PRIV_DEVICE *mr_device_priv_data; 2030 2031 instance = megasas_lookup_instance(sdev->host->host_no); 2032 if (!MEGASAS_IS_LOGICAL(sdev)) { 2033 /* 2034 * Open the OS scan to the SYSTEM PD 2035 */ 2036 pd_index = 2037 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2038 sdev->id; 2039 if ((instance->pd_list_not_supported || 2040 instance->pd_list[pd_index].driveState == 2041 MR_PD_STATE_SYSTEM)) { 2042 goto scan_target; 2043 } 2044 return -ENXIO; 2045 } 2046 2047 scan_target: 2048 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2049 GFP_KERNEL); 2050 if (!mr_device_priv_data) 2051 return -ENOMEM; 2052 sdev->hostdata = mr_device_priv_data; 2053 2054 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2055 instance->r1_ldio_hint_default); 2056 return 0; 2057 } 2058 2059 static void megasas_slave_destroy(struct scsi_device *sdev) 2060 { 2061 kfree(sdev->hostdata); 2062 sdev->hostdata = NULL; 2063 } 2064 2065 /* 2066 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2067 * kill adapter 2068 * @instance: Adapter soft state 2069 * 2070 */ 2071 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2072 { 2073 int i; 2074 struct megasas_cmd *cmd_mfi; 2075 struct megasas_cmd_fusion *cmd_fusion; 2076 struct fusion_context *fusion = instance->ctrl_context; 2077 2078 /* Find all outstanding ioctls */ 2079 if (fusion) { 2080 for (i = 0; i < instance->max_fw_cmds; i++) { 2081 cmd_fusion = fusion->cmd_list[i]; 2082 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2083 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2084 if (cmd_mfi->sync_cmd && 2085 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2086 cmd_mfi->frame->hdr.cmd_status = 2087 MFI_STAT_WRONG_STATE; 2088 megasas_complete_cmd(instance, 2089 cmd_mfi, DID_OK); 2090 } 2091 } 2092 } 2093 } else { 2094 for (i = 0; i < instance->max_fw_cmds; i++) { 2095 cmd_mfi = instance->cmd_list[i]; 2096 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2097 MFI_CMD_ABORT) 2098 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2099 } 2100 } 2101 } 2102 2103 2104 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2105 { 2106 /* Set critical error to block I/O & ioctls in case caller didn't */ 2107 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2108 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2109 msleep(1000); 2110 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2111 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2112 (instance->adapter_type != MFI_SERIES)) { 2113 if (!instance->requestorId) { 2114 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2115 /* Flush */ 2116 readl(&instance->reg_set->doorbell); 2117 } 2118 if (instance->requestorId && instance->peerIsPresent) 2119 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2120 } else { 2121 
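		/*
		 * Remaining MFI-series adapters (other than the skinny
		 * variants handled above) take MFI_STOP_ADP through the
		 * legacy inbound_doorbell register rather than the
		 * doorbell register.
		 */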
writel(MFI_STOP_ADP, 2122 &instance->reg_set->inbound_doorbell); 2123 } 2124 /* Complete outstanding ioctls when adapter is killed */ 2125 megasas_complete_outstanding_ioctls(instance); 2126 } 2127 2128 /** 2129 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2130 * restored to max value 2131 * @instance: Adapter soft state 2132 * 2133 */ 2134 void 2135 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2136 { 2137 unsigned long flags; 2138 2139 if (instance->flag & MEGASAS_FW_BUSY 2140 && time_after(jiffies, instance->last_time + 5 * HZ) 2141 && atomic_read(&instance->fw_outstanding) < 2142 instance->throttlequeuedepth + 1) { 2143 2144 spin_lock_irqsave(instance->host->host_lock, flags); 2145 instance->flag &= ~MEGASAS_FW_BUSY; 2146 2147 instance->host->can_queue = instance->cur_can_queue; 2148 spin_unlock_irqrestore(instance->host->host_lock, flags); 2149 } 2150 } 2151 2152 /** 2153 * megasas_complete_cmd_dpc - Returns FW's controller structure 2154 * @instance_addr: Address of adapter soft state 2155 * 2156 * Tasklet to complete cmds 2157 */ 2158 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2159 { 2160 u32 producer; 2161 u32 consumer; 2162 u32 context; 2163 struct megasas_cmd *cmd; 2164 struct megasas_instance *instance = 2165 (struct megasas_instance *)instance_addr; 2166 unsigned long flags; 2167 2168 /* If we have already declared adapter dead, donot complete cmds */ 2169 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2170 return; 2171 2172 spin_lock_irqsave(&instance->completion_lock, flags); 2173 2174 producer = le32_to_cpu(*instance->producer); 2175 consumer = le32_to_cpu(*instance->consumer); 2176 2177 while (consumer != producer) { 2178 context = le32_to_cpu(instance->reply_queue[consumer]); 2179 if (context >= instance->max_fw_cmds) { 2180 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2181 context); 2182 BUG(); 2183 } 2184 2185 cmd = instance->cmd_list[context]; 2186 2187 megasas_complete_cmd(instance, cmd, DID_OK); 2188 2189 consumer++; 2190 if (consumer == (instance->max_fw_cmds + 1)) { 2191 consumer = 0; 2192 } 2193 } 2194 2195 *instance->consumer = cpu_to_le32(producer); 2196 2197 spin_unlock_irqrestore(&instance->completion_lock, flags); 2198 2199 /* 2200 * Check if we can restore can_queue 2201 */ 2202 megasas_check_and_restore_queue_depth(instance); 2203 } 2204 2205 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2206 2207 /** 2208 * megasas_start_timer - Initializes sriov heartbeat timer object 2209 * @instance: Adapter soft state 2210 * 2211 */ 2212 void megasas_start_timer(struct megasas_instance *instance) 2213 { 2214 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2215 2216 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2217 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2218 add_timer(timer); 2219 } 2220 2221 static void 2222 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2223 2224 static void 2225 process_fw_state_change_wq(struct work_struct *work); 2226 2227 void megasas_do_ocr(struct megasas_instance *instance) 2228 { 2229 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2230 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2231 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2232 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2233 } 2234 instance->instancet->disable_intr(instance); 2235 atomic_set(&instance->adprecovery, 
MEGASAS_ADPRESET_SM_INFAULT); 2236 instance->issuepend_done = 0; 2237 2238 atomic_set(&instance->fw_outstanding, 0); 2239 megasas_internal_reset_defer_cmds(instance); 2240 process_fw_state_change_wq(&instance->work_init); 2241 } 2242 2243 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2244 int initial) 2245 { 2246 struct megasas_cmd *cmd; 2247 struct megasas_dcmd_frame *dcmd; 2248 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2249 dma_addr_t new_affiliation_111_h; 2250 int ld, retval = 0; 2251 u8 thisVf; 2252 2253 cmd = megasas_get_cmd(instance); 2254 2255 if (!cmd) { 2256 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2257 "Failed to get cmd for scsi%d\n", 2258 instance->host->host_no); 2259 return -ENOMEM; 2260 } 2261 2262 dcmd = &cmd->frame->dcmd; 2263 2264 if (!instance->vf_affiliation_111) { 2265 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2266 "affiliation for scsi%d\n", instance->host->host_no); 2267 megasas_return_cmd(instance, cmd); 2268 return -ENOMEM; 2269 } 2270 2271 if (initial) 2272 memset(instance->vf_affiliation_111, 0, 2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2274 else { 2275 new_affiliation_111 = 2276 dma_alloc_coherent(&instance->pdev->dev, 2277 sizeof(struct MR_LD_VF_AFFILIATION_111), 2278 &new_affiliation_111_h, GFP_KERNEL); 2279 if (!new_affiliation_111) { 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2281 "memory for new affiliation for scsi%d\n", 2282 instance->host->host_no); 2283 megasas_return_cmd(instance, cmd); 2284 return -ENOMEM; 2285 } 2286 } 2287 2288 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2289 2290 dcmd->cmd = MFI_CMD_DCMD; 2291 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2292 dcmd->sge_count = 1; 2293 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2294 dcmd->timeout = 0; 2295 dcmd->pad_0 = 0; 2296 dcmd->data_xfer_len = 2297 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2298 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2299 2300 if (initial) 2301 dcmd->sgl.sge32[0].phys_addr = 2302 cpu_to_le32(instance->vf_affiliation_111_h); 2303 else 2304 dcmd->sgl.sge32[0].phys_addr = 2305 cpu_to_le32(new_affiliation_111_h); 2306 2307 dcmd->sgl.sge32[0].length = cpu_to_le32( 2308 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2309 2310 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2311 "scsi%d\n", instance->host->host_no); 2312 2313 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2314 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2315 " failed with status 0x%x for scsi%d\n", 2316 dcmd->cmd_status, instance->host->host_no); 2317 retval = 1; /* Do a scan if we couldn't get affiliation */ 2318 goto out; 2319 } 2320 2321 if (!initial) { 2322 thisVf = new_affiliation_111->thisVf; 2323 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2324 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2325 new_affiliation_111->map[ld].policy[thisVf]) { 2326 dev_warn(&instance->pdev->dev, "SR-IOV: " 2327 "Got new LD/VF affiliation for scsi%d\n", 2328 instance->host->host_no); 2329 memcpy(instance->vf_affiliation_111, 2330 new_affiliation_111, 2331 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2332 retval = 1; 2333 goto out; 2334 } 2335 } 2336 out: 2337 if (new_affiliation_111) { 2338 dma_free_coherent(&instance->pdev->dev, 2339 sizeof(struct MR_LD_VF_AFFILIATION_111), 2340 new_affiliation_111, 2341 new_affiliation_111_h); 2342 } 2343 2344 
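	/*
	 * The MFI command is always handed back to the pool here, whether or
	 * not the DCMD succeeded; retval is 1 when the affiliation could not
	 * be read or has changed (see the "Do a scan" note above), and 0 when
	 * nothing changed.
	 */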
megasas_return_cmd(instance, cmd); 2345 2346 return retval; 2347 } 2348 2349 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2350 int initial) 2351 { 2352 struct megasas_cmd *cmd; 2353 struct megasas_dcmd_frame *dcmd; 2354 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2355 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2356 dma_addr_t new_affiliation_h; 2357 int i, j, retval = 0, found = 0, doscan = 0; 2358 u8 thisVf; 2359 2360 cmd = megasas_get_cmd(instance); 2361 2362 if (!cmd) { 2363 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2364 "Failed to get cmd for scsi%d\n", 2365 instance->host->host_no); 2366 return -ENOMEM; 2367 } 2368 2369 dcmd = &cmd->frame->dcmd; 2370 2371 if (!instance->vf_affiliation) { 2372 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2373 "affiliation for scsi%d\n", instance->host->host_no); 2374 megasas_return_cmd(instance, cmd); 2375 return -ENOMEM; 2376 } 2377 2378 if (initial) 2379 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2381 else { 2382 new_affiliation = 2383 dma_alloc_coherent(&instance->pdev->dev, 2384 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2385 &new_affiliation_h, GFP_KERNEL); 2386 if (!new_affiliation) { 2387 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2388 "memory for new affiliation for scsi%d\n", 2389 instance->host->host_no); 2390 megasas_return_cmd(instance, cmd); 2391 return -ENOMEM; 2392 } 2393 } 2394 2395 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2396 2397 dcmd->cmd = MFI_CMD_DCMD; 2398 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2399 dcmd->sge_count = 1; 2400 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2401 dcmd->timeout = 0; 2402 dcmd->pad_0 = 0; 2403 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2404 sizeof(struct MR_LD_VF_AFFILIATION)); 2405 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2406 2407 if (initial) 2408 dcmd->sgl.sge32[0].phys_addr = 2409 cpu_to_le32(instance->vf_affiliation_h); 2410 else 2411 dcmd->sgl.sge32[0].phys_addr = 2412 cpu_to_le32(new_affiliation_h); 2413 2414 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2415 sizeof(struct MR_LD_VF_AFFILIATION)); 2416 2417 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2418 "scsi%d\n", instance->host->host_no); 2419 2420 2421 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2422 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2423 " failed with status 0x%x for scsi%d\n", 2424 dcmd->cmd_status, instance->host->host_no); 2425 retval = 1; /* Do a scan if we couldn't get affiliation */ 2426 goto out; 2427 } 2428 2429 if (!initial) { 2430 if (!new_affiliation->ldCount) { 2431 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2432 "affiliation for passive path for scsi%d\n", 2433 instance->host->host_no); 2434 retval = 1; 2435 goto out; 2436 } 2437 newmap = new_affiliation->map; 2438 savedmap = instance->vf_affiliation->map; 2439 thisVf = new_affiliation->thisVf; 2440 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2441 found = 0; 2442 for (j = 0; j < instance->vf_affiliation->ldCount; 2443 j++) { 2444 if (newmap->ref.targetId == 2445 savedmap->ref.targetId) { 2446 found = 1; 2447 if (newmap->policy[thisVf] != 2448 savedmap->policy[thisVf]) { 2449 doscan = 1; 2450 goto out; 2451 } 2452 } 2453 savedmap = (struct MR_LD_VF_MAP *) 2454 ((unsigned char *)savedmap + 2455 
savedmap->size); 2456 } 2457 if (!found && newmap->policy[thisVf] != 2458 MR_LD_ACCESS_HIDDEN) { 2459 doscan = 1; 2460 goto out; 2461 } 2462 newmap = (struct MR_LD_VF_MAP *) 2463 ((unsigned char *)newmap + newmap->size); 2464 } 2465 2466 newmap = new_affiliation->map; 2467 savedmap = instance->vf_affiliation->map; 2468 2469 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2470 found = 0; 2471 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2472 if (savedmap->ref.targetId == 2473 newmap->ref.targetId) { 2474 found = 1; 2475 if (savedmap->policy[thisVf] != 2476 newmap->policy[thisVf]) { 2477 doscan = 1; 2478 goto out; 2479 } 2480 } 2481 newmap = (struct MR_LD_VF_MAP *) 2482 ((unsigned char *)newmap + 2483 newmap->size); 2484 } 2485 if (!found && savedmap->policy[thisVf] != 2486 MR_LD_ACCESS_HIDDEN) { 2487 doscan = 1; 2488 goto out; 2489 } 2490 savedmap = (struct MR_LD_VF_MAP *) 2491 ((unsigned char *)savedmap + 2492 savedmap->size); 2493 } 2494 } 2495 out: 2496 if (doscan) { 2497 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2498 "affiliation for scsi%d\n", instance->host->host_no); 2499 memcpy(instance->vf_affiliation, new_affiliation, 2500 new_affiliation->size); 2501 retval = 1; 2502 } 2503 2504 if (new_affiliation) 2505 dma_free_coherent(&instance->pdev->dev, 2506 (MAX_LOGICAL_DRIVES + 1) * 2507 sizeof(struct MR_LD_VF_AFFILIATION), 2508 new_affiliation, new_affiliation_h); 2509 megasas_return_cmd(instance, cmd); 2510 2511 return retval; 2512 } 2513 2514 /* This function will get the current SR-IOV LD/VF affiliation */ 2515 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2516 int initial) 2517 { 2518 int retval; 2519 2520 if (instance->PlasmaFW111) 2521 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2522 else 2523 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2524 return retval; 2525 } 2526 2527 /* This function will tell FW to start the SR-IOV heartbeat */ 2528 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2529 int initial) 2530 { 2531 struct megasas_cmd *cmd; 2532 struct megasas_dcmd_frame *dcmd; 2533 int retval = 0; 2534 2535 cmd = megasas_get_cmd(instance); 2536 2537 if (!cmd) { 2538 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2539 "Failed to get cmd for scsi%d\n", 2540 instance->host->host_no); 2541 return -ENOMEM; 2542 } 2543 2544 dcmd = &cmd->frame->dcmd; 2545 2546 if (initial) { 2547 instance->hb_host_mem = 2548 dma_alloc_coherent(&instance->pdev->dev, 2549 sizeof(struct MR_CTRL_HB_HOST_MEM), 2550 &instance->hb_host_mem_h, 2551 GFP_KERNEL); 2552 if (!instance->hb_host_mem) { 2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2554 " memory for heartbeat host memory for scsi%d\n", 2555 instance->host->host_no); 2556 retval = -ENOMEM; 2557 goto out; 2558 } 2559 } 2560 2561 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2562 2563 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2564 dcmd->cmd = MFI_CMD_DCMD; 2565 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2566 dcmd->sge_count = 1; 2567 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2568 dcmd->timeout = 0; 2569 dcmd->pad_0 = 0; 2570 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2571 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2572 2573 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2574 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2575 2576 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for 
scsi%d\n", 2577 instance->host->host_no); 2578 2579 if ((instance->adapter_type != MFI_SERIES) && 2580 !instance->mask_interrupts) 2581 retval = megasas_issue_blocked_cmd(instance, cmd, 2582 MEGASAS_ROUTINE_WAIT_TIME_VF); 2583 else 2584 retval = megasas_issue_polled(instance, cmd); 2585 2586 if (retval) { 2587 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2588 "_MEM_ALLOC DCMD %s for scsi%d\n", 2589 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2590 "timed out" : "failed", instance->host->host_no); 2591 retval = 1; 2592 } 2593 2594 out: 2595 megasas_return_cmd(instance, cmd); 2596 2597 return retval; 2598 } 2599 2600 /* Handler for SR-IOV heartbeat */ 2601 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2602 { 2603 struct megasas_instance *instance = 2604 from_timer(instance, t, sriov_heartbeat_timer); 2605 2606 if (instance->hb_host_mem->HB.fwCounter != 2607 instance->hb_host_mem->HB.driverCounter) { 2608 instance->hb_host_mem->HB.driverCounter = 2609 instance->hb_host_mem->HB.fwCounter; 2610 mod_timer(&instance->sriov_heartbeat_timer, 2611 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2612 } else { 2613 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2614 "completed for scsi%d\n", instance->host->host_no); 2615 schedule_work(&instance->work_init); 2616 } 2617 } 2618 2619 /** 2620 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2621 * @instance: Adapter soft state 2622 * 2623 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2624 * complete all its outstanding commands. Returns error if one or more IOs 2625 * are pending after this time period. It also marks the controller dead. 2626 */ 2627 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2628 { 2629 int i, sl, outstanding; 2630 u32 reset_index; 2631 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2632 unsigned long flags; 2633 struct list_head clist_local; 2634 struct megasas_cmd *reset_cmd; 2635 u32 fw_state; 2636 2637 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2638 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2639 __func__, __LINE__); 2640 return FAILED; 2641 } 2642 2643 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2644 2645 INIT_LIST_HEAD(&clist_local); 2646 spin_lock_irqsave(&instance->hba_lock, flags); 2647 list_splice_init(&instance->internal_reset_pending_q, 2648 &clist_local); 2649 spin_unlock_irqrestore(&instance->hba_lock, flags); 2650 2651 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2652 for (i = 0; i < wait_time; i++) { 2653 msleep(1000); 2654 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2655 break; 2656 } 2657 2658 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2659 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2660 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2661 return FAILED; 2662 } 2663 2664 reset_index = 0; 2665 while (!list_empty(&clist_local)) { 2666 reset_cmd = list_entry((&clist_local)->next, 2667 struct megasas_cmd, list); 2668 list_del_init(&reset_cmd->list); 2669 if (reset_cmd->scmd) { 2670 reset_cmd->scmd->result = DID_REQUEUE << 16; 2671 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2672 reset_index, reset_cmd, 2673 reset_cmd->scmd->cmnd[0]); 2674 2675 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2676 megasas_return_cmd(instance, reset_cmd); 2677 } else if (reset_cmd->sync_cmd) { 2678 dev_notice(&instance->pdev->dev, "%p synch cmds" 
2679 "reset queue\n", 2680 reset_cmd); 2681 2682 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2683 instance->instancet->fire_cmd(instance, 2684 reset_cmd->frame_phys_addr, 2685 0, instance->reg_set); 2686 } else { 2687 dev_notice(&instance->pdev->dev, "%p unexpected" 2688 "cmds lst\n", 2689 reset_cmd); 2690 } 2691 reset_index++; 2692 } 2693 2694 return SUCCESS; 2695 } 2696 2697 for (i = 0; i < resetwaittime; i++) { 2698 outstanding = atomic_read(&instance->fw_outstanding); 2699 2700 if (!outstanding) 2701 break; 2702 2703 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2704 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2705 "commands to complete\n",i,outstanding); 2706 /* 2707 * Call cmd completion routine. Cmd to be 2708 * be completed directly without depending on isr. 2709 */ 2710 megasas_complete_cmd_dpc((unsigned long)instance); 2711 } 2712 2713 msleep(1000); 2714 } 2715 2716 i = 0; 2717 outstanding = atomic_read(&instance->fw_outstanding); 2718 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2719 2720 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2721 goto no_outstanding; 2722 2723 if (instance->disableOnlineCtrlReset) 2724 goto kill_hba_and_failed; 2725 do { 2726 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2727 dev_info(&instance->pdev->dev, 2728 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n", 2729 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2730 if (i == 3) 2731 goto kill_hba_and_failed; 2732 megasas_do_ocr(instance); 2733 2734 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2735 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2736 __func__, __LINE__); 2737 return FAILED; 2738 } 2739 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2740 __func__, __LINE__); 2741 2742 for (sl = 0; sl < 10; sl++) 2743 msleep(500); 2744 2745 outstanding = atomic_read(&instance->fw_outstanding); 2746 2747 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2748 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2749 goto no_outstanding; 2750 } 2751 i++; 2752 } while (i <= 3); 2753 2754 no_outstanding: 2755 2756 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2757 __func__, __LINE__); 2758 return SUCCESS; 2759 2760 kill_hba_and_failed: 2761 2762 /* Reset not supported, kill adapter */ 2763 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2764 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2765 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2766 atomic_read(&instance->fw_outstanding)); 2767 megasas_dump_pending_frames(instance); 2768 megaraid_sas_kill_hba(instance); 2769 2770 return FAILED; 2771 } 2772 2773 /** 2774 * megasas_generic_reset - Generic reset routine 2775 * @scmd: Mid-layer SCSI command 2776 * 2777 * This routine implements a generic reset handler for device, bus and host 2778 * reset requests. Device, bus and host specific reset handlers can use this 2779 * function after they do their specific tasks. 
2780 */ 2781 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2782 { 2783 int ret_val; 2784 struct megasas_instance *instance; 2785 2786 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2787 2788 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2789 scmd->cmnd[0], scmd->retries); 2790 2791 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2792 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2793 return FAILED; 2794 } 2795 2796 ret_val = megasas_wait_for_outstanding(instance); 2797 if (ret_val == SUCCESS) 2798 dev_notice(&instance->pdev->dev, "reset successful\n"); 2799 else 2800 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2801 2802 return ret_val; 2803 } 2804 2805 /** 2806 * megasas_reset_timer - quiesce the adapter if required 2807 * @scmd: scsi cmnd 2808 * 2809 * Sets the FW busy flag and reduces the host->can_queue if the 2810 * cmd has not been completed within the timeout period. 2811 */ 2812 static enum 2813 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2814 { 2815 struct megasas_instance *instance; 2816 unsigned long flags; 2817 2818 if (time_after(jiffies, scmd->jiffies_at_alloc + 2819 (scmd_timeout * 2) * HZ)) { 2820 return BLK_EH_DONE; 2821 } 2822 2823 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2824 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2825 /* FW is busy, throttle IO */ 2826 spin_lock_irqsave(instance->host->host_lock, flags); 2827 2828 instance->host->can_queue = instance->throttlequeuedepth; 2829 instance->last_time = jiffies; 2830 instance->flag |= MEGASAS_FW_BUSY; 2831 2832 spin_unlock_irqrestore(instance->host->host_lock, flags); 2833 } 2834 return BLK_EH_RESET_TIMER; 2835 } 2836 2837 /** 2838 * megasas_dump_frame - This function will dump MPT/MFI frame 2839 */ 2840 static inline void 2841 megasas_dump_frame(void *mpi_request, int sz) 2842 { 2843 int i; 2844 __le32 *mfp = (__le32 *)mpi_request; 2845 2846 printk(KERN_INFO "IO request frame:\n\t"); 2847 for (i = 0; i < sz / sizeof(__le32); i++) { 2848 if (i && ((i % 8) == 0)) 2849 printk("\n\t"); 2850 printk("%08x ", le32_to_cpu(mfp[i])); 2851 } 2852 printk("\n"); 2853 } 2854 2855 /** 2856 * megasas_reset_bus_host - Bus & host reset handler entry point 2857 */ 2858 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2859 { 2860 int ret; 2861 struct megasas_instance *instance; 2862 2863 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2864 2865 scmd_printk(KERN_INFO, scmd, 2866 "Controller reset is requested due to IO timeout\n" 2867 "SCSI command pointer: (%p)\t SCSI host state: %d\t" 2868 " SCSI host busy: %d\t FW outstanding: %d\n", 2869 scmd, scmd->device->host->shost_state, 2870 scsi_host_busy(scmd->device->host), 2871 atomic_read(&instance->fw_outstanding)); 2872 2873 /* 2874 * First wait for all commands to complete 2875 */ 2876 if (instance->adapter_type == MFI_SERIES) { 2877 ret = megasas_generic_reset(scmd); 2878 } else { 2879 struct megasas_cmd_fusion *cmd; 2880 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2881 if (cmd) 2882 megasas_dump_frame(cmd->io_request, 2883 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 2884 ret = megasas_reset_fusion(scmd->device->host, 2885 SCSIIO_TIMEOUT_OCR); 2886 } 2887 2888 return ret; 2889 } 2890 2891 /** 2892 * megasas_task_abort - Issues task abort request to firmware 2893 * (supported only for fusion adapters) 2894 * @scmd: SCSI command pointer 2895 */ 2896 static int megasas_task_abort(struct scsi_cmnd 
*scmd) 2897 { 2898 int ret; 2899 struct megasas_instance *instance; 2900 2901 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2902 2903 if (instance->adapter_type != MFI_SERIES) 2904 ret = megasas_task_abort_fusion(scmd); 2905 else { 2906 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2907 ret = FAILED; 2908 } 2909 2910 return ret; 2911 } 2912 2913 /** 2914 * megasas_reset_target: Issues target reset request to firmware 2915 * (supported only for fusion adapters) 2916 * @scmd: SCSI command pointer 2917 */ 2918 static int megasas_reset_target(struct scsi_cmnd *scmd) 2919 { 2920 int ret; 2921 struct megasas_instance *instance; 2922 2923 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2924 2925 if (instance->adapter_type != MFI_SERIES) 2926 ret = megasas_reset_target_fusion(scmd); 2927 else { 2928 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2929 ret = FAILED; 2930 } 2931 2932 return ret; 2933 } 2934 2935 /** 2936 * megasas_bios_param - Returns disk geometry for a disk 2937 * @sdev: device handle 2938 * @bdev: block device 2939 * @capacity: drive capacity 2940 * @geom: geometry parameters 2941 */ 2942 static int 2943 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2944 sector_t capacity, int geom[]) 2945 { 2946 int heads; 2947 int sectors; 2948 sector_t cylinders; 2949 unsigned long tmp; 2950 2951 /* Default heads (64) & sectors (32) */ 2952 heads = 64; 2953 sectors = 32; 2954 2955 tmp = heads * sectors; 2956 cylinders = capacity; 2957 2958 sector_div(cylinders, tmp); 2959 2960 /* 2961 * Handle extended translation size for logical drives > 1Gb 2962 */ 2963 2964 if (capacity >= 0x200000) { 2965 heads = 255; 2966 sectors = 63; 2967 tmp = heads*sectors; 2968 cylinders = capacity; 2969 sector_div(cylinders, tmp); 2970 } 2971 2972 geom[0] = heads; 2973 geom[1] = sectors; 2974 geom[2] = cylinders; 2975 2976 return 0; 2977 } 2978 2979 static void megasas_aen_polling(struct work_struct *work); 2980 2981 /** 2982 * megasas_service_aen - Processes an event notification 2983 * @instance: Adapter soft state 2984 * @cmd: AEN command completed by the ISR 2985 * 2986 * For AEN, driver sends a command down to FW that is held by the FW till an 2987 * event occurs. When an event of interest occurs, FW completes the command 2988 * that it was previously holding. 2989 * 2990 * This routines sends SIGIO signal to processes that have registered with the 2991 * driver for AEN. 
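 *
 * Besides waking pollers and sending SIGIO, this handler re-arms the hotplug
 * scan by scheduling megasas_aen_polling() as delayed work, provided the
 * driver is not unloading and pending-command reissue has completed.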
2992 */ 2993 static void 2994 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2995 { 2996 unsigned long flags; 2997 2998 /* 2999 * Don't signal app if it is just an aborted previously registered aen 3000 */ 3001 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3002 spin_lock_irqsave(&poll_aen_lock, flags); 3003 megasas_poll_wait_aen = 1; 3004 spin_unlock_irqrestore(&poll_aen_lock, flags); 3005 wake_up(&megasas_poll_wait); 3006 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3007 } 3008 else 3009 cmd->abort_aen = 0; 3010 3011 instance->aen_cmd = NULL; 3012 3013 megasas_return_cmd(instance, cmd); 3014 3015 if ((instance->unload == 0) && 3016 ((instance->issuepend_done == 1))) { 3017 struct megasas_aen_event *ev; 3018 3019 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3020 if (!ev) { 3021 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3022 } else { 3023 ev->instance = instance; 3024 instance->ev = ev; 3025 INIT_DELAYED_WORK(&ev->hotplug_work, 3026 megasas_aen_polling); 3027 schedule_delayed_work(&ev->hotplug_work, 0); 3028 } 3029 } 3030 } 3031 3032 static ssize_t 3033 megasas_fw_crash_buffer_store(struct device *cdev, 3034 struct device_attribute *attr, const char *buf, size_t count) 3035 { 3036 struct Scsi_Host *shost = class_to_shost(cdev); 3037 struct megasas_instance *instance = 3038 (struct megasas_instance *) shost->hostdata; 3039 int val = 0; 3040 unsigned long flags; 3041 3042 if (kstrtoint(buf, 0, &val) != 0) 3043 return -EINVAL; 3044 3045 spin_lock_irqsave(&instance->crashdump_lock, flags); 3046 instance->fw_crash_buffer_offset = val; 3047 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3048 return strlen(buf); 3049 } 3050 3051 static ssize_t 3052 megasas_fw_crash_buffer_show(struct device *cdev, 3053 struct device_attribute *attr, char *buf) 3054 { 3055 struct Scsi_Host *shost = class_to_shost(cdev); 3056 struct megasas_instance *instance = 3057 (struct megasas_instance *) shost->hostdata; 3058 u32 size; 3059 unsigned long buff_addr; 3060 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3061 unsigned long src_addr; 3062 unsigned long flags; 3063 u32 buff_offset; 3064 3065 spin_lock_irqsave(&instance->crashdump_lock, flags); 3066 buff_offset = instance->fw_crash_buffer_offset; 3067 if (!instance->crash_dump_buf && 3068 !((instance->fw_crash_state == AVAILABLE) || 3069 (instance->fw_crash_state == COPYING))) { 3070 dev_err(&instance->pdev->dev, 3071 "Firmware crash dump is not available\n"); 3072 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3073 return -EINVAL; 3074 } 3075 3076 buff_addr = (unsigned long) buf; 3077 3078 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3079 dev_err(&instance->pdev->dev, 3080 "Firmware crash dump offset is out of range\n"); 3081 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3082 return 0; 3083 } 3084 3085 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3086 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 3087 3088 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3089 (buff_offset % dmachunk); 3090 memcpy(buf, (void *)src_addr, size); 3091 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3092 3093 return size; 3094 } 3095 3096 static ssize_t 3097 megasas_fw_crash_buffer_size_show(struct device *cdev, 3098 struct device_attribute *attr, char *buf) 3099 { 3100 struct Scsi_Host *shost = class_to_shost(cdev); 3101 struct megasas_instance *instance = 3102 (struct megasas_instance *) shost->hostdata; 3103 3104 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3105 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3106 } 3107 3108 static ssize_t 3109 megasas_fw_crash_state_store(struct device *cdev, 3110 struct device_attribute *attr, const char *buf, size_t count) 3111 { 3112 struct Scsi_Host *shost = class_to_shost(cdev); 3113 struct megasas_instance *instance = 3114 (struct megasas_instance *) shost->hostdata; 3115 int val = 0; 3116 unsigned long flags; 3117 3118 if (kstrtoint(buf, 0, &val) != 0) 3119 return -EINVAL; 3120 3121 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3122 dev_err(&instance->pdev->dev, "application updates invalid " 3123 "firmware crash state\n"); 3124 return -EINVAL; 3125 } 3126 3127 instance->fw_crash_state = val; 3128 3129 if ((val == COPIED) || (val == COPY_ERROR)) { 3130 spin_lock_irqsave(&instance->crashdump_lock, flags); 3131 megasas_free_host_crash_buffer(instance); 3132 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3133 if (val == COPY_ERROR) 3134 dev_info(&instance->pdev->dev, "application failed to " 3135 "copy Firmware crash dump\n"); 3136 else 3137 dev_info(&instance->pdev->dev, "Firmware crash dump " 3138 "copied successfully\n"); 3139 } 3140 return strlen(buf); 3141 } 3142 3143 static ssize_t 3144 megasas_fw_crash_state_show(struct device *cdev, 3145 struct device_attribute *attr, char *buf) 3146 { 3147 struct Scsi_Host *shost = class_to_shost(cdev); 3148 struct megasas_instance *instance = 3149 (struct megasas_instance *) shost->hostdata; 3150 3151 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3152 } 3153 3154 static ssize_t 3155 megasas_page_size_show(struct device *cdev, 3156 struct device_attribute *attr, char *buf) 3157 { 3158 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3159 } 3160 3161 static ssize_t 3162 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3163 char *buf) 3164 { 3165 struct Scsi_Host *shost = class_to_shost(cdev); 3166 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3167 3168 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3169 } 3170 3171 static ssize_t 3172 megasas_fw_cmds_outstanding_show(struct device *cdev, 3173 struct device_attribute *attr, char *buf) 3174 { 3175 struct Scsi_Host *shost = class_to_shost(cdev); 3176 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3177 3178 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3179 } 3180 3181 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3182 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3183 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3184 megasas_fw_crash_buffer_size_show, NULL); 3185 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 3186 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 3187 static DEVICE_ATTR(page_size, S_IRUGO, 3188 
megasas_page_size_show, NULL); 3189 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3190 megasas_ldio_outstanding_show, NULL); 3191 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO, 3192 megasas_fw_cmds_outstanding_show, NULL); 3193 3194 struct device_attribute *megaraid_host_attrs[] = { 3195 &dev_attr_fw_crash_buffer_size, 3196 &dev_attr_fw_crash_buffer, 3197 &dev_attr_fw_crash_state, 3198 &dev_attr_page_size, 3199 &dev_attr_ldio_outstanding, 3200 &dev_attr_fw_cmds_outstanding, 3201 NULL, 3202 }; 3203 3204 /* 3205 * Scsi host template for megaraid_sas driver 3206 */ 3207 static struct scsi_host_template megasas_template = { 3208 3209 .module = THIS_MODULE, 3210 .name = "Avago SAS based MegaRAID driver", 3211 .proc_name = "megaraid_sas", 3212 .slave_configure = megasas_slave_configure, 3213 .slave_alloc = megasas_slave_alloc, 3214 .slave_destroy = megasas_slave_destroy, 3215 .queuecommand = megasas_queue_command, 3216 .eh_target_reset_handler = megasas_reset_target, 3217 .eh_abort_handler = megasas_task_abort, 3218 .eh_host_reset_handler = megasas_reset_bus_host, 3219 .eh_timed_out = megasas_reset_timer, 3220 .shost_attrs = megaraid_host_attrs, 3221 .bios_param = megasas_bios_param, 3222 .change_queue_depth = scsi_change_queue_depth, 3223 .no_write_same = 1, 3224 }; 3225 3226 /** 3227 * megasas_complete_int_cmd - Completes an internal command 3228 * @instance: Adapter soft state 3229 * @cmd: Command to be completed 3230 * 3231 * The megasas_issue_blocked_cmd() function waits for a command to complete 3232 * after it issues a command. This function wakes up that waiting routine by 3233 * calling wake_up() on the wait queue. 3234 */ 3235 static void 3236 megasas_complete_int_cmd(struct megasas_instance *instance, 3237 struct megasas_cmd *cmd) 3238 { 3239 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3240 wake_up(&instance->int_cmd_wait_q); 3241 } 3242 3243 /** 3244 * megasas_complete_abort - Completes aborting a command 3245 * @instance: Adapter soft state 3246 * @cmd: Cmd that was issued to abort another cmd 3247 * 3248 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3249 * after it issues an abort on a previously issued command. This function 3250 * wakes up all functions waiting on the same wait queue. 3251 */ 3252 static void 3253 megasas_complete_abort(struct megasas_instance *instance, 3254 struct megasas_cmd *cmd) 3255 { 3256 if (cmd->sync_cmd) { 3257 cmd->sync_cmd = 0; 3258 cmd->cmd_status_drv = 0; 3259 wake_up(&instance->abort_cmd_wait_q); 3260 } 3261 } 3262 3263 /** 3264 * megasas_complete_cmd - Completes a command 3265 * @instance: Adapter soft state 3266 * @cmd: Command to be completed 3267 * @alt_status: If non-zero, use this value as status to 3268 * SCSI mid-layer instead of the value returned 3269 * by the FW. 
This should be used if caller wants 3270 * an alternate status (as in the case of aborted 3271 * commands) 3272 */ 3273 void 3274 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3275 u8 alt_status) 3276 { 3277 int exception = 0; 3278 struct megasas_header *hdr = &cmd->frame->hdr; 3279 unsigned long flags; 3280 struct fusion_context *fusion = instance->ctrl_context; 3281 u32 opcode, status; 3282 3283 /* flag for the retry reset */ 3284 cmd->retry_for_fw_reset = 0; 3285 3286 if (cmd->scmd) 3287 cmd->scmd->SCp.ptr = NULL; 3288 3289 switch (hdr->cmd) { 3290 case MFI_CMD_INVALID: 3291 /* Some older 1068 controller FW may keep a pended 3292 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3293 when booting the kdump kernel. Ignore this command to 3294 prevent a kernel panic on shutdown of the kdump kernel. */ 3295 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3296 "completed\n"); 3297 dev_warn(&instance->pdev->dev, "If you have a controller " 3298 "other than PERC5, please upgrade your firmware\n"); 3299 break; 3300 case MFI_CMD_PD_SCSI_IO: 3301 case MFI_CMD_LD_SCSI_IO: 3302 3303 /* 3304 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3305 * issued either through an IO path or an IOCTL path. If it 3306 * was via IOCTL, we will send it to internal completion. 3307 */ 3308 if (cmd->sync_cmd) { 3309 cmd->sync_cmd = 0; 3310 megasas_complete_int_cmd(instance, cmd); 3311 break; 3312 } 3313 /* fall through */ 3314 3315 case MFI_CMD_LD_READ: 3316 case MFI_CMD_LD_WRITE: 3317 3318 if (alt_status) { 3319 cmd->scmd->result = alt_status << 16; 3320 exception = 1; 3321 } 3322 3323 if (exception) { 3324 3325 atomic_dec(&instance->fw_outstanding); 3326 3327 scsi_dma_unmap(cmd->scmd); 3328 cmd->scmd->scsi_done(cmd->scmd); 3329 megasas_return_cmd(instance, cmd); 3330 3331 break; 3332 } 3333 3334 switch (hdr->cmd_status) { 3335 3336 case MFI_STAT_OK: 3337 cmd->scmd->result = DID_OK << 16; 3338 break; 3339 3340 case MFI_STAT_SCSI_IO_FAILED: 3341 case MFI_STAT_LD_INIT_IN_PROGRESS: 3342 cmd->scmd->result = 3343 (DID_ERROR << 16) | hdr->scsi_status; 3344 break; 3345 3346 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3347 3348 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3349 3350 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3351 memset(cmd->scmd->sense_buffer, 0, 3352 SCSI_SENSE_BUFFERSIZE); 3353 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3354 hdr->sense_len); 3355 3356 cmd->scmd->result |= DRIVER_SENSE << 24; 3357 } 3358 3359 break; 3360 3361 case MFI_STAT_LD_OFFLINE: 3362 case MFI_STAT_DEVICE_NOT_FOUND: 3363 cmd->scmd->result = DID_BAD_TARGET << 16; 3364 break; 3365 3366 default: 3367 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3368 hdr->cmd_status); 3369 cmd->scmd->result = DID_ERROR << 16; 3370 break; 3371 } 3372 3373 atomic_dec(&instance->fw_outstanding); 3374 3375 scsi_dma_unmap(cmd->scmd); 3376 cmd->scmd->scsi_done(cmd->scmd); 3377 megasas_return_cmd(instance, cmd); 3378 3379 break; 3380 3381 case MFI_CMD_SMP: 3382 case MFI_CMD_STP: 3383 case MFI_CMD_NVME: 3384 megasas_complete_int_cmd(instance, cmd); 3385 break; 3386 3387 case MFI_CMD_DCMD: 3388 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3389 /* Check for LD map update */ 3390 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3391 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3392 fusion->fast_path_io = 0; 3393 spin_lock_irqsave(instance->host->host_lock, flags); 3394 status = cmd->frame->hdr.cmd_status; 3395 instance->map_update_cmd = NULL; 3396 if (status != MFI_STAT_OK) { 
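				/*
				 * Anything other than MFI_STAT_NOT_FOUND is
				 * logged as a map sync failure;
				 * MFI_STAT_NOT_FOUND simply returns the
				 * command without re-validating the map.
				 */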
3397 if (status != MFI_STAT_NOT_FOUND) 3398 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3399 cmd->frame->hdr.cmd_status); 3400 else { 3401 megasas_return_cmd(instance, cmd); 3402 spin_unlock_irqrestore( 3403 instance->host->host_lock, 3404 flags); 3405 break; 3406 } 3407 } 3408 3409 megasas_return_cmd(instance, cmd); 3410 3411 /* 3412 * Set fast path IO to ZERO. 3413 * Validate Map will set proper value. 3414 * Meanwhile all IOs will go as LD IO. 3415 */ 3416 if (status == MFI_STAT_OK && 3417 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3418 instance->map_id++; 3419 fusion->fast_path_io = 1; 3420 } else { 3421 fusion->fast_path_io = 0; 3422 } 3423 3424 megasas_sync_map_info(instance); 3425 spin_unlock_irqrestore(instance->host->host_lock, 3426 flags); 3427 break; 3428 } 3429 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3430 opcode == MR_DCMD_CTRL_EVENT_GET) { 3431 spin_lock_irqsave(&poll_aen_lock, flags); 3432 megasas_poll_wait_aen = 0; 3433 spin_unlock_irqrestore(&poll_aen_lock, flags); 3434 } 3435 3436 /* FW has an updated PD sequence */ 3437 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3438 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3439 3440 spin_lock_irqsave(instance->host->host_lock, flags); 3441 status = cmd->frame->hdr.cmd_status; 3442 instance->jbod_seq_cmd = NULL; 3443 megasas_return_cmd(instance, cmd); 3444 3445 if (status == MFI_STAT_OK) { 3446 instance->pd_seq_map_id++; 3447 /* Re-register a pd sync seq num cmd */ 3448 if (megasas_sync_pd_seq_num(instance, true)) 3449 instance->use_seqnum_jbod_fp = false; 3450 } else 3451 instance->use_seqnum_jbod_fp = false; 3452 3453 spin_unlock_irqrestore(instance->host->host_lock, flags); 3454 break; 3455 } 3456 3457 /* 3458 * See if got an event notification 3459 */ 3460 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3461 megasas_service_aen(instance, cmd); 3462 else 3463 megasas_complete_int_cmd(instance, cmd); 3464 3465 break; 3466 3467 case MFI_CMD_ABORT: 3468 /* 3469 * Cmd issued to abort another cmd returned 3470 */ 3471 megasas_complete_abort(instance, cmd); 3472 break; 3473 3474 default: 3475 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3476 hdr->cmd); 3477 megasas_complete_int_cmd(instance, cmd); 3478 break; 3479 } 3480 } 3481 3482 /** 3483 * megasas_issue_pending_cmds_again - issue all pending cmds 3484 * in FW again because of the fw reset 3485 * @instance: Adapter soft state 3486 */ 3487 static inline void 3488 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3489 { 3490 struct megasas_cmd *cmd; 3491 struct list_head clist_local; 3492 union megasas_evt_class_locale class_locale; 3493 unsigned long flags; 3494 u32 seq_num; 3495 3496 INIT_LIST_HEAD(&clist_local); 3497 spin_lock_irqsave(&instance->hba_lock, flags); 3498 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3499 spin_unlock_irqrestore(&instance->hba_lock, flags); 3500 3501 while (!list_empty(&clist_local)) { 3502 cmd = list_entry((&clist_local)->next, 3503 struct megasas_cmd, list); 3504 list_del_init(&cmd->list); 3505 3506 if (cmd->sync_cmd || cmd->scmd) { 3507 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3508 "detected to be pending while HBA reset\n", 3509 cmd, cmd->scmd, cmd->sync_cmd); 3510 3511 cmd->retry_for_fw_reset++; 3512 3513 if (cmd->retry_for_fw_reset == 3) { 3514 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3515 "was tried multiple times during reset." 
3516 "Shutting down the HBA\n", 3517 cmd, cmd->scmd, cmd->sync_cmd); 3518 instance->instancet->disable_intr(instance); 3519 atomic_set(&instance->fw_reset_no_pci_access, 1); 3520 megaraid_sas_kill_hba(instance); 3521 return; 3522 } 3523 } 3524 3525 if (cmd->sync_cmd == 1) { 3526 if (cmd->scmd) { 3527 dev_notice(&instance->pdev->dev, "unexpected" 3528 "cmd attached to internal command!\n"); 3529 } 3530 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3531 "on the internal reset queue," 3532 "issue it again.\n", cmd); 3533 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3534 instance->instancet->fire_cmd(instance, 3535 cmd->frame_phys_addr, 3536 0, instance->reg_set); 3537 } else if (cmd->scmd) { 3538 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3539 "detected on the internal queue, issue again.\n", 3540 cmd, cmd->scmd->cmnd[0]); 3541 3542 atomic_inc(&instance->fw_outstanding); 3543 instance->instancet->fire_cmd(instance, 3544 cmd->frame_phys_addr, 3545 cmd->frame_count-1, instance->reg_set); 3546 } else { 3547 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3548 "internal reset defer list while re-issue!!\n", 3549 cmd); 3550 } 3551 } 3552 3553 if (instance->aen_cmd) { 3554 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3555 megasas_return_cmd(instance, instance->aen_cmd); 3556 3557 instance->aen_cmd = NULL; 3558 } 3559 3560 /* 3561 * Initiate AEN (Asynchronous Event Notification) 3562 */ 3563 seq_num = instance->last_seq_num; 3564 class_locale.members.reserved = 0; 3565 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3566 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3567 3568 megasas_register_aen(instance, seq_num, class_locale.word); 3569 } 3570 3571 /** 3572 * Move the internal reset pending commands to a deferred queue. 3573 * 3574 * We move the commands pending at internal reset time to a 3575 * pending queue. This queue would be flushed after successful 3576 * completion of the internal reset sequence. if the internal reset 3577 * did not complete in time, the kernel reset handler would flush 3578 * these commands. 
3579 **/ 3580 static void 3581 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3582 { 3583 struct megasas_cmd *cmd; 3584 int i; 3585 u16 max_cmd = instance->max_fw_cmds; 3586 u32 defer_index; 3587 unsigned long flags; 3588 3589 defer_index = 0; 3590 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3591 for (i = 0; i < max_cmd; i++) { 3592 cmd = instance->cmd_list[i]; 3593 if (cmd->sync_cmd == 1 || cmd->scmd) { 3594 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3595 "on the defer queue as internal\n", 3596 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3597 3598 if (!list_empty(&cmd->list)) { 3599 dev_notice(&instance->pdev->dev, "ERROR while" 3600 " moving this cmd:%p, %d %p, it was" 3601 "discovered on some list?\n", 3602 cmd, cmd->sync_cmd, cmd->scmd); 3603 3604 list_del_init(&cmd->list); 3605 } 3606 defer_index++; 3607 list_add_tail(&cmd->list, 3608 &instance->internal_reset_pending_q); 3609 } 3610 } 3611 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3612 } 3613 3614 3615 static void 3616 process_fw_state_change_wq(struct work_struct *work) 3617 { 3618 struct megasas_instance *instance = 3619 container_of(work, struct megasas_instance, work_init); 3620 u32 wait; 3621 unsigned long flags; 3622 3623 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3624 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3625 atomic_read(&instance->adprecovery)); 3626 return ; 3627 } 3628 3629 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3630 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3631 "state, restarting it...\n"); 3632 3633 instance->instancet->disable_intr(instance); 3634 atomic_set(&instance->fw_outstanding, 0); 3635 3636 atomic_set(&instance->fw_reset_no_pci_access, 1); 3637 instance->instancet->adp_reset(instance, instance->reg_set); 3638 atomic_set(&instance->fw_reset_no_pci_access, 0); 3639 3640 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3641 "initiating next stage...\n"); 3642 3643 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3644 "state 2 starting...\n"); 3645 3646 /* waiting for about 20 second before start the second init */ 3647 for (wait = 0; wait < 30; wait++) { 3648 msleep(1000); 3649 } 3650 3651 if (megasas_transition_to_ready(instance, 1)) { 3652 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3653 3654 atomic_set(&instance->fw_reset_no_pci_access, 1); 3655 megaraid_sas_kill_hba(instance); 3656 return ; 3657 } 3658 3659 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3660 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3661 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3662 ) { 3663 *instance->consumer = *instance->producer; 3664 } else { 3665 *instance->consumer = 0; 3666 *instance->producer = 0; 3667 } 3668 3669 megasas_issue_init_mfi(instance); 3670 3671 spin_lock_irqsave(&instance->hba_lock, flags); 3672 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3673 spin_unlock_irqrestore(&instance->hba_lock, flags); 3674 instance->instancet->enable_intr(instance); 3675 3676 megasas_issue_pending_cmds_again(instance); 3677 instance->issuepend_done = 1; 3678 } 3679 } 3680 3681 /** 3682 * megasas_deplete_reply_queue - Processes all completed commands 3683 * @instance: Adapter soft state 3684 * @alt_status: Alternate status to be returned to 3685 * SCSI mid-layer instead of the status 3686 * returned by the FW 3687 * Note: this must be called with hba lock held 3688 */ 3689 static 
int 3690 megasas_deplete_reply_queue(struct megasas_instance *instance, 3691 u8 alt_status) 3692 { 3693 u32 mfiStatus; 3694 u32 fw_state; 3695 3696 if ((mfiStatus = instance->instancet->check_reset(instance, 3697 instance->reg_set)) == 1) { 3698 return IRQ_HANDLED; 3699 } 3700 3701 mfiStatus = instance->instancet->clear_intr(instance); 3702 if (mfiStatus == 0) { 3703 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3704 if (!instance->msix_vectors) 3705 return IRQ_NONE; 3706 } 3707 3708 instance->mfiStatus = mfiStatus; 3709 3710 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3711 fw_state = instance->instancet->read_fw_status_reg( 3712 instance) & MFI_STATE_MASK; 3713 3714 if (fw_state != MFI_STATE_FAULT) { 3715 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3716 fw_state); 3717 } 3718 3719 if ((fw_state == MFI_STATE_FAULT) && 3720 (instance->disableOnlineCtrlReset == 0)) { 3721 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3722 3723 if ((instance->pdev->device == 3724 PCI_DEVICE_ID_LSI_SAS1064R) || 3725 (instance->pdev->device == 3726 PCI_DEVICE_ID_DELL_PERC5) || 3727 (instance->pdev->device == 3728 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3729 3730 *instance->consumer = 3731 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3732 } 3733 3734 3735 instance->instancet->disable_intr(instance); 3736 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3737 instance->issuepend_done = 0; 3738 3739 atomic_set(&instance->fw_outstanding, 0); 3740 megasas_internal_reset_defer_cmds(instance); 3741 3742 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3743 fw_state, atomic_read(&instance->adprecovery)); 3744 3745 schedule_work(&instance->work_init); 3746 return IRQ_HANDLED; 3747 3748 } else { 3749 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3750 fw_state, instance->disableOnlineCtrlReset); 3751 } 3752 } 3753 3754 tasklet_schedule(&instance->isr_tasklet); 3755 return IRQ_HANDLED; 3756 } 3757 /** 3758 * megasas_isr - isr entry point 3759 */ 3760 static irqreturn_t megasas_isr(int irq, void *devp) 3761 { 3762 struct megasas_irq_context *irq_context = devp; 3763 struct megasas_instance *instance = irq_context->instance; 3764 unsigned long flags; 3765 irqreturn_t rc; 3766 3767 if (atomic_read(&instance->fw_reset_no_pci_access)) 3768 return IRQ_HANDLED; 3769 3770 spin_lock_irqsave(&instance->hba_lock, flags); 3771 rc = megasas_deplete_reply_queue(instance, DID_OK); 3772 spin_unlock_irqrestore(&instance->hba_lock, flags); 3773 3774 return rc; 3775 } 3776 3777 /** 3778 * megasas_transition_to_ready - Move the FW to READY state 3779 * @instance: Adapter soft state 3780 * 3781 * During the initialization, FW passes can potentially be in any one of 3782 * several possible states. If the FW in operational, waiting-for-handshake 3783 * states, driver must take steps to bring it to ready state. Otherwise, it 3784 * has to wait for the ready state. 
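 *
 * Returns 0 once the firmware reaches MFI_STATE_READY. Returns -ENODEV if
 * the firmware is in FAULT state and @ocr is zero, reports an unknown state,
 * or fails to leave its current state within the per-state wait period.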
3785 */ 3786 int 3787 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3788 { 3789 int i; 3790 u8 max_wait; 3791 u32 fw_state; 3792 u32 cur_state; 3793 u32 abs_state, curr_abs_state; 3794 3795 abs_state = instance->instancet->read_fw_status_reg(instance); 3796 fw_state = abs_state & MFI_STATE_MASK; 3797 3798 if (fw_state != MFI_STATE_READY) 3799 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3800 " state\n"); 3801 3802 while (fw_state != MFI_STATE_READY) { 3803 3804 switch (fw_state) { 3805 3806 case MFI_STATE_FAULT: 3807 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3808 if (ocr) { 3809 max_wait = MEGASAS_RESET_WAIT_TIME; 3810 cur_state = MFI_STATE_FAULT; 3811 break; 3812 } else 3813 return -ENODEV; 3814 3815 case MFI_STATE_WAIT_HANDSHAKE: 3816 /* 3817 * Set the CLR bit in inbound doorbell 3818 */ 3819 if ((instance->pdev->device == 3820 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3821 (instance->pdev->device == 3822 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3823 (instance->adapter_type != MFI_SERIES)) 3824 writel( 3825 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3826 &instance->reg_set->doorbell); 3827 else 3828 writel( 3829 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3830 &instance->reg_set->inbound_doorbell); 3831 3832 max_wait = MEGASAS_RESET_WAIT_TIME; 3833 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3834 break; 3835 3836 case MFI_STATE_BOOT_MESSAGE_PENDING: 3837 if ((instance->pdev->device == 3838 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3839 (instance->pdev->device == 3840 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3841 (instance->adapter_type != MFI_SERIES)) 3842 writel(MFI_INIT_HOTPLUG, 3843 &instance->reg_set->doorbell); 3844 else 3845 writel(MFI_INIT_HOTPLUG, 3846 &instance->reg_set->inbound_doorbell); 3847 3848 max_wait = MEGASAS_RESET_WAIT_TIME; 3849 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3850 break; 3851 3852 case MFI_STATE_OPERATIONAL: 3853 /* 3854 * Bring it to READY state; assuming max wait 10 secs 3855 */ 3856 instance->instancet->disable_intr(instance); 3857 if ((instance->pdev->device == 3858 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3859 (instance->pdev->device == 3860 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3861 (instance->adapter_type != MFI_SERIES)) { 3862 writel(MFI_RESET_FLAGS, 3863 &instance->reg_set->doorbell); 3864 3865 if (instance->adapter_type != MFI_SERIES) { 3866 for (i = 0; i < (10 * 1000); i += 20) { 3867 if (megasas_readl( 3868 instance, 3869 &instance-> 3870 reg_set-> 3871 doorbell) & 1) 3872 msleep(20); 3873 else 3874 break; 3875 } 3876 } 3877 } else 3878 writel(MFI_RESET_FLAGS, 3879 &instance->reg_set->inbound_doorbell); 3880 3881 max_wait = MEGASAS_RESET_WAIT_TIME; 3882 cur_state = MFI_STATE_OPERATIONAL; 3883 break; 3884 3885 case MFI_STATE_UNDEFINED: 3886 /* 3887 * This state should not last for more than 2 seconds 3888 */ 3889 max_wait = MEGASAS_RESET_WAIT_TIME; 3890 cur_state = MFI_STATE_UNDEFINED; 3891 break; 3892 3893 case MFI_STATE_BB_INIT: 3894 max_wait = MEGASAS_RESET_WAIT_TIME; 3895 cur_state = MFI_STATE_BB_INIT; 3896 break; 3897 3898 case MFI_STATE_FW_INIT: 3899 max_wait = MEGASAS_RESET_WAIT_TIME; 3900 cur_state = MFI_STATE_FW_INIT; 3901 break; 3902 3903 case MFI_STATE_FW_INIT_2: 3904 max_wait = MEGASAS_RESET_WAIT_TIME; 3905 cur_state = MFI_STATE_FW_INIT_2; 3906 break; 3907 3908 case MFI_STATE_DEVICE_SCAN: 3909 max_wait = MEGASAS_RESET_WAIT_TIME; 3910 cur_state = MFI_STATE_DEVICE_SCAN; 3911 break; 3912 3913 case MFI_STATE_FLUSH_CACHE: 3914 max_wait = MEGASAS_RESET_WAIT_TIME; 3915 cur_state = 
MFI_STATE_FLUSH_CACHE; 3916 break; 3917 3918 default: 3919 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 3920 fw_state); 3921 return -ENODEV; 3922 } 3923 3924 /* 3925 * The cur_state should not last for more than max_wait secs 3926 */ 3927 for (i = 0; i < max_wait * 50; i++) { 3928 curr_abs_state = instance->instancet-> 3929 read_fw_status_reg(instance); 3930 3931 if (abs_state == curr_abs_state) { 3932 msleep(20); 3933 } else 3934 break; 3935 } 3936 3937 /* 3938 * Return error if fw_state hasn't changed after max_wait 3939 */ 3940 if (curr_abs_state == abs_state) { 3941 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3942 "in %d secs\n", fw_state, max_wait); 3943 return -ENODEV; 3944 } 3945 3946 abs_state = curr_abs_state; 3947 fw_state = curr_abs_state & MFI_STATE_MASK; 3948 } 3949 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3950 3951 return 0; 3952 } 3953 3954 /** 3955 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3956 * @instance: Adapter soft state 3957 */ 3958 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3959 { 3960 int i; 3961 u16 max_cmd = instance->max_mfi_cmds; 3962 struct megasas_cmd *cmd; 3963 3964 if (!instance->frame_dma_pool) 3965 return; 3966 3967 /* 3968 * Return all frames to pool 3969 */ 3970 for (i = 0; i < max_cmd; i++) { 3971 3972 cmd = instance->cmd_list[i]; 3973 3974 if (cmd->frame) 3975 dma_pool_free(instance->frame_dma_pool, cmd->frame, 3976 cmd->frame_phys_addr); 3977 3978 if (cmd->sense) 3979 dma_pool_free(instance->sense_dma_pool, cmd->sense, 3980 cmd->sense_phys_addr); 3981 } 3982 3983 /* 3984 * Now destroy the pool itself 3985 */ 3986 dma_pool_destroy(instance->frame_dma_pool); 3987 dma_pool_destroy(instance->sense_dma_pool); 3988 3989 instance->frame_dma_pool = NULL; 3990 instance->sense_dma_pool = NULL; 3991 } 3992 3993 /** 3994 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3995 * @instance: Adapter soft state 3996 * 3997 * Each command packet has an embedded DMA memory buffer that is used for 3998 * filling MFI frame and the SG list that immediately follows the frame. This 3999 * function creates those DMA memory buffers for each command packet by using 4000 * PCI pool facility. 4001 */ 4002 static int megasas_create_frame_pool(struct megasas_instance *instance) 4003 { 4004 int i; 4005 u16 max_cmd; 4006 u32 sge_sz; 4007 u32 frame_count; 4008 struct megasas_cmd *cmd; 4009 4010 max_cmd = instance->max_mfi_cmds; 4011 4012 /* 4013 * Size of our frame is 64 bytes for MFI frame, followed by max SG 4014 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 4015 */ 4016 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 4017 sizeof(struct megasas_sge32); 4018 4019 if (instance->flag_ieee) 4020 sge_sz = sizeof(struct megasas_sge_skinny); 4021 4022 /* 4023 * For MFI controllers. 4024 * max_num_sge = 60 4025 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4026 * Total 960 byte (15 MFI frame of 64 byte) 4027 * 4028 * Fusion adapter require only 3 extra frame. 4029 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4030 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4031 * Total 192 byte (3 MFI frame of 64 byte) 4032 */ 4033 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4034 (15 + 1) : (3 + 1); 4035 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4036 /* 4037 * Use DMA pool facility provided by PCI layer 4038 */ 4039 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4040 &instance->pdev->dev, 4041 instance->mfi_frame_size, 256, 0); 4042 4043 if (!instance->frame_dma_pool) { 4044 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4045 return -ENOMEM; 4046 } 4047 4048 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4049 &instance->pdev->dev, 128, 4050 4, 0); 4051 4052 if (!instance->sense_dma_pool) { 4053 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4054 4055 dma_pool_destroy(instance->frame_dma_pool); 4056 instance->frame_dma_pool = NULL; 4057 4058 return -ENOMEM; 4059 } 4060 4061 /* 4062 * Allocate and attach a frame to each of the commands in cmd_list. 4063 * By making cmd->index as the context instead of the &cmd, we can 4064 * always use 32bit context regardless of the architecture 4065 */ 4066 for (i = 0; i < max_cmd; i++) { 4067 4068 cmd = instance->cmd_list[i]; 4069 4070 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4071 GFP_KERNEL, &cmd->frame_phys_addr); 4072 4073 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4074 GFP_KERNEL, &cmd->sense_phys_addr); 4075 4076 /* 4077 * megasas_teardown_frame_pool() takes care of freeing 4078 * whatever has been allocated 4079 */ 4080 if (!cmd->frame || !cmd->sense) { 4081 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4082 megasas_teardown_frame_pool(instance); 4083 return -ENOMEM; 4084 } 4085 4086 cmd->frame->io.context = cpu_to_le32(cmd->index); 4087 cmd->frame->io.pad_0 = 0; 4088 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4089 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4090 } 4091 4092 return 0; 4093 } 4094 4095 /** 4096 * megasas_free_cmds - Free all the cmds in the free cmd pool 4097 * @instance: Adapter soft state 4098 */ 4099 void megasas_free_cmds(struct megasas_instance *instance) 4100 { 4101 int i; 4102 4103 /* First free the MFI frame pool */ 4104 megasas_teardown_frame_pool(instance); 4105 4106 /* Free all the commands in the cmd_list */ 4107 for (i = 0; i < instance->max_mfi_cmds; i++) 4108 4109 kfree(instance->cmd_list[i]); 4110 4111 /* Free the cmd_list buffer itself */ 4112 kfree(instance->cmd_list); 4113 instance->cmd_list = NULL; 4114 4115 INIT_LIST_HEAD(&instance->cmd_pool); 4116 } 4117 4118 /** 4119 * megasas_alloc_cmds - Allocates the command packets 4120 * @instance: Adapter soft state 4121 * 4122 * Each command that is issued to the FW, whether IO commands from the OS or 4123 * internal commands like IOCTLs, are wrapped in local data structure called 4124 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4125 * the FW. 4126 * 4127 * Each frame has a 32-bit field called context (tag). This context is used 4128 * to get back the megasas_cmd from the frame when a frame gets completed in 4129 * the ISR. Typically the address of the megasas_cmd itself would be used as 4130 * the context. But we wanted to keep the differences between 32 and 64 bit 4131 * systems to the mininum. We always use 32 bit integers for the context. In 4132 * this driver, the 32 bit values are the indices into an array cmd_list. 4133 * This array is used only to look up the megasas_cmd given the context. The 4134 * free commands themselves are maintained in a linked list called cmd_pool. 
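 *
 * Illustrative sketch only: since the context programmed into each frame is
 * cmd->index (see megasas_create_frame_pool() above), a completed context
 * value reported by the firmware maps back to its owning command with a
 * plain array lookup, e.g.:
 *
 *	cmd = instance->cmd_list[context];
 *
 * rather than by round-tripping a kernel pointer through the firmware.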
4135 */ 4136 int megasas_alloc_cmds(struct megasas_instance *instance) 4137 { 4138 int i; 4139 int j; 4140 u16 max_cmd; 4141 struct megasas_cmd *cmd; 4142 4143 max_cmd = instance->max_mfi_cmds; 4144 4145 /* 4146 * instance->cmd_list is an array of struct megasas_cmd pointers. 4147 * Allocate the dynamic array first and then allocate individual 4148 * commands. 4149 */ 4150 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4151 4152 if (!instance->cmd_list) { 4153 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4154 return -ENOMEM; 4155 } 4156 4157 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd); 4158 4159 for (i = 0; i < max_cmd; i++) { 4160 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4161 GFP_KERNEL); 4162 4163 if (!instance->cmd_list[i]) { 4164 4165 for (j = 0; j < i; j++) 4166 kfree(instance->cmd_list[j]); 4167 4168 kfree(instance->cmd_list); 4169 instance->cmd_list = NULL; 4170 4171 return -ENOMEM; 4172 } 4173 } 4174 4175 for (i = 0; i < max_cmd; i++) { 4176 cmd = instance->cmd_list[i]; 4177 memset(cmd, 0, sizeof(struct megasas_cmd)); 4178 cmd->index = i; 4179 cmd->scmd = NULL; 4180 cmd->instance = instance; 4181 4182 list_add_tail(&cmd->list, &instance->cmd_pool); 4183 } 4184 4185 /* 4186 * Create a frame pool and assign one frame to each cmd 4187 */ 4188 if (megasas_create_frame_pool(instance)) { 4189 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4190 megasas_free_cmds(instance); 4191 return -ENOMEM; 4192 } 4193 4194 return 0; 4195 } 4196 4197 /* 4198 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4199 * @instance: Adapter soft state 4200 * 4201 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4202 * or FW is not under OCR. 
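 *
 * For reference, the DCMD issuers in this file consume the return value in
 * a switch of the following shape (copied from the callers below, shown
 * here only to document the contract):
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}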
4203 */ 4204 inline int 4205 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4206 4207 if (instance->adapter_type == MFI_SERIES) 4208 return KILL_ADAPTER; 4209 else if (instance->unload || 4210 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 4211 return IGNORE_TIMEOUT; 4212 else 4213 return INITIATE_OCR; 4214 } 4215 4216 static void 4217 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4218 { 4219 int ret; 4220 struct megasas_cmd *cmd; 4221 struct megasas_dcmd_frame *dcmd; 4222 4223 struct MR_PRIV_DEVICE *mr_device_priv_data; 4224 u16 device_id = 0; 4225 4226 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4227 cmd = megasas_get_cmd(instance); 4228 4229 if (!cmd) { 4230 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4231 return; 4232 } 4233 4234 dcmd = &cmd->frame->dcmd; 4235 4236 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4237 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4238 4239 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4240 dcmd->cmd = MFI_CMD_DCMD; 4241 dcmd->cmd_status = 0xFF; 4242 dcmd->sge_count = 1; 4243 dcmd->flags = MFI_FRAME_DIR_READ; 4244 dcmd->timeout = 0; 4245 dcmd->pad_0 = 0; 4246 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4247 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4248 4249 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4250 sizeof(struct MR_PD_INFO)); 4251 4252 if ((instance->adapter_type != MFI_SERIES) && 4253 !instance->mask_interrupts) 4254 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4255 else 4256 ret = megasas_issue_polled(instance, cmd); 4257 4258 switch (ret) { 4259 case DCMD_SUCCESS: 4260 mr_device_priv_data = sdev->hostdata; 4261 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4262 mr_device_priv_data->interface_type = 4263 instance->pd_info->state.ddf.pdType.intf; 4264 break; 4265 4266 case DCMD_TIMEOUT: 4267 4268 switch (dcmd_timeout_ocr_possible(instance)) { 4269 case INITIATE_OCR: 4270 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4271 megasas_reset_fusion(instance->host, 4272 MFI_IO_TIMEOUT_OCR); 4273 break; 4274 case KILL_ADAPTER: 4275 megaraid_sas_kill_hba(instance); 4276 break; 4277 case IGNORE_TIMEOUT: 4278 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4279 __func__, __LINE__); 4280 break; 4281 } 4282 4283 break; 4284 } 4285 4286 if (ret != DCMD_TIMEOUT) 4287 megasas_return_cmd(instance, cmd); 4288 4289 return; 4290 } 4291 /* 4292 * megasas_get_pd_list_info - Returns FW's pd_list structure 4293 * @instance: Adapter soft state 4294 * @pd_list: pd_list structure 4295 * 4296 * Issues an internal command (DCMD) to get the FW's controller PD 4297 * list structure. This information is mainly used to find out SYSTEM 4298 * supported by the FW. 
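 *
 * For reference only: on DCMD_SUCCESS the MR_PD_ADDRESS entries returned by
 * the firmware are folded into instance->local_pd_list, indexed by deviceId,
 * and then mirrored into instance->pd_list; the per-entry copy performed
 * below is essentially:
 *
 *	tid = le16_to_cpu(pd_addr->deviceId);
 *	instance->local_pd_list[tid].tid        = tid;
 *	instance->local_pd_list[tid].driveType  = pd_addr->scsiDevType;
 *	instance->local_pd_list[tid].driveState = MR_PD_STATE_SYSTEM;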
4299 */ 4300 static int 4301 megasas_get_pd_list(struct megasas_instance *instance) 4302 { 4303 int ret = 0, pd_index = 0; 4304 struct megasas_cmd *cmd; 4305 struct megasas_dcmd_frame *dcmd; 4306 struct MR_PD_LIST *ci; 4307 struct MR_PD_ADDRESS *pd_addr; 4308 dma_addr_t ci_h = 0; 4309 4310 if (instance->pd_list_not_supported) { 4311 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4312 "not supported by firmware\n"); 4313 return ret; 4314 } 4315 4316 ci = instance->pd_list_buf; 4317 ci_h = instance->pd_list_buf_h; 4318 4319 cmd = megasas_get_cmd(instance); 4320 4321 if (!cmd) { 4322 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4323 return -ENOMEM; 4324 } 4325 4326 dcmd = &cmd->frame->dcmd; 4327 4328 memset(ci, 0, sizeof(*ci)); 4329 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4330 4331 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4332 dcmd->mbox.b[1] = 0; 4333 dcmd->cmd = MFI_CMD_DCMD; 4334 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4335 dcmd->sge_count = 1; 4336 dcmd->flags = MFI_FRAME_DIR_READ; 4337 dcmd->timeout = 0; 4338 dcmd->pad_0 = 0; 4339 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4340 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4341 4342 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4343 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4344 4345 if ((instance->adapter_type != MFI_SERIES) && 4346 !instance->mask_interrupts) 4347 ret = megasas_issue_blocked_cmd(instance, cmd, 4348 MFI_IO_TIMEOUT_SECS); 4349 else 4350 ret = megasas_issue_polled(instance, cmd); 4351 4352 switch (ret) { 4353 case DCMD_FAILED: 4354 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4355 "failed/not supported by firmware\n"); 4356 4357 if (instance->adapter_type != MFI_SERIES) 4358 megaraid_sas_kill_hba(instance); 4359 else 4360 instance->pd_list_not_supported = 1; 4361 break; 4362 case DCMD_TIMEOUT: 4363 4364 switch (dcmd_timeout_ocr_possible(instance)) { 4365 case INITIATE_OCR: 4366 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4367 /* 4368 * DCMD failed from AEN path. 4369 * AEN path already hold reset_mutex to avoid PCI access 4370 * while OCR is in progress. 
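 * The mutex is therefore dropped around the OCR call that follows, so the
 * reset path can take reset_mutex itself without deadlocking, and it is
 * re-acquired as soon as megasas_reset_fusion() returns.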
4371 */ 4372 mutex_unlock(&instance->reset_mutex); 4373 megasas_reset_fusion(instance->host, 4374 MFI_IO_TIMEOUT_OCR); 4375 mutex_lock(&instance->reset_mutex); 4376 break; 4377 case KILL_ADAPTER: 4378 megaraid_sas_kill_hba(instance); 4379 break; 4380 case IGNORE_TIMEOUT: 4381 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4382 __func__, __LINE__); 4383 break; 4384 } 4385 4386 break; 4387 4388 case DCMD_SUCCESS: 4389 pd_addr = ci->addr; 4390 4391 if ((le32_to_cpu(ci->count) > 4392 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4393 break; 4394 4395 memset(instance->local_pd_list, 0, 4396 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4397 4398 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4399 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4400 le16_to_cpu(pd_addr->deviceId); 4401 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4402 pd_addr->scsiDevType; 4403 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4404 MR_PD_STATE_SYSTEM; 4405 pd_addr++; 4406 } 4407 4408 memcpy(instance->pd_list, instance->local_pd_list, 4409 sizeof(instance->pd_list)); 4410 break; 4411 4412 } 4413 4414 if (ret != DCMD_TIMEOUT) 4415 megasas_return_cmd(instance, cmd); 4416 4417 return ret; 4418 } 4419 4420 /* 4421 * megasas_get_ld_list_info - Returns FW's ld_list structure 4422 * @instance: Adapter soft state 4423 * @ld_list: ld_list structure 4424 * 4425 * Issues an internal command (DCMD) to get the FW's controller PD 4426 * list structure. This information is mainly used to find out SYSTEM 4427 * supported by the FW. 4428 */ 4429 static int 4430 megasas_get_ld_list(struct megasas_instance *instance) 4431 { 4432 int ret = 0, ld_index = 0, ids = 0; 4433 struct megasas_cmd *cmd; 4434 struct megasas_dcmd_frame *dcmd; 4435 struct MR_LD_LIST *ci; 4436 dma_addr_t ci_h = 0; 4437 u32 ld_count; 4438 4439 ci = instance->ld_list_buf; 4440 ci_h = instance->ld_list_buf_h; 4441 4442 cmd = megasas_get_cmd(instance); 4443 4444 if (!cmd) { 4445 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4446 return -ENOMEM; 4447 } 4448 4449 dcmd = &cmd->frame->dcmd; 4450 4451 memset(ci, 0, sizeof(*ci)); 4452 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4453 4454 if (instance->supportmax256vd) 4455 dcmd->mbox.b[0] = 1; 4456 dcmd->cmd = MFI_CMD_DCMD; 4457 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4458 dcmd->sge_count = 1; 4459 dcmd->flags = MFI_FRAME_DIR_READ; 4460 dcmd->timeout = 0; 4461 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4462 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4463 dcmd->pad_0 = 0; 4464 4465 megasas_set_dma_settings(instance, dcmd, ci_h, 4466 sizeof(struct MR_LD_LIST)); 4467 4468 if ((instance->adapter_type != MFI_SERIES) && 4469 !instance->mask_interrupts) 4470 ret = megasas_issue_blocked_cmd(instance, cmd, 4471 MFI_IO_TIMEOUT_SECS); 4472 else 4473 ret = megasas_issue_polled(instance, cmd); 4474 4475 ld_count = le32_to_cpu(ci->ldCount); 4476 4477 switch (ret) { 4478 case DCMD_FAILED: 4479 megaraid_sas_kill_hba(instance); 4480 break; 4481 case DCMD_TIMEOUT: 4482 4483 switch (dcmd_timeout_ocr_possible(instance)) { 4484 case INITIATE_OCR: 4485 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4486 /* 4487 * DCMD failed from AEN path. 4488 * AEN path already hold reset_mutex to avoid PCI access 4489 * while OCR is in progress. 
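 * Note that DRV_DCMD_SKIP_REFIRE is set on the command first; the intent
 * (implemented in the fusion reset path) is that this timed-out DCMD is not
 * re-fired once the controller has been reset.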
4490 */ 4491 mutex_unlock(&instance->reset_mutex); 4492 megasas_reset_fusion(instance->host, 4493 MFI_IO_TIMEOUT_OCR); 4494 mutex_lock(&instance->reset_mutex); 4495 break; 4496 case KILL_ADAPTER: 4497 megaraid_sas_kill_hba(instance); 4498 break; 4499 case IGNORE_TIMEOUT: 4500 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4501 __func__, __LINE__); 4502 break; 4503 } 4504 4505 break; 4506 4507 case DCMD_SUCCESS: 4508 if (ld_count > instance->fw_supported_vd_count) 4509 break; 4510 4511 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4512 4513 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4514 if (ci->ldList[ld_index].state != 0) { 4515 ids = ci->ldList[ld_index].ref.targetId; 4516 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4517 } 4518 } 4519 4520 break; 4521 } 4522 4523 if (ret != DCMD_TIMEOUT) 4524 megasas_return_cmd(instance, cmd); 4525 4526 return ret; 4527 } 4528 4529 /** 4530 * megasas_ld_list_query - Returns FW's ld_list structure 4531 * @instance: Adapter soft state 4532 * @ld_list: ld_list structure 4533 * 4534 * Issues an internal command (DCMD) to get the FW's controller PD 4535 * list structure. This information is mainly used to find out SYSTEM 4536 * supported by the FW. 4537 */ 4538 static int 4539 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4540 { 4541 int ret = 0, ld_index = 0, ids = 0; 4542 struct megasas_cmd *cmd; 4543 struct megasas_dcmd_frame *dcmd; 4544 struct MR_LD_TARGETID_LIST *ci; 4545 dma_addr_t ci_h = 0; 4546 u32 tgtid_count; 4547 4548 ci = instance->ld_targetid_list_buf; 4549 ci_h = instance->ld_targetid_list_buf_h; 4550 4551 cmd = megasas_get_cmd(instance); 4552 4553 if (!cmd) { 4554 dev_warn(&instance->pdev->dev, 4555 "megasas_ld_list_query: Failed to get cmd\n"); 4556 return -ENOMEM; 4557 } 4558 4559 dcmd = &cmd->frame->dcmd; 4560 4561 memset(ci, 0, sizeof(*ci)); 4562 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4563 4564 dcmd->mbox.b[0] = query_type; 4565 if (instance->supportmax256vd) 4566 dcmd->mbox.b[2] = 1; 4567 4568 dcmd->cmd = MFI_CMD_DCMD; 4569 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4570 dcmd->sge_count = 1; 4571 dcmd->flags = MFI_FRAME_DIR_READ; 4572 dcmd->timeout = 0; 4573 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4574 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4575 dcmd->pad_0 = 0; 4576 4577 megasas_set_dma_settings(instance, dcmd, ci_h, 4578 sizeof(struct MR_LD_TARGETID_LIST)); 4579 4580 if ((instance->adapter_type != MFI_SERIES) && 4581 !instance->mask_interrupts) 4582 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4583 else 4584 ret = megasas_issue_polled(instance, cmd); 4585 4586 switch (ret) { 4587 case DCMD_FAILED: 4588 dev_info(&instance->pdev->dev, 4589 "DCMD not supported by firmware - %s %d\n", 4590 __func__, __LINE__); 4591 ret = megasas_get_ld_list(instance); 4592 break; 4593 case DCMD_TIMEOUT: 4594 switch (dcmd_timeout_ocr_possible(instance)) { 4595 case INITIATE_OCR: 4596 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4597 /* 4598 * DCMD failed from AEN path. 4599 * AEN path already hold reset_mutex to avoid PCI access 4600 * while OCR is in progress. 
4601 */ 4602 mutex_unlock(&instance->reset_mutex); 4603 megasas_reset_fusion(instance->host, 4604 MFI_IO_TIMEOUT_OCR); 4605 mutex_lock(&instance->reset_mutex); 4606 break; 4607 case KILL_ADAPTER: 4608 megaraid_sas_kill_hba(instance); 4609 break; 4610 case IGNORE_TIMEOUT: 4611 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4612 __func__, __LINE__); 4613 break; 4614 } 4615 4616 break; 4617 case DCMD_SUCCESS: 4618 tgtid_count = le32_to_cpu(ci->count); 4619 4620 if ((tgtid_count > (instance->fw_supported_vd_count))) 4621 break; 4622 4623 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4624 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4625 ids = ci->targetId[ld_index]; 4626 instance->ld_ids[ids] = ci->targetId[ld_index]; 4627 } 4628 4629 break; 4630 } 4631 4632 if (ret != DCMD_TIMEOUT) 4633 megasas_return_cmd(instance, cmd); 4634 4635 return ret; 4636 } 4637 4638 /** 4639 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4640 * dcmd.mbox - reserved 4641 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4642 * Desc: This DCMD will return the combined device list 4643 * Status: MFI_STAT_OK - List returned successfully 4644 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4645 * disabled 4646 * @instance: Adapter soft state 4647 * @is_probe: Driver probe check 4648 * Return: 0 if DCMD succeeded 4649 * non-zero if failed 4650 */ 4651 int 4652 megasas_host_device_list_query(struct megasas_instance *instance, 4653 bool is_probe) 4654 { 4655 int ret, i, target_id; 4656 struct megasas_cmd *cmd; 4657 struct megasas_dcmd_frame *dcmd; 4658 struct MR_HOST_DEVICE_LIST *ci; 4659 u32 count; 4660 dma_addr_t ci_h; 4661 4662 ci = instance->host_device_list_buf; 4663 ci_h = instance->host_device_list_buf_h; 4664 4665 cmd = megasas_get_cmd(instance); 4666 4667 if (!cmd) { 4668 dev_warn(&instance->pdev->dev, 4669 "%s: failed to get cmd\n", 4670 __func__); 4671 return -ENOMEM; 4672 } 4673 4674 dcmd = &cmd->frame->dcmd; 4675 4676 memset(ci, 0, sizeof(*ci)); 4677 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4678 4679 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 4680 dcmd->cmd = MFI_CMD_DCMD; 4681 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4682 dcmd->sge_count = 1; 4683 dcmd->flags = MFI_FRAME_DIR_READ; 4684 dcmd->timeout = 0; 4685 dcmd->pad_0 = 0; 4686 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 4687 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 4688 4689 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 4690 4691 if (!instance->mask_interrupts) { 4692 ret = megasas_issue_blocked_cmd(instance, cmd, 4693 MFI_IO_TIMEOUT_SECS); 4694 } else { 4695 ret = megasas_issue_polled(instance, cmd); 4696 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4697 } 4698 4699 switch (ret) { 4700 case DCMD_SUCCESS: 4701 /* Fill the internal pd_list and ld_ids array based on 4702 * targetIds returned by FW 4703 */ 4704 count = le32_to_cpu(ci->count); 4705 4706 memset(instance->local_pd_list, 0, 4707 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4708 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4709 for (i = 0; i < count; i++) { 4710 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 4711 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 4712 instance->local_pd_list[target_id].tid = target_id; 4713 instance->local_pd_list[target_id].driveType = 4714 ci->host_device_list[i].scsi_type; 4715 instance->local_pd_list[target_id].driveState = 4716 MR_PD_STATE_SYSTEM; 4717 } else { 4718 instance->ld_ids[target_id] = target_id; 4719 } 4720 } 4721 4722 memcpy(instance->pd_list, instance->local_pd_list, 4723 sizeof(instance->pd_list)); 4724 break; 4725 4726 case DCMD_TIMEOUT: 4727 switch (dcmd_timeout_ocr_possible(instance)) { 4728 case INITIATE_OCR: 4729 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4730 megasas_reset_fusion(instance->host, 4731 MFI_IO_TIMEOUT_OCR); 4732 break; 4733 case KILL_ADAPTER: 4734 megaraid_sas_kill_hba(instance); 4735 break; 4736 case IGNORE_TIMEOUT: 4737 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4738 __func__, __LINE__); 4739 break; 4740 } 4741 break; 4742 case DCMD_FAILED: 4743 dev_err(&instance->pdev->dev, 4744 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 4745 __func__); 4746 break; 4747 } 4748 4749 if (ret != DCMD_TIMEOUT) 4750 megasas_return_cmd(instance, cmd); 4751 4752 return ret; 4753 } 4754 4755 /* 4756 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4757 * instance : Controller's instance 4758 */ 4759 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4760 { 4761 struct fusion_context *fusion; 4762 u32 ventura_map_sz = 0; 4763 4764 fusion = instance->ctrl_context; 4765 /* For MFI based controllers return dummy success */ 4766 if (!fusion) 4767 return; 4768 4769 instance->supportmax256vd = 4770 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 4771 /* Below is additional check to address future FW enhancement */ 4772 if (instance->ctrl_info_buf->max_lds > 64) 4773 instance->supportmax256vd = 1; 4774 4775 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4776 * MEGASAS_MAX_DEV_PER_CHANNEL; 4777 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4778 * MEGASAS_MAX_DEV_PER_CHANNEL; 4779 if (instance->supportmax256vd) { 4780 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4781 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4782 } else { 4783 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4784 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4785 } 4786 4787 dev_info(&instance->pdev->dev, 4788 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 4789 
instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 4790 instance->ctrl_info_buf->max_lds); 4791 4792 if (instance->max_raid_mapsize) { 4793 ventura_map_sz = instance->max_raid_mapsize * 4794 MR_MIN_MAP_SIZE; /* 64k */ 4795 fusion->current_map_sz = ventura_map_sz; 4796 fusion->max_map_sz = ventura_map_sz; 4797 } else { 4798 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4799 (sizeof(struct MR_LD_SPAN_MAP) * 4800 (instance->fw_supported_vd_count - 1)); 4801 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4802 4803 fusion->max_map_sz = 4804 max(fusion->old_map_sz, fusion->new_map_sz); 4805 4806 if (instance->supportmax256vd) 4807 fusion->current_map_sz = fusion->new_map_sz; 4808 else 4809 fusion->current_map_sz = fusion->old_map_sz; 4810 } 4811 /* irrespective of FW raid maps, driver raid map is constant */ 4812 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4813 } 4814 4815 /* 4816 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 4817 * dcmd.hdr.length - number of bytes to read 4818 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 4819 * Desc: Fill in snapdump properties 4820 * Status: MFI_STAT_OK- Command successful 4821 */ 4822 void megasas_get_snapdump_properties(struct megasas_instance *instance) 4823 { 4824 int ret = 0; 4825 struct megasas_cmd *cmd; 4826 struct megasas_dcmd_frame *dcmd; 4827 struct MR_SNAPDUMP_PROPERTIES *ci; 4828 dma_addr_t ci_h = 0; 4829 4830 ci = instance->snapdump_prop; 4831 ci_h = instance->snapdump_prop_h; 4832 4833 if (!ci) 4834 return; 4835 4836 cmd = megasas_get_cmd(instance); 4837 4838 if (!cmd) { 4839 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 4840 return; 4841 } 4842 4843 dcmd = &cmd->frame->dcmd; 4844 4845 memset(ci, 0, sizeof(*ci)); 4846 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4847 4848 dcmd->cmd = MFI_CMD_DCMD; 4849 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4850 dcmd->sge_count = 1; 4851 dcmd->flags = MFI_FRAME_DIR_READ; 4852 dcmd->timeout = 0; 4853 dcmd->pad_0 = 0; 4854 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4855 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 4856 4857 megasas_set_dma_settings(instance, dcmd, ci_h, 4858 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 4859 4860 if (!instance->mask_interrupts) { 4861 ret = megasas_issue_blocked_cmd(instance, cmd, 4862 MFI_IO_TIMEOUT_SECS); 4863 } else { 4864 ret = megasas_issue_polled(instance, cmd); 4865 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4866 } 4867 4868 switch (ret) { 4869 case DCMD_SUCCESS: 4870 instance->snapdump_wait_time = 4871 min_t(u8, ci->trigger_min_num_sec_before_ocr, 4872 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 4873 break; 4874 4875 case DCMD_TIMEOUT: 4876 switch (dcmd_timeout_ocr_possible(instance)) { 4877 case INITIATE_OCR: 4878 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4879 megasas_reset_fusion(instance->host, 4880 MFI_IO_TIMEOUT_OCR); 4881 break; 4882 case KILL_ADAPTER: 4883 megaraid_sas_kill_hba(instance); 4884 break; 4885 case IGNORE_TIMEOUT: 4886 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4887 __func__, __LINE__); 4888 break; 4889 } 4890 } 4891 4892 if (ret != DCMD_TIMEOUT) 4893 megasas_return_cmd(instance, cmd); 4894 } 4895 4896 /** 4897 * megasas_get_controller_info - Returns FW's controller structure 4898 * @instance: Adapter soft state 4899 * 4900 * Issues an internal command (DCMD) to get the FW's controller structure. 4901 * This information is mainly used to find out the maximum IO transfer per 4902 * command supported by the FW. 
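 *
 * For reference, megasas_init_fw() later derives the per-command sector
 * limit from two fields of this structure, taking the smaller of:
 *
 *	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
 *			le16_to_cpu(ctrl_info->max_strips_per_io);
 *	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);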
4903 */ 4904 int 4905 megasas_get_ctrl_info(struct megasas_instance *instance) 4906 { 4907 int ret = 0; 4908 struct megasas_cmd *cmd; 4909 struct megasas_dcmd_frame *dcmd; 4910 struct megasas_ctrl_info *ci; 4911 dma_addr_t ci_h = 0; 4912 4913 ci = instance->ctrl_info_buf; 4914 ci_h = instance->ctrl_info_buf_h; 4915 4916 cmd = megasas_get_cmd(instance); 4917 4918 if (!cmd) { 4919 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4920 return -ENOMEM; 4921 } 4922 4923 dcmd = &cmd->frame->dcmd; 4924 4925 memset(ci, 0, sizeof(*ci)); 4926 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4927 4928 dcmd->cmd = MFI_CMD_DCMD; 4929 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4930 dcmd->sge_count = 1; 4931 dcmd->flags = MFI_FRAME_DIR_READ; 4932 dcmd->timeout = 0; 4933 dcmd->pad_0 = 0; 4934 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4935 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4936 dcmd->mbox.b[0] = 1; 4937 4938 megasas_set_dma_settings(instance, dcmd, ci_h, 4939 sizeof(struct megasas_ctrl_info)); 4940 4941 if ((instance->adapter_type != MFI_SERIES) && 4942 !instance->mask_interrupts) { 4943 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4944 } else { 4945 ret = megasas_issue_polled(instance, cmd); 4946 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4947 } 4948 4949 switch (ret) { 4950 case DCMD_SUCCESS: 4951 /* Save required controller information in 4952 * CPU endianness format. 4953 */ 4954 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 4955 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 4956 le32_to_cpus((u32 *)&ci->adapterOperations2); 4957 le32_to_cpus((u32 *)&ci->adapterOperations3); 4958 le16_to_cpus((u16 *)&ci->adapter_operations4); 4959 4960 /* Update the latest Ext VD info. 4961 * From Init path, store current firmware details. 4962 * From OCR path, detect any firmware properties changes. 4963 * in case of Firmware upgrade without system reboot. 4964 */ 4965 megasas_update_ext_vd_details(instance); 4966 instance->use_seqnum_jbod_fp = 4967 ci->adapterOperations3.useSeqNumJbodFP; 4968 instance->support_morethan256jbod = 4969 ci->adapter_operations4.support_pd_map_target_id; 4970 instance->support_nvme_passthru = 4971 ci->adapter_operations4.support_nvme_passthru; 4972 instance->task_abort_tmo = ci->TaskAbortTO; 4973 instance->max_reset_tmo = ci->MaxResetTO; 4974 4975 /*Check whether controller is iMR or MR */ 4976 instance->is_imr = (ci->memory_size ? 0 : 1); 4977 4978 instance->snapdump_wait_time = 4979 (ci->properties.on_off_properties2.enable_snap_dump ? 4980 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 4981 4982 instance->enable_fw_dev_list = 4983 ci->properties.on_off_properties2.enable_fw_dev_list; 4984 4985 dev_info(&instance->pdev->dev, 4986 "controller type\t: %s(%dMB)\n", 4987 instance->is_imr ? "iMR" : "MR", 4988 le16_to_cpu(ci->memory_size)); 4989 4990 instance->disableOnlineCtrlReset = 4991 ci->properties.OnOffProperties.disableOnlineCtrlReset; 4992 instance->secure_jbod_support = 4993 ci->adapterOperations3.supportSecurityonJBOD; 4994 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4995 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4996 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4997 instance->secure_jbod_support ? "Yes" : "No"); 4998 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 4999 instance->support_nvme_passthru ? 
"Yes" : "No"); 5000 dev_info(&instance->pdev->dev, 5001 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5002 instance->task_abort_tmo, instance->max_reset_tmo); 5003 5004 break; 5005 5006 case DCMD_TIMEOUT: 5007 switch (dcmd_timeout_ocr_possible(instance)) { 5008 case INITIATE_OCR: 5009 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5010 megasas_reset_fusion(instance->host, 5011 MFI_IO_TIMEOUT_OCR); 5012 break; 5013 case KILL_ADAPTER: 5014 megaraid_sas_kill_hba(instance); 5015 break; 5016 case IGNORE_TIMEOUT: 5017 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5018 __func__, __LINE__); 5019 break; 5020 } 5021 break; 5022 case DCMD_FAILED: 5023 megaraid_sas_kill_hba(instance); 5024 break; 5025 5026 } 5027 5028 if (ret != DCMD_TIMEOUT) 5029 megasas_return_cmd(instance, cmd); 5030 5031 return ret; 5032 } 5033 5034 /* 5035 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5036 * to firmware 5037 * 5038 * @instance: Adapter soft state 5039 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5040 MR_CRASH_BUF_TURN_OFF = 0 5041 MR_CRASH_BUF_TURN_ON = 1 5042 * @return 0 on success non-zero on failure. 5043 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5044 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5045 * that driver supports crash dump feature. This DCMD will be sent only if 5046 * crash dump feature is supported by the FW. 5047 * 5048 */ 5049 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5050 u8 crash_buf_state) 5051 { 5052 int ret = 0; 5053 struct megasas_cmd *cmd; 5054 struct megasas_dcmd_frame *dcmd; 5055 5056 cmd = megasas_get_cmd(instance); 5057 5058 if (!cmd) { 5059 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5060 return -ENOMEM; 5061 } 5062 5063 5064 dcmd = &cmd->frame->dcmd; 5065 5066 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5067 dcmd->mbox.b[0] = crash_buf_state; 5068 dcmd->cmd = MFI_CMD_DCMD; 5069 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5070 dcmd->sge_count = 1; 5071 dcmd->flags = MFI_FRAME_DIR_NONE; 5072 dcmd->timeout = 0; 5073 dcmd->pad_0 = 0; 5074 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5075 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5076 5077 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5078 CRASH_DMA_BUF_SIZE); 5079 5080 if ((instance->adapter_type != MFI_SERIES) && 5081 !instance->mask_interrupts) 5082 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5083 else 5084 ret = megasas_issue_polled(instance, cmd); 5085 5086 if (ret == DCMD_TIMEOUT) { 5087 switch (dcmd_timeout_ocr_possible(instance)) { 5088 case INITIATE_OCR: 5089 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5090 megasas_reset_fusion(instance->host, 5091 MFI_IO_TIMEOUT_OCR); 5092 break; 5093 case KILL_ADAPTER: 5094 megaraid_sas_kill_hba(instance); 5095 break; 5096 case IGNORE_TIMEOUT: 5097 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5098 __func__, __LINE__); 5099 break; 5100 } 5101 } else 5102 megasas_return_cmd(instance, cmd); 5103 5104 return ret; 5105 } 5106 5107 /** 5108 * megasas_issue_init_mfi - Initializes the FW 5109 * @instance: Adapter soft state 5110 * 5111 * Issues the INIT MFI cmd 5112 */ 5113 static int 5114 megasas_issue_init_mfi(struct megasas_instance *instance) 5115 { 5116 __le32 context; 5117 struct megasas_cmd *cmd; 5118 struct megasas_init_frame *init_frame; 5119 struct megasas_init_queue_info *initq_info; 5120 dma_addr_t init_frame_h; 5121 dma_addr_t 
initq_info_h; 5122 5123 /* 5124 * Prepare a init frame. Note the init frame points to queue info 5125 * structure. Each frame has SGL allocated after first 64 bytes. For 5126 * this frame - since we don't need any SGL - we use SGL's space as 5127 * queue info structure 5128 * 5129 * We will not get a NULL command below. We just created the pool. 5130 */ 5131 cmd = megasas_get_cmd(instance); 5132 5133 init_frame = (struct megasas_init_frame *)cmd->frame; 5134 initq_info = (struct megasas_init_queue_info *) 5135 ((unsigned long)init_frame + 64); 5136 5137 init_frame_h = cmd->frame_phys_addr; 5138 initq_info_h = init_frame_h + 64; 5139 5140 context = init_frame->context; 5141 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5142 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5143 init_frame->context = context; 5144 5145 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5146 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5147 5148 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5149 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5150 5151 init_frame->cmd = MFI_CMD_INIT; 5152 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5153 init_frame->queue_info_new_phys_addr_lo = 5154 cpu_to_le32(lower_32_bits(initq_info_h)); 5155 init_frame->queue_info_new_phys_addr_hi = 5156 cpu_to_le32(upper_32_bits(initq_info_h)); 5157 5158 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5159 5160 /* 5161 * disable the intr before firing the init frame to FW 5162 */ 5163 instance->instancet->disable_intr(instance); 5164 5165 /* 5166 * Issue the init frame in polled mode 5167 */ 5168 5169 if (megasas_issue_polled(instance, cmd)) { 5170 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5171 megasas_return_cmd(instance, cmd); 5172 goto fail_fw_init; 5173 } 5174 5175 megasas_return_cmd(instance, cmd); 5176 5177 return 0; 5178 5179 fail_fw_init: 5180 return -EINVAL; 5181 } 5182 5183 static u32 5184 megasas_init_adapter_mfi(struct megasas_instance *instance) 5185 { 5186 u32 context_sz; 5187 u32 reply_q_sz; 5188 5189 /* 5190 * Get various operational parameters from status register 5191 */ 5192 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5193 /* 5194 * Reduce the max supported cmds by 1. This is to ensure that the 5195 * reply_q_sz (1 more than the max cmd that driver may send) 5196 * does not exceed max cmds that the FW can support 5197 */ 5198 instance->max_fw_cmds = instance->max_fw_cmds-1; 5199 instance->max_mfi_cmds = instance->max_fw_cmds; 5200 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5201 0x10; 5202 /* 5203 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5204 * are reserved for IOCTL + driver's internal DCMDs. 
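 * Other MFI adapters reserve MEGASAS_INT_CMDS instead, so in either case the
 * SCSI command budget is simply the firmware budget minus the reserved
 * internal slots:
 *
 *	max_scsi_cmds = max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;	(skinny)
 *	max_scsi_cmds = max_fw_cmds - MEGASAS_INT_CMDS;		(all others)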
5205 */ 5206 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5207 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5208 instance->max_scsi_cmds = (instance->max_fw_cmds - 5209 MEGASAS_SKINNY_INT_CMDS); 5210 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5211 } else { 5212 instance->max_scsi_cmds = (instance->max_fw_cmds - 5213 MEGASAS_INT_CMDS); 5214 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5215 } 5216 5217 instance->cur_can_queue = instance->max_scsi_cmds; 5218 /* 5219 * Create a pool of commands 5220 */ 5221 if (megasas_alloc_cmds(instance)) 5222 goto fail_alloc_cmds; 5223 5224 /* 5225 * Allocate memory for reply queue. Length of reply queue should 5226 * be _one_ more than the maximum commands handled by the firmware. 5227 * 5228 * Note: When FW completes commands, it places corresponding contex 5229 * values in this circular reply queue. This circular queue is a fairly 5230 * typical producer-consumer queue. FW is the producer (of completed 5231 * commands) and the driver is the consumer. 5232 */ 5233 context_sz = sizeof(u32); 5234 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5235 5236 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5237 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5238 5239 if (!instance->reply_queue) { 5240 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5241 goto fail_reply_queue; 5242 } 5243 5244 if (megasas_issue_init_mfi(instance)) 5245 goto fail_fw_init; 5246 5247 if (megasas_get_ctrl_info(instance)) { 5248 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5249 "Fail from %s %d\n", instance->unique_id, 5250 __func__, __LINE__); 5251 goto fail_fw_init; 5252 } 5253 5254 instance->fw_support_ieee = 0; 5255 instance->fw_support_ieee = 5256 (instance->instancet->read_fw_status_reg(instance) & 5257 0x04000000); 5258 5259 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5260 instance->fw_support_ieee); 5261 5262 if (instance->fw_support_ieee) 5263 instance->flag_ieee = 1; 5264 5265 return 0; 5266 5267 fail_fw_init: 5268 5269 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5270 instance->reply_queue, instance->reply_queue_h); 5271 fail_reply_queue: 5272 megasas_free_cmds(instance); 5273 5274 fail_alloc_cmds: 5275 return 1; 5276 } 5277 5278 /* 5279 * megasas_setup_irqs_ioapic - register legacy interrupts. 5280 * @instance: Adapter soft state 5281 * 5282 * Do not enable interrupt, only setup ISRs. 5283 * 5284 * Return 0 on success. 5285 */ 5286 static int 5287 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5288 { 5289 struct pci_dev *pdev; 5290 5291 pdev = instance->pdev; 5292 instance->irq_context[0].instance = instance; 5293 instance->irq_context[0].MSIxIndex = 0; 5294 if (request_irq(pci_irq_vector(pdev, 0), 5295 instance->instancet->service_isr, IRQF_SHARED, 5296 "megasas", &instance->irq_context[0])) { 5297 dev_err(&instance->pdev->dev, 5298 "Failed to register IRQ from %s %d\n", 5299 __func__, __LINE__); 5300 return -1; 5301 } 5302 return 0; 5303 } 5304 5305 /** 5306 * megasas_setup_irqs_msix - register MSI-x interrupts. 5307 * @instance: Adapter soft state 5308 * @is_probe: Driver probe check 5309 * 5310 * Do not enable interrupt, only setup ISRs. 5311 * 5312 * Return 0 on success. 
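 *
 * If request_irq() fails for a vector, the vectors registered so far are
 * freed again; when called from probe the function additionally releases
 * the MSI-X vectors and falls back to the legacy IO-APIC setup, i.e.:
 *
 *	instance->msix_vectors = 0;
 *	pci_free_irq_vectors(instance->pdev);
 *	return megasas_setup_irqs_ioapic(instance);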
5313 */ 5314 static int 5315 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5316 { 5317 int i, j; 5318 struct pci_dev *pdev; 5319 5320 pdev = instance->pdev; 5321 5322 /* Try MSI-x */ 5323 for (i = 0; i < instance->msix_vectors; i++) { 5324 instance->irq_context[i].instance = instance; 5325 instance->irq_context[i].MSIxIndex = i; 5326 if (request_irq(pci_irq_vector(pdev, i), 5327 instance->instancet->service_isr, 0, "megasas", 5328 &instance->irq_context[i])) { 5329 dev_err(&instance->pdev->dev, 5330 "Failed to register IRQ for vector %d.\n", i); 5331 for (j = 0; j < i; j++) 5332 free_irq(pci_irq_vector(pdev, j), 5333 &instance->irq_context[j]); 5334 /* Retry irq register for IO_APIC*/ 5335 instance->msix_vectors = 0; 5336 if (is_probe) { 5337 pci_free_irq_vectors(instance->pdev); 5338 return megasas_setup_irqs_ioapic(instance); 5339 } else { 5340 return -1; 5341 } 5342 } 5343 } 5344 return 0; 5345 } 5346 5347 /* 5348 * megasas_destroy_irqs- unregister interrupts. 5349 * @instance: Adapter soft state 5350 * return: void 5351 */ 5352 static void 5353 megasas_destroy_irqs(struct megasas_instance *instance) { 5354 5355 int i; 5356 5357 if (instance->msix_vectors) 5358 for (i = 0; i < instance->msix_vectors; i++) { 5359 free_irq(pci_irq_vector(instance->pdev, i), 5360 &instance->irq_context[i]); 5361 } 5362 else 5363 free_irq(pci_irq_vector(instance->pdev, 0), 5364 &instance->irq_context[0]); 5365 } 5366 5367 /** 5368 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5369 * @instance: Adapter soft state 5370 * @is_probe: Driver probe check 5371 * 5372 * Return 0 on success. 5373 */ 5374 void 5375 megasas_setup_jbod_map(struct megasas_instance *instance) 5376 { 5377 int i; 5378 struct fusion_context *fusion = instance->ctrl_context; 5379 u32 pd_seq_map_sz; 5380 5381 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5382 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5383 5384 if (reset_devices || !fusion || 5385 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) { 5386 dev_info(&instance->pdev->dev, 5387 "Jbod map is not supported %s %d\n", 5388 __func__, __LINE__); 5389 instance->use_seqnum_jbod_fp = false; 5390 return; 5391 } 5392 5393 if (fusion->pd_seq_sync[0]) 5394 goto skip_alloc; 5395 5396 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5397 fusion->pd_seq_sync[i] = dma_alloc_coherent 5398 (&instance->pdev->dev, pd_seq_map_sz, 5399 &fusion->pd_seq_phys[i], GFP_KERNEL); 5400 if (!fusion->pd_seq_sync[i]) { 5401 dev_err(&instance->pdev->dev, 5402 "Failed to allocate memory from %s %d\n", 5403 __func__, __LINE__); 5404 if (i == 1) { 5405 dma_free_coherent(&instance->pdev->dev, 5406 pd_seq_map_sz, fusion->pd_seq_sync[0], 5407 fusion->pd_seq_phys[0]); 5408 fusion->pd_seq_sync[0] = NULL; 5409 } 5410 instance->use_seqnum_jbod_fp = false; 5411 return; 5412 } 5413 } 5414 5415 skip_alloc: 5416 if (!megasas_sync_pd_seq_num(instance, false) && 5417 !megasas_sync_pd_seq_num(instance, true)) 5418 instance->use_seqnum_jbod_fp = true; 5419 else 5420 instance->use_seqnum_jbod_fp = false; 5421 } 5422 5423 static void megasas_setup_reply_map(struct megasas_instance *instance) 5424 { 5425 const struct cpumask *mask; 5426 unsigned int queue, cpu; 5427 5428 for (queue = 0; queue < instance->msix_vectors; queue++) { 5429 mask = pci_irq_get_affinity(instance->pdev, queue); 5430 if (!mask) 5431 goto fallback; 5432 5433 for_each_cpu(cpu, mask) 5434 instance->reply_map[cpu] = queue; 5435 } 5436 return; 5437 5438 fallback: 5439 for_each_possible_cpu(cpu) 
5440 instance->reply_map[cpu] = cpu % instance->msix_vectors; 5441 } 5442 5443 /** 5444 * megasas_get_device_list - Get the PD and LD device list from FW. 5445 * @instance: Adapter soft state 5446 * @return: Success or failure 5447 * 5448 * Issue DCMDs to Firmware to get the PD and LD list. 5449 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5450 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 5451 */ 5452 static 5453 int megasas_get_device_list(struct megasas_instance *instance) 5454 { 5455 memset(instance->pd_list, 0, 5456 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5457 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5458 5459 if (instance->enable_fw_dev_list) { 5460 if (megasas_host_device_list_query(instance, true)) 5461 return FAILED; 5462 } else { 5463 if (megasas_get_pd_list(instance) < 0) { 5464 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5465 return FAILED; 5466 } 5467 5468 if (megasas_ld_list_query(instance, 5469 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5470 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5471 return FAILED; 5472 } 5473 } 5474 5475 return SUCCESS; 5476 } 5477 /** 5478 * megasas_init_fw - Initializes the FW 5479 * @instance: Adapter soft state 5480 * 5481 * This is the main function for initializing firmware 5482 */ 5483 5484 static int megasas_init_fw(struct megasas_instance *instance) 5485 { 5486 u32 max_sectors_1; 5487 u32 max_sectors_2, tmp_sectors, msix_enable; 5488 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 5489 resource_size_t base_addr; 5490 struct megasas_ctrl_info *ctrl_info = NULL; 5491 unsigned long bar_list; 5492 int i, j, loop, fw_msix_count = 0; 5493 struct IOV_111 *iovPtr; 5494 struct fusion_context *fusion; 5495 bool do_adp_reset = true; 5496 5497 fusion = instance->ctrl_context; 5498 5499 /* Find first memory bar */ 5500 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5501 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5502 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5503 "megasas: LSI")) { 5504 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5505 return -EBUSY; 5506 } 5507 5508 base_addr = pci_resource_start(instance->pdev, instance->bar); 5509 instance->reg_set = ioremap_nocache(base_addr, 8192); 5510 5511 if (!instance->reg_set) { 5512 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5513 goto fail_ioremap; 5514 } 5515 5516 if (instance->adapter_type != MFI_SERIES) 5517 instance->instancet = &megasas_instance_template_fusion; 5518 else { 5519 switch (instance->pdev->device) { 5520 case PCI_DEVICE_ID_LSI_SAS1078R: 5521 case PCI_DEVICE_ID_LSI_SAS1078DE: 5522 instance->instancet = &megasas_instance_template_ppc; 5523 break; 5524 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5525 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5526 instance->instancet = &megasas_instance_template_gen2; 5527 break; 5528 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5529 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5530 instance->instancet = &megasas_instance_template_skinny; 5531 break; 5532 case PCI_DEVICE_ID_LSI_SAS1064R: 5533 case PCI_DEVICE_ID_DELL_PERC5: 5534 default: 5535 instance->instancet = &megasas_instance_template_xscale; 5536 instance->pd_list_not_supported = 1; 5537 break; 5538 } 5539 } 5540 5541 if (megasas_transition_to_ready(instance, 0)) { 5542 if (instance->adapter_type >= INVADER_SERIES) { 5543 status_reg = instance->instancet->read_fw_status_reg( 5544 instance); 5545 do_adp_reset = status_reg & 
MFI_RESET_ADAPTER; 5546 } 5547 5548 if (do_adp_reset) { 5549 atomic_set(&instance->fw_reset_no_pci_access, 1); 5550 instance->instancet->adp_reset 5551 (instance, instance->reg_set); 5552 atomic_set(&instance->fw_reset_no_pci_access, 0); 5553 dev_info(&instance->pdev->dev, 5554 "FW restarted successfully from %s!\n", 5555 __func__); 5556 5557 /*waiting for about 30 second before retry*/ 5558 ssleep(30); 5559 5560 if (megasas_transition_to_ready(instance, 0)) 5561 goto fail_ready_state; 5562 } else { 5563 goto fail_ready_state; 5564 } 5565 } 5566 5567 megasas_init_ctrl_params(instance); 5568 5569 if (megasas_set_dma_mask(instance)) 5570 goto fail_ready_state; 5571 5572 if (megasas_alloc_ctrl_mem(instance)) 5573 goto fail_alloc_dma_buf; 5574 5575 if (megasas_alloc_ctrl_dma_buffers(instance)) 5576 goto fail_alloc_dma_buf; 5577 5578 fusion = instance->ctrl_context; 5579 5580 if (instance->adapter_type >= VENTURA_SERIES) { 5581 scratch_pad_2 = 5582 megasas_readl(instance, 5583 &instance->reg_set->outbound_scratch_pad_2); 5584 instance->max_raid_mapsize = ((scratch_pad_2 >> 5585 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5586 MR_MAX_RAID_MAP_SIZE_MASK); 5587 } 5588 5589 /* Check if MSI-X is supported while in ready state */ 5590 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 5591 0x4000000) >> 0x1a; 5592 if (msix_enable && !msix_disable) { 5593 int irq_flags = PCI_IRQ_MSIX; 5594 5595 scratch_pad_1 = megasas_readl 5596 (instance, &instance->reg_set->outbound_scratch_pad_1); 5597 /* Check max MSI-X vectors */ 5598 if (fusion) { 5599 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5600 /* Thunderbolt Series*/ 5601 instance->msix_vectors = (scratch_pad_1 5602 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5603 fw_msix_count = instance->msix_vectors; 5604 } else { 5605 instance->msix_vectors = ((scratch_pad_1 5606 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5607 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5608 5609 /* 5610 * For Invader series, > 8 MSI-x vectors 5611 * supported by FW/HW implies combined 5612 * reply queue mode is enabled. 5613 * For Ventura series, > 16 MSI-x vectors 5614 * supported by FW/HW implies combined 5615 * reply queue mode is enabled. 5616 */ 5617 switch (instance->adapter_type) { 5618 case INVADER_SERIES: 5619 if (instance->msix_vectors > 8) 5620 instance->msix_combined = true; 5621 break; 5622 case AERO_SERIES: 5623 case VENTURA_SERIES: 5624 if (instance->msix_vectors > 16) 5625 instance->msix_combined = true; 5626 break; 5627 } 5628 5629 if (rdpq_enable) 5630 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
5631 1 : 0; 5632 fw_msix_count = instance->msix_vectors; 5633 /* Save 1-15 reply post index address to local memory 5634 * Index 0 is already saved from reg offset 5635 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5636 */ 5637 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5638 instance->reply_post_host_index_addr[loop] = 5639 (u32 __iomem *) 5640 ((u8 __iomem *)instance->reg_set + 5641 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5642 + (loop * 0x10)); 5643 } 5644 } 5645 if (msix_vectors) 5646 instance->msix_vectors = min(msix_vectors, 5647 instance->msix_vectors); 5648 } else /* MFI adapters */ 5649 instance->msix_vectors = 1; 5650 /* Don't bother allocating more MSI-X vectors than cpus */ 5651 instance->msix_vectors = min(instance->msix_vectors, 5652 (unsigned int)num_online_cpus()); 5653 if (smp_affinity_enable) 5654 irq_flags |= PCI_IRQ_AFFINITY; 5655 i = pci_alloc_irq_vectors(instance->pdev, 1, 5656 instance->msix_vectors, irq_flags); 5657 if (i > 0) 5658 instance->msix_vectors = i; 5659 else 5660 instance->msix_vectors = 0; 5661 } 5662 /* 5663 * MSI-X host index 0 is common for all adapter. 5664 * It is used for all MPT based Adapters. 5665 */ 5666 if (instance->msix_combined) { 5667 instance->reply_post_host_index_addr[0] = 5668 (u32 *)((u8 *)instance->reg_set + 5669 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 5670 } else { 5671 instance->reply_post_host_index_addr[0] = 5672 (u32 *)((u8 *)instance->reg_set + 5673 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5674 } 5675 5676 if (!instance->msix_vectors) { 5677 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5678 if (i < 0) 5679 goto fail_init_adapter; 5680 } 5681 5682 megasas_setup_reply_map(instance); 5683 5684 dev_info(&instance->pdev->dev, 5685 "firmware supports msix\t: (%d)", fw_msix_count); 5686 dev_info(&instance->pdev->dev, 5687 "current msix/online cpus\t: (%d/%d)\n", 5688 instance->msix_vectors, (unsigned int)num_online_cpus()); 5689 dev_info(&instance->pdev->dev, 5690 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5691 5692 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5693 (unsigned long)instance); 5694 5695 /* 5696 * Below are default value for legacy Firmware. 5697 * non-fusion based controllers 5698 */ 5699 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5700 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5701 /* Get operational params, sge flags, send init cmd to controller */ 5702 if (instance->instancet->init_adapter(instance)) 5703 goto fail_init_adapter; 5704 5705 if (instance->adapter_type >= VENTURA_SERIES) { 5706 scratch_pad_3 = 5707 megasas_readl(instance, 5708 &instance->reg_set->outbound_scratch_pad_3); 5709 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 5710 MR_DEFAULT_NVME_PAGE_SHIFT) 5711 instance->nvme_page_size = 5712 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 5713 5714 dev_info(&instance->pdev->dev, 5715 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5716 } 5717 5718 if (instance->msix_vectors ? 
5719 megasas_setup_irqs_msix(instance, 1) : 5720 megasas_setup_irqs_ioapic(instance)) 5721 goto fail_init_adapter; 5722 5723 instance->instancet->enable_intr(instance); 5724 5725 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5726 5727 megasas_setup_jbod_map(instance); 5728 5729 if (megasas_get_device_list(instance) != SUCCESS) { 5730 dev_err(&instance->pdev->dev, 5731 "%s: megasas_get_device_list failed\n", 5732 __func__); 5733 goto fail_get_ld_pd_list; 5734 } 5735 5736 /* stream detection initialization */ 5737 if (instance->adapter_type >= VENTURA_SERIES) { 5738 fusion->stream_detect_by_ld = 5739 kcalloc(MAX_LOGICAL_DRIVES_EXT, 5740 sizeof(struct LD_STREAM_DETECT *), 5741 GFP_KERNEL); 5742 if (!fusion->stream_detect_by_ld) { 5743 dev_err(&instance->pdev->dev, 5744 "unable to allocate stream detection for pool of LDs\n"); 5745 goto fail_get_ld_pd_list; 5746 } 5747 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5748 fusion->stream_detect_by_ld[i] = 5749 kzalloc(sizeof(struct LD_STREAM_DETECT), 5750 GFP_KERNEL); 5751 if (!fusion->stream_detect_by_ld[i]) { 5752 dev_err(&instance->pdev->dev, 5753 "unable to allocate stream detect by LD\n "); 5754 for (j = 0; j < i; ++j) 5755 kfree(fusion->stream_detect_by_ld[j]); 5756 kfree(fusion->stream_detect_by_ld); 5757 fusion->stream_detect_by_ld = NULL; 5758 goto fail_get_ld_pd_list; 5759 } 5760 fusion->stream_detect_by_ld[i]->mru_bit_map 5761 = MR_STREAM_BITMAP; 5762 } 5763 } 5764 5765 /* 5766 * Compute the max allowed sectors per IO: The controller info has two 5767 * limits on max sectors. Driver should use the minimum of these two. 5768 * 5769 * 1 << stripe_sz_ops.min = max sectors per strip 5770 * 5771 * Note that older firmwares ( < FW ver 30) didn't report information 5772 * to calculate max_sectors_1. So the number ended up as zero always. 5773 */ 5774 tmp_sectors = 0; 5775 ctrl_info = instance->ctrl_info_buf; 5776 5777 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5778 le16_to_cpu(ctrl_info->max_strips_per_io); 5779 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5780 5781 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5782 5783 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5784 instance->passive = ctrl_info->cluster.passive; 5785 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5786 instance->UnevenSpanSupport = 5787 ctrl_info->adapterOperations2.supportUnevenSpans; 5788 if (instance->UnevenSpanSupport) { 5789 struct fusion_context *fusion = instance->ctrl_context; 5790 if (MR_ValidateMapInfo(instance, instance->map_id)) 5791 fusion->fast_path_io = 1; 5792 else 5793 fusion->fast_path_io = 0; 5794 5795 } 5796 if (ctrl_info->host_interface.SRIOV) { 5797 instance->requestorId = ctrl_info->iov.requestorId; 5798 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5799 if (!ctrl_info->adapterOperations2.activePassive) 5800 instance->PlasmaFW111 = 1; 5801 5802 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5803 instance->PlasmaFW111 ? 
"1.11" : "new"); 5804 5805 if (instance->PlasmaFW111) { 5806 iovPtr = (struct IOV_111 *) 5807 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5808 instance->requestorId = iovPtr->requestorId; 5809 } 5810 } 5811 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5812 instance->requestorId); 5813 } 5814 5815 instance->crash_dump_fw_support = 5816 ctrl_info->adapterOperations3.supportCrashDump; 5817 instance->crash_dump_drv_support = 5818 (instance->crash_dump_fw_support && 5819 instance->crash_dump_buf); 5820 if (instance->crash_dump_drv_support) 5821 megasas_set_crash_dump_params(instance, 5822 MR_CRASH_BUF_TURN_OFF); 5823 5824 else { 5825 if (instance->crash_dump_buf) 5826 dma_free_coherent(&instance->pdev->dev, 5827 CRASH_DMA_BUF_SIZE, 5828 instance->crash_dump_buf, 5829 instance->crash_dump_h); 5830 instance->crash_dump_buf = NULL; 5831 } 5832 5833 if (instance->snapdump_wait_time) { 5834 megasas_get_snapdump_properties(instance); 5835 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 5836 instance->snapdump_wait_time); 5837 } 5838 5839 dev_info(&instance->pdev->dev, 5840 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5841 le16_to_cpu(ctrl_info->pci.vendor_id), 5842 le16_to_cpu(ctrl_info->pci.device_id), 5843 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5844 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5845 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5846 instance->UnevenSpanSupport ? "yes" : "no"); 5847 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5848 instance->crash_dump_drv_support ? "yes" : "no"); 5849 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5850 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5851 5852 instance->max_sectors_per_req = instance->max_num_sge * 5853 SGE_BUFFER_SIZE / 512; 5854 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5855 instance->max_sectors_per_req = tmp_sectors; 5856 5857 /* Check for valid throttlequeuedepth module parameter */ 5858 if (throttlequeuedepth && 5859 throttlequeuedepth <= instance->max_scsi_cmds) 5860 instance->throttlequeuedepth = throttlequeuedepth; 5861 else 5862 instance->throttlequeuedepth = 5863 MEGASAS_THROTTLE_QUEUE_DEPTH; 5864 5865 if ((resetwaittime < 1) || 5866 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 5867 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5868 5869 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5870 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5871 5872 /* Launch SR-IOV heartbeat timer */ 5873 if (instance->requestorId) { 5874 if (!megasas_sriov_start_heartbeat(instance, 1)) { 5875 megasas_start_timer(instance); 5876 } else { 5877 instance->skip_heartbeat_timer_del = 1; 5878 goto fail_get_ld_pd_list; 5879 } 5880 } 5881 5882 /* 5883 * Create and start watchdog thread which will monitor 5884 * controller state every 1 sec and trigger OCR when 5885 * it enters fault state 5886 */ 5887 if (instance->adapter_type != MFI_SERIES) 5888 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 5889 goto fail_start_watchdog; 5890 5891 return 0; 5892 5893 fail_start_watchdog: 5894 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 5895 del_timer_sync(&instance->sriov_heartbeat_timer); 5896 fail_get_ld_pd_list: 5897 instance->instancet->disable_intr(instance); 5898 megasas_destroy_irqs(instance); 5899 fail_init_adapter: 5900 if (instance->msix_vectors) 5901 pci_free_irq_vectors(instance->pdev); 5902 instance->msix_vectors = 0; 5903 fail_alloc_dma_buf: 5904 megasas_free_ctrl_dma_buffers(instance); 5905 
megasas_free_ctrl_mem(instance); 5906 fail_ready_state: 5907 iounmap(instance->reg_set); 5908 5909 fail_ioremap: 5910 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5911 5912 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5913 __func__, __LINE__); 5914 return -EINVAL; 5915 } 5916 5917 /** 5918 * megasas_release_mfi - Reverses the FW initialization 5919 * @instance: Adapter soft state 5920 */ 5921 static void megasas_release_mfi(struct megasas_instance *instance) 5922 { 5923 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5924 5925 if (instance->reply_queue) 5926 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5927 instance->reply_queue, instance->reply_queue_h); 5928 5929 megasas_free_cmds(instance); 5930 5931 iounmap(instance->reg_set); 5932 5933 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5934 } 5935 5936 /** 5937 * megasas_get_seq_num - Gets latest event sequence numbers 5938 * @instance: Adapter soft state 5939 * @eli: FW event log sequence numbers information 5940 * 5941 * FW maintains a log of all events in a non-volatile area. Upper layers would 5942 * usually find out the latest sequence number of the events, the seq number at 5943 * the boot etc. They would "read" all the events below the latest seq number 5944 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5945 * number), they would subsribe to AEN (asynchronous event notification) and 5946 * wait for the events to happen. 5947 */ 5948 static int 5949 megasas_get_seq_num(struct megasas_instance *instance, 5950 struct megasas_evt_log_info *eli) 5951 { 5952 struct megasas_cmd *cmd; 5953 struct megasas_dcmd_frame *dcmd; 5954 struct megasas_evt_log_info *el_info; 5955 dma_addr_t el_info_h = 0; 5956 int ret; 5957 5958 cmd = megasas_get_cmd(instance); 5959 5960 if (!cmd) { 5961 return -ENOMEM; 5962 } 5963 5964 dcmd = &cmd->frame->dcmd; 5965 el_info = dma_alloc_coherent(&instance->pdev->dev, 5966 sizeof(struct megasas_evt_log_info), 5967 &el_info_h, GFP_KERNEL); 5968 if (!el_info) { 5969 megasas_return_cmd(instance, cmd); 5970 return -ENOMEM; 5971 } 5972 5973 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5974 5975 dcmd->cmd = MFI_CMD_DCMD; 5976 dcmd->cmd_status = 0x0; 5977 dcmd->sge_count = 1; 5978 dcmd->flags = MFI_FRAME_DIR_READ; 5979 dcmd->timeout = 0; 5980 dcmd->pad_0 = 0; 5981 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5982 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5983 5984 megasas_set_dma_settings(instance, dcmd, el_info_h, 5985 sizeof(struct megasas_evt_log_info)); 5986 5987 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5988 if (ret != DCMD_SUCCESS) { 5989 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5990 __func__, __LINE__); 5991 goto dcmd_failed; 5992 } 5993 5994 /* 5995 * Copy the data back into callers buffer 5996 */ 5997 eli->newest_seq_num = el_info->newest_seq_num; 5998 eli->oldest_seq_num = el_info->oldest_seq_num; 5999 eli->clear_seq_num = el_info->clear_seq_num; 6000 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6001 eli->boot_seq_num = el_info->boot_seq_num; 6002 6003 dcmd_failed: 6004 dma_free_coherent(&instance->pdev->dev, 6005 sizeof(struct megasas_evt_log_info), 6006 el_info, el_info_h); 6007 6008 megasas_return_cmd(instance, cmd); 6009 6010 return ret; 6011 } 6012 6013 /** 6014 * megasas_register_aen - Registers for asynchronous event notification 6015 * @instance: Adapter soft state 6016 * @seq_num: The starting sequence number 6017 * @class_locale: 
Class of the event 6018 * 6019 * This function subscribes for AEN for events beyond the @seq_num. It requests 6020 * to be notified if and only if the event is of type @class_locale 6021 */ 6022 static int 6023 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 6024 u32 class_locale_word) 6025 { 6026 int ret_val; 6027 struct megasas_cmd *cmd; 6028 struct megasas_dcmd_frame *dcmd; 6029 union megasas_evt_class_locale curr_aen; 6030 union megasas_evt_class_locale prev_aen; 6031 6032 /* 6033 * If there is an AEN pending already (aen_cmd), check if the 6034 * class_locale of that pending AEN is inclusive of the new 6035 * AEN request we currently have. If it is, then we don't have 6036 * to do anything. In other words, whichever events the current 6037 * AEN request is subscribing to have already been subscribed 6038 * to. 6039 * 6040 * If the old_cmd is _not_ inclusive, then we have to abort 6041 * that command, form a class_locale that is a superset of both 6042 * old and current and re-issue it to the FW 6043 */ 6044 6045 curr_aen.word = class_locale_word; 6046 6047 if (instance->aen_cmd) { 6048 6049 prev_aen.word = 6050 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 6051 6052 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 6053 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 6054 dev_info(&instance->pdev->dev, 6055 "%s %d out of range class %d sent by application\n", 6056 __func__, __LINE__, curr_aen.members.class); 6057 return 0; 6058 } 6059 6060 /* 6061 * A class whose enum value is smaller is inclusive of all 6062 * higher values. If a PROGRESS (= -1) was previously 6063 * registered, then new registration requests for higher 6064 * classes need not be sent to FW. They are automatically 6065 * included. 6066 * 6067 * Locale numbers don't have such a hierarchy. They are bitmap 6068 * values. 6069 */ 6070 if ((prev_aen.members.class <= curr_aen.members.class) && 6071 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6072 curr_aen.members.locale)) { 6073 /* 6074 * Previously issued event registration includes 6075 * current request. Nothing to do.
6076 */ 6077 return 0; 6078 } else { 6079 curr_aen.members.locale |= prev_aen.members.locale; 6080 6081 if (prev_aen.members.class < curr_aen.members.class) 6082 curr_aen.members.class = prev_aen.members.class; 6083 6084 instance->aen_cmd->abort_aen = 1; 6085 ret_val = megasas_issue_blocked_abort_cmd(instance, 6086 instance-> 6087 aen_cmd, 30); 6088 6089 if (ret_val) { 6090 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6091 "previous AEN command\n"); 6092 return ret_val; 6093 } 6094 } 6095 } 6096 6097 cmd = megasas_get_cmd(instance); 6098 6099 if (!cmd) 6100 return -ENOMEM; 6101 6102 dcmd = &cmd->frame->dcmd; 6103 6104 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6105 6106 /* 6107 * Prepare DCMD for AEN registration 6108 */ 6109 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6110 6111 dcmd->cmd = MFI_CMD_DCMD; 6112 dcmd->cmd_status = 0x0; 6113 dcmd->sge_count = 1; 6114 dcmd->flags = MFI_FRAME_DIR_READ; 6115 dcmd->timeout = 0; 6116 dcmd->pad_0 = 0; 6117 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6118 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6119 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6120 instance->last_seq_num = seq_num; 6121 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6122 6123 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6124 sizeof(struct megasas_evt_detail)); 6125 6126 if (instance->aen_cmd != NULL) { 6127 megasas_return_cmd(instance, cmd); 6128 return 0; 6129 } 6130 6131 /* 6132 * Store a reference to the cmd used to register for AEN. When an 6133 * application wants us to register for AEN, we have to abort this 6134 * cmd and re-register with a new EVENT LOCALE supplied by that app 6135 */ 6136 instance->aen_cmd = cmd; 6137 6138 /* 6139 * Issue the AEN registration frame 6140 */ 6141 instance->instancet->issue_dcmd(instance, cmd); 6142 6143 return 0; 6144 } 6145 6146 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6147 * 6148 * This DCMD will fetch a few properties of the LD/system PD defined 6149 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value. 6150 * 6151 * The DCMD is sent by the driver whenever a new target is added to the OS. 6152 * 6153 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6154 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6155 * 0 = system PD, 1 = LD. 6156 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6157 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6158 * 6159 * @instance: Adapter soft state 6160 * @sdev: OS provided scsi device 6161 * 6162 * Returns 0 on success, non-zero on failure.
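 *
 * Example (illustrative only, following the mbox layout documented above):
 * to query an LD with target ID 2, the driver sets dcmd.mbox.b[0] = 1 and
 * dcmd.mbox.s[1] = 2 before firing the DCMD.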
6163 */ 6164 int 6165 megasas_get_target_prop(struct megasas_instance *instance, 6166 struct scsi_device *sdev) 6167 { 6168 int ret; 6169 struct megasas_cmd *cmd; 6170 struct megasas_dcmd_frame *dcmd; 6171 u16 targetId = (sdev->channel % 2) + sdev->id; 6172 6173 cmd = megasas_get_cmd(instance); 6174 6175 if (!cmd) { 6176 dev_err(&instance->pdev->dev, 6177 "Failed to get cmd %s\n", __func__); 6178 return -ENOMEM; 6179 } 6180 6181 dcmd = &cmd->frame->dcmd; 6182 6183 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6184 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6185 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6186 6187 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6188 dcmd->cmd = MFI_CMD_DCMD; 6189 dcmd->cmd_status = 0xFF; 6190 dcmd->sge_count = 1; 6191 dcmd->flags = MFI_FRAME_DIR_READ; 6192 dcmd->timeout = 0; 6193 dcmd->pad_0 = 0; 6194 dcmd->data_xfer_len = 6195 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6196 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6197 6198 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6199 sizeof(struct MR_TARGET_PROPERTIES)); 6200 6201 if ((instance->adapter_type != MFI_SERIES) && 6202 !instance->mask_interrupts) 6203 ret = megasas_issue_blocked_cmd(instance, 6204 cmd, MFI_IO_TIMEOUT_SECS); 6205 else 6206 ret = megasas_issue_polled(instance, cmd); 6207 6208 switch (ret) { 6209 case DCMD_TIMEOUT: 6210 switch (dcmd_timeout_ocr_possible(instance)) { 6211 case INITIATE_OCR: 6212 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6213 megasas_reset_fusion(instance->host, 6214 MFI_IO_TIMEOUT_OCR); 6215 break; 6216 case KILL_ADAPTER: 6217 megaraid_sas_kill_hba(instance); 6218 break; 6219 case IGNORE_TIMEOUT: 6220 dev_info(&instance->pdev->dev, 6221 "Ignore DCMD timeout: %s %d\n", 6222 __func__, __LINE__); 6223 break; 6224 } 6225 break; 6226 6227 default: 6228 megasas_return_cmd(instance, cmd); 6229 } 6230 if (ret != DCMD_SUCCESS) 6231 dev_err(&instance->pdev->dev, 6232 "return from %s %d return value %d\n", 6233 __func__, __LINE__, ret); 6234 6235 return ret; 6236 } 6237 6238 /** 6239 * megasas_start_aen - Subscribes to AEN during driver load time 6240 * @instance: Adapter soft state 6241 */ 6242 static int megasas_start_aen(struct megasas_instance *instance) 6243 { 6244 struct megasas_evt_log_info eli; 6245 union megasas_evt_class_locale class_locale; 6246 6247 /* 6248 * Get the latest sequence number from FW 6249 */ 6250 memset(&eli, 0, sizeof(eli)); 6251 6252 if (megasas_get_seq_num(instance, &eli)) 6253 return -1; 6254 6255 /* 6256 * Register AEN with FW for latest sequence number plus 1 6257 */ 6258 class_locale.members.reserved = 0; 6259 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6260 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6261 6262 return megasas_register_aen(instance, 6263 le32_to_cpu(eli.newest_seq_num) + 1, 6264 class_locale.word); 6265 } 6266 6267 /** 6268 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6269 * @instance: Adapter soft state 6270 */ 6271 static int megasas_io_attach(struct megasas_instance *instance) 6272 { 6273 struct Scsi_Host *host = instance->host; 6274 6275 /* 6276 * Export parameters required by SCSI mid-layer 6277 */ 6278 host->unique_id = instance->unique_id; 6279 host->can_queue = instance->max_scsi_cmds; 6280 host->this_id = instance->init_id; 6281 host->sg_tablesize = instance->max_num_sge; 6282 6283 if (instance->fw_support_ieee) 6284 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 6285 6286 /* 6287 * Check if the module parameter value for max_sectors can be used 6288 */ 
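	/*
	 * Clamping policy implemented below (descriptive note, no behaviour
	 * change): the max_sectors module parameter may only lower the limit
	 * derived from the controller info, e.g. max_sectors=128 with a
	 * firmware-derived limit of 256 results in max_sectors_per_req = 128.
	 * The GEN2 controllers are the one exception, where values up to
	 * MEGASAS_MAX_SECTORS are accepted even above the derived limit.
	 */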
if (max_sectors && max_sectors < instance->max_sectors_per_req) 6290 instance->max_sectors_per_req = max_sectors; 6291 else { 6292 if (max_sectors) { 6293 if (((instance->pdev->device == 6294 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6295 (instance->pdev->device == 6296 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6297 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6298 instance->max_sectors_per_req = max_sectors; 6299 } else { 6300 dev_info(&instance->pdev->dev, "max_sectors should be > 0 " 6301 "and <= %d (or < 1MB for GEN2 controller)\n", 6302 instance->max_sectors_per_req); 6303 } 6304 } 6305 } 6306 6307 host->max_sectors = instance->max_sectors_per_req; 6308 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6309 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6310 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6311 host->max_lun = MEGASAS_MAX_LUN; 6312 host->max_cmd_len = 16; 6313 6314 /* 6315 * Notify the mid-layer about the new controller 6316 */ 6317 if (scsi_add_host(host, &instance->pdev->dev)) { 6318 dev_err(&instance->pdev->dev, 6319 "Failed to add host from %s %d\n", 6320 __func__, __LINE__); 6321 return -ENODEV; 6322 } 6323 6324 return 0; 6325 } 6326 6327 /** 6328 * megasas_set_dma_mask - Set DMA mask for supported controllers 6329 * 6330 * @instance: Adapter soft state 6331 * Description: 6332 * 6333 * For Ventura, the driver/FW will operate with 63 bit DMA addresses. 6334 * 6335 * For Invader - 6336 * By default, the driver/FW will operate with 32 bit DMA addresses 6337 * for consistent DMA mapping, but if setting the 32 bit consistent 6338 * DMA mask fails, the driver will try a 63 bit consistent 6339 * mask, provided the FW is truly 63 bit DMA capable. 6340 * 6341 * For older controllers (Thunderbolt and MFI based adapters) - 6342 * the driver/FW will operate with 32 bit consistent DMA addresses. 6343 */ 6344 static int 6345 megasas_set_dma_mask(struct megasas_instance *instance) 6346 { 6347 u64 consistent_mask; 6348 struct pci_dev *pdev; 6349 u32 scratch_pad_1; 6350 6351 pdev = instance->pdev; 6352 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? 6353 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 6354 6355 if (IS_DMA64) { 6356 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 6357 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6358 goto fail_set_dma_mask; 6359 6360 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 6361 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 6362 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 6363 /* 6364 * If the 32 bit DMA mask fails, then try the 64 bit mask 6365 * for FW capable of handling 64 bit DMA. 6366 */ 6367 scratch_pad_1 = megasas_readl 6368 (instance, &instance->reg_set->outbound_scratch_pad_1); 6369 6370 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6371 goto fail_set_dma_mask; 6372 else if (dma_set_mask_and_coherent(&pdev->dev, 6373 DMA_BIT_MASK(63))) 6374 goto fail_set_dma_mask; 6375 } 6376 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6377 goto fail_set_dma_mask; 6378 6379 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6380 instance->consistent_mask_64bit = false; 6381 else 6382 instance->consistent_mask_64bit = true; 6383 6384 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6385 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 6386 (instance->consistent_mask_64bit ? "63" : "32")); 6387 6388 return 0; 6389 6390 fail_set_dma_mask: 6391 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6392 return -1; 6393 6394 } 6395 6396 /* 6397 * megasas_set_adapter_type - Set adapter type.
6398 * Supported controllers can be divided in 6399 * different categories- 6400 * enum MR_ADAPTER_TYPE { 6401 * MFI_SERIES = 1, 6402 * THUNDERBOLT_SERIES = 2, 6403 * INVADER_SERIES = 3, 6404 * VENTURA_SERIES = 4, 6405 * AERO_SERIES = 5, 6406 * }; 6407 * @instance: Adapter soft state 6408 * return: void 6409 */ 6410 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 6411 { 6412 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 6413 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 6414 instance->adapter_type = MFI_SERIES; 6415 } else { 6416 switch (instance->pdev->device) { 6417 case PCI_DEVICE_ID_LSI_AERO_10E1: 6418 case PCI_DEVICE_ID_LSI_AERO_10E2: 6419 case PCI_DEVICE_ID_LSI_AERO_10E5: 6420 case PCI_DEVICE_ID_LSI_AERO_10E6: 6421 instance->adapter_type = AERO_SERIES; 6422 break; 6423 case PCI_DEVICE_ID_LSI_VENTURA: 6424 case PCI_DEVICE_ID_LSI_CRUSADER: 6425 case PCI_DEVICE_ID_LSI_HARPOON: 6426 case PCI_DEVICE_ID_LSI_TOMCAT: 6427 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6428 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6429 instance->adapter_type = VENTURA_SERIES; 6430 break; 6431 case PCI_DEVICE_ID_LSI_FUSION: 6432 case PCI_DEVICE_ID_LSI_PLASMA: 6433 instance->adapter_type = THUNDERBOLT_SERIES; 6434 break; 6435 case PCI_DEVICE_ID_LSI_INVADER: 6436 case PCI_DEVICE_ID_LSI_INTRUDER: 6437 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6438 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6439 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6440 case PCI_DEVICE_ID_LSI_FURY: 6441 instance->adapter_type = INVADER_SERIES; 6442 break; 6443 default: /* For all other supported controllers */ 6444 instance->adapter_type = MFI_SERIES; 6445 break; 6446 } 6447 } 6448 } 6449 6450 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6451 { 6452 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 6453 sizeof(u32), &instance->producer_h, GFP_KERNEL); 6454 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 6455 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 6456 6457 if (!instance->producer || !instance->consumer) { 6458 dev_err(&instance->pdev->dev, 6459 "Failed to allocate memory for producer, consumer\n"); 6460 return -1; 6461 } 6462 6463 *instance->producer = 0; 6464 *instance->consumer = 0; 6465 return 0; 6466 } 6467 6468 /** 6469 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 6470 * structures which are not common across MFI 6471 * adapters and fusion adapters. 6472 * For MFI based adapters, allocate producer and 6473 * consumer buffers. For fusion adapters, allocate 6474 * memory for fusion context. 
6475 * @instance: Adapter soft state 6476 * return: 0 for SUCCESS 6477 */ 6478 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 6479 { 6480 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 6481 GFP_KERNEL); 6482 if (!instance->reply_map) 6483 return -ENOMEM; 6484 6485 switch (instance->adapter_type) { 6486 case MFI_SERIES: 6487 if (megasas_alloc_mfi_ctrl_mem(instance)) 6488 goto fail; 6489 break; 6490 case AERO_SERIES: 6491 case VENTURA_SERIES: 6492 case THUNDERBOLT_SERIES: 6493 case INVADER_SERIES: 6494 if (megasas_alloc_fusion_context(instance)) 6495 goto fail; 6496 break; 6497 } 6498 6499 return 0; 6500 fail: 6501 kfree(instance->reply_map); 6502 instance->reply_map = NULL; 6503 return -ENOMEM; 6504 } 6505 6506 /* 6507 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 6508 * producer, consumer buffers for MFI adapters 6509 * 6510 * @instance - Adapter soft instance 6511 * 6512 */ 6513 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 6514 { 6515 kfree(instance->reply_map); 6516 if (instance->adapter_type == MFI_SERIES) { 6517 if (instance->producer) 6518 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6519 instance->producer, 6520 instance->producer_h); 6521 if (instance->consumer) 6522 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 6523 instance->consumer, 6524 instance->consumer_h); 6525 } else { 6526 megasas_free_fusion_context(instance); 6527 } 6528 } 6529 6530 /** 6531 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 6532 * driver load time 6533 * 6534 * @instance- Adapter soft instance 6535 * @return- 0 for SUCCESS 6536 */ 6537 static inline 6538 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 6539 { 6540 struct pci_dev *pdev = instance->pdev; 6541 struct fusion_context *fusion = instance->ctrl_context; 6542 6543 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 6544 sizeof(struct megasas_evt_detail), 6545 &instance->evt_detail_h, GFP_KERNEL); 6546 6547 if (!instance->evt_detail) { 6548 dev_err(&instance->pdev->dev, 6549 "Failed to allocate event detail buffer\n"); 6550 return -ENOMEM; 6551 } 6552 6553 if (fusion) { 6554 fusion->ioc_init_request = 6555 dma_alloc_coherent(&pdev->dev, 6556 sizeof(struct MPI2_IOC_INIT_REQUEST), 6557 &fusion->ioc_init_request_phys, 6558 GFP_KERNEL); 6559 6560 if (!fusion->ioc_init_request) { 6561 dev_err(&pdev->dev, 6562 "Failed to allocate ioc init request buffer\n"); 6563 return -ENOMEM; 6564 } 6565 6566 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 6567 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6568 &instance->snapdump_prop_h, GFP_KERNEL); 6569 6570 if (!instance->snapdump_prop) 6571 dev_err(&pdev->dev, 6572 "Failed to allocate snapdump properties buffer\n"); 6573 6574 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 6575 HOST_DEVICE_LIST_SZ, 6576 &instance->host_device_list_buf_h, 6577 GFP_KERNEL); 6578 6579 if (!instance->host_device_list_buf) { 6580 dev_err(&pdev->dev, 6581 "Failed to allocate targetid list buffer\n"); 6582 return -ENOMEM; 6583 } 6584 6585 } 6586 6587 instance->pd_list_buf = 6588 dma_alloc_coherent(&pdev->dev, 6589 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6590 &instance->pd_list_buf_h, GFP_KERNEL); 6591 6592 if (!instance->pd_list_buf) { 6593 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 6594 return -ENOMEM; 6595 } 6596 6597 instance->ctrl_info_buf = 6598 dma_alloc_coherent(&pdev->dev, 6599 sizeof(struct megasas_ctrl_info), 6600 &instance->ctrl_info_buf_h,
GFP_KERNEL); 6601 6602 if (!instance->ctrl_info_buf) { 6603 dev_err(&pdev->dev, 6604 "Failed to allocate controller info buffer\n"); 6605 return -ENOMEM; 6606 } 6607 6608 instance->ld_list_buf = 6609 dma_alloc_coherent(&pdev->dev, 6610 sizeof(struct MR_LD_LIST), 6611 &instance->ld_list_buf_h, GFP_KERNEL); 6612 6613 if (!instance->ld_list_buf) { 6614 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 6615 return -ENOMEM; 6616 } 6617 6618 instance->ld_targetid_list_buf = 6619 dma_alloc_coherent(&pdev->dev, 6620 sizeof(struct MR_LD_TARGETID_LIST), 6621 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 6622 6623 if (!instance->ld_targetid_list_buf) { 6624 dev_err(&pdev->dev, 6625 "Failed to allocate LD targetid list buffer\n"); 6626 return -ENOMEM; 6627 } 6628 6629 if (!reset_devices) { 6630 instance->system_info_buf = 6631 dma_alloc_coherent(&pdev->dev, 6632 sizeof(struct MR_DRV_SYSTEM_INFO), 6633 &instance->system_info_h, GFP_KERNEL); 6634 instance->pd_info = 6635 dma_alloc_coherent(&pdev->dev, 6636 sizeof(struct MR_PD_INFO), 6637 &instance->pd_info_h, GFP_KERNEL); 6638 instance->tgt_prop = 6639 dma_alloc_coherent(&pdev->dev, 6640 sizeof(struct MR_TARGET_PROPERTIES), 6641 &instance->tgt_prop_h, GFP_KERNEL); 6642 instance->crash_dump_buf = 6643 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6644 &instance->crash_dump_h, GFP_KERNEL); 6645 6646 if (!instance->system_info_buf) 6647 dev_err(&instance->pdev->dev, 6648 "Failed to allocate system info buffer\n"); 6649 6650 if (!instance->pd_info) 6651 dev_err(&instance->pdev->dev, 6652 "Failed to allocate pd_info buffer\n"); 6653 6654 if (!instance->tgt_prop) 6655 dev_err(&instance->pdev->dev, 6656 "Failed to allocate tgt_prop buffer\n"); 6657 6658 if (!instance->crash_dump_buf) 6659 dev_err(&instance->pdev->dev, 6660 "Failed to allocate crash dump buffer\n"); 6661 } 6662 6663 return 0; 6664 } 6665 6666 /* 6667 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 6668 * during driver load time 6669 * 6670 * @instance- Adapter soft instance 6671 * 6672 */ 6673 static inline 6674 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 6675 { 6676 struct pci_dev *pdev = instance->pdev; 6677 struct fusion_context *fusion = instance->ctrl_context; 6678 6679 if (instance->evt_detail) 6680 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 6681 instance->evt_detail, 6682 instance->evt_detail_h); 6683 6684 if (fusion && fusion->ioc_init_request) 6685 dma_free_coherent(&pdev->dev, 6686 sizeof(struct MPI2_IOC_INIT_REQUEST), 6687 fusion->ioc_init_request, 6688 fusion->ioc_init_request_phys); 6689 6690 if (instance->pd_list_buf) 6691 dma_free_coherent(&pdev->dev, 6692 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6693 instance->pd_list_buf, 6694 instance->pd_list_buf_h); 6695 6696 if (instance->ld_list_buf) 6697 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 6698 instance->ld_list_buf, 6699 instance->ld_list_buf_h); 6700 6701 if (instance->ld_targetid_list_buf) 6702 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 6703 instance->ld_targetid_list_buf, 6704 instance->ld_targetid_list_buf_h); 6705 6706 if (instance->ctrl_info_buf) 6707 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 6708 instance->ctrl_info_buf, 6709 instance->ctrl_info_buf_h); 6710 6711 if (instance->system_info_buf) 6712 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 6713 instance->system_info_buf, 6714 instance->system_info_h); 6715 6716 if (instance->pd_info) 6717 
dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 6718 instance->pd_info, instance->pd_info_h); 6719 6720 if (instance->tgt_prop) 6721 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 6722 instance->tgt_prop, instance->tgt_prop_h); 6723 6724 if (instance->crash_dump_buf) 6725 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6726 instance->crash_dump_buf, 6727 instance->crash_dump_h); 6728 6729 if (instance->snapdump_prop) 6730 dma_free_coherent(&pdev->dev, 6731 sizeof(struct MR_SNAPDUMP_PROPERTIES), 6732 instance->snapdump_prop, 6733 instance->snapdump_prop_h); 6734 6735 if (instance->host_device_list_buf) 6736 dma_free_coherent(&pdev->dev, 6737 HOST_DEVICE_LIST_SZ, 6738 instance->host_device_list_buf, 6739 instance->host_device_list_buf_h); 6740 6741 } 6742 6743 /* 6744 * megasas_init_ctrl_params - Initialize controller's instance 6745 * parameters before FW init 6746 * @instance - Adapter soft instance 6747 * @return - void 6748 */ 6749 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 6750 { 6751 instance->fw_crash_state = UNAVAILABLE; 6752 6753 megasas_poll_wait_aen = 0; 6754 instance->issuepend_done = 1; 6755 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 6756 6757 /* 6758 * Initialize locks and queues 6759 */ 6760 INIT_LIST_HEAD(&instance->cmd_pool); 6761 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 6762 6763 atomic_set(&instance->fw_outstanding, 0); 6764 6765 init_waitqueue_head(&instance->int_cmd_wait_q); 6766 init_waitqueue_head(&instance->abort_cmd_wait_q); 6767 6768 spin_lock_init(&instance->crashdump_lock); 6769 spin_lock_init(&instance->mfi_pool_lock); 6770 spin_lock_init(&instance->hba_lock); 6771 spin_lock_init(&instance->stream_lock); 6772 spin_lock_init(&instance->completion_lock); 6773 6774 mutex_init(&instance->reset_mutex); 6775 6776 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6777 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 6778 instance->flag_ieee = 1; 6779 6780 megasas_dbg_lvl = 0; 6781 instance->flag = 0; 6782 instance->unload = 1; 6783 instance->last_time = 0; 6784 instance->disableOnlineCtrlReset = 1; 6785 instance->UnevenSpanSupport = 0; 6786 6787 if (instance->adapter_type != MFI_SERIES) 6788 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6789 else 6790 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6791 } 6792 6793 /** 6794 * megasas_probe_one - PCI hotplug entry point 6795 * @pdev: PCI device structure 6796 * @id: PCI ids of supported hotplugged adapter 6797 */ 6798 static int megasas_probe_one(struct pci_dev *pdev, 6799 const struct pci_device_id *id) 6800 { 6801 int rval, pos; 6802 struct Scsi_Host *host; 6803 struct megasas_instance *instance; 6804 u16 control = 0; 6805 6806 switch (pdev->device) { 6807 case PCI_DEVICE_ID_LSI_AERO_10E1: 6808 case PCI_DEVICE_ID_LSI_AERO_10E5: 6809 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 6810 break; 6811 } 6812 6813 /* Reset MSI-X in the kdump kernel */ 6814 if (reset_devices) { 6815 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6816 if (pos) { 6817 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 6818 &control); 6819 if (control & PCI_MSIX_FLAGS_ENABLE) { 6820 dev_info(&pdev->dev, "resetting MSI-X\n"); 6821 pci_write_config_word(pdev, 6822 pos + PCI_MSIX_FLAGS, 6823 control & 6824 ~PCI_MSIX_FLAGS_ENABLE); 6825 } 6826 } 6827 } 6828 6829 /* 6830 * PCI prepping: enable device set bus mastering and dma mask 6831 */ 6832 rval = pci_enable_device_mem(pdev); 6833 
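	/*
	 * Descriptive note on the remaining probe flow: on success the driver
	 * sets bus mastering, allocates the Scsi_Host and instance, brings up
	 * the firmware via megasas_init_fw(), optionally allocates SR-IOV VF
	 * affiliation buffers, registers the instance with megasas_mgmt_info,
	 * attaches to the SCSI mid-layer and finally starts AEN.  Errors
	 * unwind through the fail_* labels at the end of the function.
	 */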
6834 if (rval) { 6835 return rval; 6836 } 6837 6838 pci_set_master(pdev); 6839 6840 host = scsi_host_alloc(&megasas_template, 6841 sizeof(struct megasas_instance)); 6842 6843 if (!host) { 6844 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6845 goto fail_alloc_instance; 6846 } 6847 6848 instance = (struct megasas_instance *)host->hostdata; 6849 memset(instance, 0, sizeof(*instance)); 6850 atomic_set(&instance->fw_reset_no_pci_access, 0); 6851 6852 /* 6853 * Initialize PCI related and misc parameters 6854 */ 6855 instance->pdev = pdev; 6856 instance->host = host; 6857 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 6858 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 6859 6860 megasas_set_adapter_type(instance); 6861 6862 /* 6863 * Initialize MFI Firmware 6864 */ 6865 if (megasas_init_fw(instance)) 6866 goto fail_init_mfi; 6867 6868 if (instance->requestorId) { 6869 if (instance->PlasmaFW111) { 6870 instance->vf_affiliation_111 = 6871 dma_alloc_coherent(&pdev->dev, 6872 sizeof(struct MR_LD_VF_AFFILIATION_111), 6873 &instance->vf_affiliation_111_h, 6874 GFP_KERNEL); 6875 if (!instance->vf_affiliation_111) 6876 dev_warn(&pdev->dev, "Can't allocate " 6877 "memory for VF affiliation buffer\n"); 6878 } else { 6879 instance->vf_affiliation = 6880 dma_alloc_coherent(&pdev->dev, 6881 (MAX_LOGICAL_DRIVES + 1) * 6882 sizeof(struct MR_LD_VF_AFFILIATION), 6883 &instance->vf_affiliation_h, 6884 GFP_KERNEL); 6885 if (!instance->vf_affiliation) 6886 dev_warn(&pdev->dev, "Can't allocate " 6887 "memory for VF affiliation buffer\n"); 6888 } 6889 } 6890 6891 /* 6892 * Store instance in PCI softstate 6893 */ 6894 pci_set_drvdata(pdev, instance); 6895 6896 /* 6897 * Add this controller to megasas_mgmt_info structure so that it 6898 * can be exported to management applications 6899 */ 6900 megasas_mgmt_info.count++; 6901 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 6902 megasas_mgmt_info.max_index++; 6903 6904 /* 6905 * Register with SCSI mid-layer 6906 */ 6907 if (megasas_io_attach(instance)) 6908 goto fail_io_attach; 6909 6910 instance->unload = 0; 6911 /* 6912 * Trigger SCSI to scan our drives 6913 */ 6914 if (!instance->enable_fw_dev_list || 6915 (instance->host_device_list_buf->count > 0)) 6916 scsi_scan_host(host); 6917 6918 /* 6919 * Initiate AEN (Asynchronous Event Notification) 6920 */ 6921 if (megasas_start_aen(instance)) { 6922 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 6923 goto fail_start_aen; 6924 } 6925 6926 /* Get current SR-IOV LD/VF affiliation */ 6927 if (instance->requestorId) 6928 megasas_get_ld_vf_affiliation(instance, 1); 6929 6930 return 0; 6931 6932 fail_start_aen: 6933 fail_io_attach: 6934 megasas_mgmt_info.count--; 6935 megasas_mgmt_info.max_index--; 6936 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6937 6938 instance->instancet->disable_intr(instance); 6939 megasas_destroy_irqs(instance); 6940 6941 if (instance->adapter_type != MFI_SERIES) 6942 megasas_release_fusion(instance); 6943 else 6944 megasas_release_mfi(instance); 6945 if (instance->msix_vectors) 6946 pci_free_irq_vectors(instance->pdev); 6947 fail_init_mfi: 6948 scsi_host_put(host); 6949 fail_alloc_instance: 6950 pci_disable_device(pdev); 6951 6952 return -ENODEV; 6953 } 6954 6955 /** 6956 * megasas_flush_cache - Requests FW to flush all its caches 6957 * @instance: Adapter soft state 6958 */ 6959 static void megasas_flush_cache(struct megasas_instance *instance) 6960 { 6961 struct megasas_cmd *cmd; 6962 struct megasas_dcmd_frame *dcmd; 6963 
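	/*
	 * Descriptive note: the flush is a data-less DCMD (no SGEs, no
	 * payload); mbox.b[0] selects which caches the firmware should flush
	 * (controller cache and/or disk cache).  The command is issued
	 * synchronously below and only logged on failure.
	 */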
6964 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6965 return; 6966 6967 cmd = megasas_get_cmd(instance); 6968 6969 if (!cmd) 6970 return; 6971 6972 dcmd = &cmd->frame->dcmd; 6973 6974 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6975 6976 dcmd->cmd = MFI_CMD_DCMD; 6977 dcmd->cmd_status = 0x0; 6978 dcmd->sge_count = 0; 6979 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6980 dcmd->timeout = 0; 6981 dcmd->pad_0 = 0; 6982 dcmd->data_xfer_len = 0; 6983 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6984 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6985 6986 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6987 != DCMD_SUCCESS) { 6988 dev_err(&instance->pdev->dev, 6989 "return from %s %d\n", __func__, __LINE__); 6990 return; 6991 } 6992 6993 megasas_return_cmd(instance, cmd); 6994 } 6995 6996 /** 6997 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6998 * @instance: Adapter soft state 6999 * @opcode: Shutdown/Hibernate 7000 */ 7001 static void megasas_shutdown_controller(struct megasas_instance *instance, 7002 u32 opcode) 7003 { 7004 struct megasas_cmd *cmd; 7005 struct megasas_dcmd_frame *dcmd; 7006 7007 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7008 return; 7009 7010 cmd = megasas_get_cmd(instance); 7011 7012 if (!cmd) 7013 return; 7014 7015 if (instance->aen_cmd) 7016 megasas_issue_blocked_abort_cmd(instance, 7017 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7018 if (instance->map_update_cmd) 7019 megasas_issue_blocked_abort_cmd(instance, 7020 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7021 if (instance->jbod_seq_cmd) 7022 megasas_issue_blocked_abort_cmd(instance, 7023 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7024 7025 dcmd = &cmd->frame->dcmd; 7026 7027 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7028 7029 dcmd->cmd = MFI_CMD_DCMD; 7030 dcmd->cmd_status = 0x0; 7031 dcmd->sge_count = 0; 7032 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7033 dcmd->timeout = 0; 7034 dcmd->pad_0 = 0; 7035 dcmd->data_xfer_len = 0; 7036 dcmd->opcode = cpu_to_le32(opcode); 7037 7038 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7039 != DCMD_SUCCESS) { 7040 dev_err(&instance->pdev->dev, 7041 "return from %s %d\n", __func__, __LINE__); 7042 return; 7043 } 7044 7045 megasas_return_cmd(instance, cmd); 7046 } 7047 7048 #ifdef CONFIG_PM 7049 /** 7050 * megasas_suspend - driver suspend entry point 7051 * @pdev: PCI device structure 7052 * @state: PCI power state to suspend routine 7053 */ 7054 static int 7055 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 7056 { 7057 struct Scsi_Host *host; 7058 struct megasas_instance *instance; 7059 7060 instance = pci_get_drvdata(pdev); 7061 host = instance->host; 7062 instance->unload = 1; 7063 7064 /* Shutdown SR-IOV heartbeat timer */ 7065 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7066 del_timer_sync(&instance->sriov_heartbeat_timer); 7067 7068 /* Stop the FW fault detection watchdog */ 7069 if (instance->adapter_type != MFI_SERIES) 7070 megasas_fusion_stop_watchdog(instance); 7071 7072 megasas_flush_cache(instance); 7073 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7074 7075 /* cancel the delayed work if this work still in queue */ 7076 if (instance->ev != NULL) { 7077 struct megasas_aen_event *ev = instance->ev; 7078 cancel_delayed_work_sync(&ev->hotplug_work); 7079 instance->ev = NULL; 7080 } 7081 7082 tasklet_kill(&instance->isr_tasklet); 7083 7084 pci_set_drvdata(instance->pdev, 
instance); 7085 instance->instancet->disable_intr(instance); 7086 7087 megasas_destroy_irqs(instance); 7088 7089 if (instance->msix_vectors) 7090 pci_free_irq_vectors(instance->pdev); 7091 7092 pci_save_state(pdev); 7093 pci_disable_device(pdev); 7094 7095 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 7096 7097 return 0; 7098 } 7099 7100 /** 7101 * megasas_resume- driver resume entry point 7102 * @pdev: PCI device structure 7103 */ 7104 static int 7105 megasas_resume(struct pci_dev *pdev) 7106 { 7107 int rval; 7108 struct Scsi_Host *host; 7109 struct megasas_instance *instance; 7110 int irq_flags = PCI_IRQ_LEGACY; 7111 7112 instance = pci_get_drvdata(pdev); 7113 host = instance->host; 7114 pci_set_power_state(pdev, PCI_D0); 7115 pci_enable_wake(pdev, PCI_D0, 0); 7116 pci_restore_state(pdev); 7117 7118 /* 7119 * PCI prepping: enable device set bus mastering and dma mask 7120 */ 7121 rval = pci_enable_device_mem(pdev); 7122 7123 if (rval) { 7124 dev_err(&pdev->dev, "Enable device failed\n"); 7125 return rval; 7126 } 7127 7128 pci_set_master(pdev); 7129 7130 /* 7131 * We expect the FW state to be READY 7132 */ 7133 if (megasas_transition_to_ready(instance, 0)) 7134 goto fail_ready_state; 7135 7136 if (megasas_set_dma_mask(instance)) 7137 goto fail_set_dma_mask; 7138 7139 /* 7140 * Initialize MFI Firmware 7141 */ 7142 7143 atomic_set(&instance->fw_outstanding, 0); 7144 atomic_set(&instance->ldio_outstanding, 0); 7145 7146 /* Now re-enable MSI-X */ 7147 if (instance->msix_vectors) { 7148 irq_flags = PCI_IRQ_MSIX; 7149 if (smp_affinity_enable) 7150 irq_flags |= PCI_IRQ_AFFINITY; 7151 } 7152 rval = pci_alloc_irq_vectors(instance->pdev, 1, 7153 instance->msix_vectors ? 7154 instance->msix_vectors : 1, irq_flags); 7155 if (rval < 0) 7156 goto fail_reenable_msix; 7157 7158 megasas_setup_reply_map(instance); 7159 7160 if (instance->adapter_type != MFI_SERIES) { 7161 megasas_reset_reply_desc(instance); 7162 if (megasas_ioc_init_fusion(instance)) { 7163 megasas_free_cmds(instance); 7164 megasas_free_cmds_fusion(instance); 7165 goto fail_init_mfi; 7166 } 7167 if (!megasas_get_map_info(instance)) 7168 megasas_sync_map_info(instance); 7169 } else { 7170 *instance->producer = 0; 7171 *instance->consumer = 0; 7172 if (megasas_issue_init_mfi(instance)) 7173 goto fail_init_mfi; 7174 } 7175 7176 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7177 goto fail_init_mfi; 7178 7179 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7180 (unsigned long)instance); 7181 7182 if (instance->msix_vectors ? 
7183 megasas_setup_irqs_msix(instance, 0) : 7184 megasas_setup_irqs_ioapic(instance)) 7185 goto fail_init_mfi; 7186 7187 /* Re-launch SR-IOV heartbeat timer */ 7188 if (instance->requestorId) { 7189 if (!megasas_sriov_start_heartbeat(instance, 0)) 7190 megasas_start_timer(instance); 7191 else { 7192 instance->skip_heartbeat_timer_del = 1; 7193 goto fail_init_mfi; 7194 } 7195 } 7196 7197 instance->instancet->enable_intr(instance); 7198 megasas_setup_jbod_map(instance); 7199 instance->unload = 0; 7200 7201 /* 7202 * Initiate AEN (Asynchronous Event Notification) 7203 */ 7204 if (megasas_start_aen(instance)) 7205 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7206 7207 /* Re-launch FW fault watchdog */ 7208 if (instance->adapter_type != MFI_SERIES) 7209 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7210 goto fail_start_watchdog; 7211 7212 return 0; 7213 7214 fail_start_watchdog: 7215 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7216 del_timer_sync(&instance->sriov_heartbeat_timer); 7217 fail_init_mfi: 7218 megasas_free_ctrl_dma_buffers(instance); 7219 megasas_free_ctrl_mem(instance); 7220 scsi_host_put(host); 7221 7222 fail_reenable_msix: 7223 fail_set_dma_mask: 7224 fail_ready_state: 7225 7226 pci_disable_device(pdev); 7227 7228 return -ENODEV; 7229 } 7230 #else 7231 #define megasas_suspend NULL 7232 #define megasas_resume NULL 7233 #endif 7234 7235 static inline int 7236 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7237 { 7238 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7239 int i; 7240 u8 adp_state; 7241 7242 for (i = 0; i < wait_time; i++) { 7243 adp_state = atomic_read(&instance->adprecovery); 7244 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7245 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7246 break; 7247 7248 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7249 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7250 7251 msleep(1000); 7252 } 7253 7254 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7255 dev_info(&instance->pdev->dev, 7256 "%s HBA failed to become operational, adp_state %d\n", 7257 __func__, adp_state); 7258 return 1; 7259 } 7260 7261 return 0; 7262 } 7263 7264 /** 7265 * megasas_detach_one - PCI hot"un"plug entry point 7266 * @pdev: PCI device structure 7267 */ 7268 static void megasas_detach_one(struct pci_dev *pdev) 7269 { 7270 int i; 7271 struct Scsi_Host *host; 7272 struct megasas_instance *instance; 7273 struct fusion_context *fusion; 7274 u32 pd_seq_map_sz; 7275 7276 instance = pci_get_drvdata(pdev); 7277 host = instance->host; 7278 fusion = instance->ctrl_context; 7279 7280 /* Shutdown SR-IOV heartbeat timer */ 7281 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7282 del_timer_sync(&instance->sriov_heartbeat_timer); 7283 7284 /* Stop the FW fault detection watchdog */ 7285 if (instance->adapter_type != MFI_SERIES) 7286 megasas_fusion_stop_watchdog(instance); 7287 7288 if (instance->fw_crash_state != UNAVAILABLE) 7289 megasas_free_host_crash_buffer(instance); 7290 scsi_remove_host(instance->host); 7291 instance->unload = 1; 7292 7293 if (megasas_wait_for_adapter_operational(instance)) 7294 goto skip_firing_dcmds; 7295 7296 megasas_flush_cache(instance); 7297 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7298 7299 skip_firing_dcmds: 7300 /* cancel the delayed work if this work still in queue*/ 7301 if (instance->ev != NULL) { 7302 struct megasas_aen_event *ev = instance->ev; 7303 cancel_delayed_work_sync(&ev->hotplug_work); 7304 instance->ev = NULL; 
7305 } 7306 7307 /* cancel all wait events */ 7308 wake_up_all(&instance->int_cmd_wait_q); 7309 7310 tasklet_kill(&instance->isr_tasklet); 7311 7312 /* 7313 * Take the instance off the instance array. Note that we will not 7314 * decrement the max_index. We let this array be sparse array 7315 */ 7316 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7317 if (megasas_mgmt_info.instance[i] == instance) { 7318 megasas_mgmt_info.count--; 7319 megasas_mgmt_info.instance[i] = NULL; 7320 7321 break; 7322 } 7323 } 7324 7325 instance->instancet->disable_intr(instance); 7326 7327 megasas_destroy_irqs(instance); 7328 7329 if (instance->msix_vectors) 7330 pci_free_irq_vectors(instance->pdev); 7331 7332 if (instance->adapter_type >= VENTURA_SERIES) { 7333 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7334 kfree(fusion->stream_detect_by_ld[i]); 7335 kfree(fusion->stream_detect_by_ld); 7336 fusion->stream_detect_by_ld = NULL; 7337 } 7338 7339 7340 if (instance->adapter_type != MFI_SERIES) { 7341 megasas_release_fusion(instance); 7342 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 7343 (sizeof(struct MR_PD_CFG_SEQ) * 7344 (MAX_PHYSICAL_DEVICES - 1)); 7345 for (i = 0; i < 2 ; i++) { 7346 if (fusion->ld_map[i]) 7347 dma_free_coherent(&instance->pdev->dev, 7348 fusion->max_map_sz, 7349 fusion->ld_map[i], 7350 fusion->ld_map_phys[i]); 7351 if (fusion->ld_drv_map[i]) { 7352 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 7353 vfree(fusion->ld_drv_map[i]); 7354 else 7355 free_pages((ulong)fusion->ld_drv_map[i], 7356 fusion->drv_map_pages); 7357 } 7358 7359 if (fusion->pd_seq_sync[i]) 7360 dma_free_coherent(&instance->pdev->dev, 7361 pd_seq_map_sz, 7362 fusion->pd_seq_sync[i], 7363 fusion->pd_seq_phys[i]); 7364 } 7365 } else { 7366 megasas_release_mfi(instance); 7367 } 7368 7369 if (instance->vf_affiliation) 7370 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 7371 sizeof(struct MR_LD_VF_AFFILIATION), 7372 instance->vf_affiliation, 7373 instance->vf_affiliation_h); 7374 7375 if (instance->vf_affiliation_111) 7376 dma_free_coherent(&pdev->dev, 7377 sizeof(struct MR_LD_VF_AFFILIATION_111), 7378 instance->vf_affiliation_111, 7379 instance->vf_affiliation_111_h); 7380 7381 if (instance->hb_host_mem) 7382 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 7383 instance->hb_host_mem, 7384 instance->hb_host_mem_h); 7385 7386 megasas_free_ctrl_dma_buffers(instance); 7387 7388 megasas_free_ctrl_mem(instance); 7389 7390 scsi_host_put(host); 7391 7392 pci_disable_device(pdev); 7393 } 7394 7395 /** 7396 * megasas_shutdown - Shutdown entry point 7397 * @device: Generic device structure 7398 */ 7399 static void megasas_shutdown(struct pci_dev *pdev) 7400 { 7401 struct megasas_instance *instance = pci_get_drvdata(pdev); 7402 7403 instance->unload = 1; 7404 7405 if (megasas_wait_for_adapter_operational(instance)) 7406 goto skip_firing_dcmds; 7407 7408 megasas_flush_cache(instance); 7409 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7410 7411 skip_firing_dcmds: 7412 instance->instancet->disable_intr(instance); 7413 megasas_destroy_irqs(instance); 7414 7415 if (instance->msix_vectors) 7416 pci_free_irq_vectors(instance->pdev); 7417 } 7418 7419 /** 7420 * megasas_mgmt_open - char node "open" entry point 7421 */ 7422 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 7423 { 7424 /* 7425 * Allow only those users with admin rights 7426 */ 7427 if (!capable(CAP_SYS_ADMIN)) 7428 return -EACCES; 7429 7430 return 0; 7431 } 7432 7433 /** 7434 * megasas_mgmt_fasync - Async 
notifier registration from applications 7435 * 7436 * This function adds the calling process to a driver global queue. When an 7437 * event occurs, SIGIO will be sent to all processes in this queue. 7438 */ 7439 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 7440 { 7441 int rc; 7442 7443 mutex_lock(&megasas_async_queue_mutex); 7444 7445 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 7446 7447 mutex_unlock(&megasas_async_queue_mutex); 7448 7449 if (rc >= 0) { 7450 /* For sanity check when we get ioctl */ 7451 filep->private_data = filep; 7452 return 0; 7453 } 7454 7455 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 7456 7457 return rc; 7458 } 7459 7460 /** 7461 * megasas_mgmt_poll - char node "poll" entry point 7462 * */ 7463 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 7464 { 7465 __poll_t mask; 7466 unsigned long flags; 7467 7468 poll_wait(file, &megasas_poll_wait, wait); 7469 spin_lock_irqsave(&poll_aen_lock, flags); 7470 if (megasas_poll_wait_aen) 7471 mask = (EPOLLIN | EPOLLRDNORM); 7472 else 7473 mask = 0; 7474 megasas_poll_wait_aen = 0; 7475 spin_unlock_irqrestore(&poll_aen_lock, flags); 7476 return mask; 7477 } 7478 7479 /* 7480 * megasas_set_crash_dump_params_ioctl: 7481 * Send CRASH_DUMP_MODE DCMD to all controllers 7482 * @cmd: MFI command frame 7483 */ 7484 7485 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 7486 { 7487 struct megasas_instance *local_instance; 7488 int i, error = 0; 7489 int crash_support; 7490 7491 crash_support = cmd->frame->dcmd.mbox.w[0]; 7492 7493 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7494 local_instance = megasas_mgmt_info.instance[i]; 7495 if (local_instance && local_instance->crash_dump_drv_support) { 7496 if ((atomic_read(&local_instance->adprecovery) == 7497 MEGASAS_HBA_OPERATIONAL) && 7498 !megasas_set_crash_dump_params(local_instance, 7499 crash_support)) { 7500 local_instance->crash_dump_app_support = 7501 crash_support; 7502 dev_info(&local_instance->pdev->dev, 7503 "Application firmware crash " 7504 "dump mode set success\n"); 7505 error = 0; 7506 } else { 7507 dev_info(&local_instance->pdev->dev, 7508 "Application firmware crash " 7509 "dump mode set failed\n"); 7510 error = -1; 7511 } 7512 } 7513 } 7514 return error; 7515 } 7516 7517 /** 7518 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 7519 * @instance: Adapter soft state 7520 * @argp: User's ioctl packet 7521 */ 7522 static int 7523 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 7524 struct megasas_iocpacket __user * user_ioc, 7525 struct megasas_iocpacket *ioc) 7526 { 7527 struct megasas_sge64 *kern_sge64 = NULL; 7528 struct megasas_sge32 *kern_sge32 = NULL; 7529 struct megasas_cmd *cmd; 7530 void *kbuff_arr[MAX_IOCTL_SGE]; 7531 dma_addr_t buf_handle = 0; 7532 int error = 0, i; 7533 void *sense = NULL; 7534 dma_addr_t sense_handle; 7535 unsigned long *sense_ptr; 7536 u32 opcode = 0; 7537 7538 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 7539 7540 if (ioc->sge_count > MAX_IOCTL_SGE) { 7541 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 7542 ioc->sge_count, MAX_IOCTL_SGE); 7543 return -EINVAL; 7544 } 7545 7546 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 7547 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 7548 !instance->support_nvme_passthru)) { 7549 dev_err(&instance->pdev->dev, 7550 "Received invalid ioctl command 0x%x\n", 7551 ioc->frame.hdr.cmd); 7552 return -ENOTSUPP; 7553 } 7554 7555 cmd = megasas_get_cmd(instance); 7556 if 
(!cmd) { 7557 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 7558 return -ENOMEM; 7559 } 7560 7561 /* 7562 * User's IOCTL packet has 2 frames (maximum). Copy those two 7563 * frames into our cmd's frames. cmd->frame's context will get 7564 * overwritten when we copy from user's frames. So set that value 7565 * alone separately 7566 */ 7567 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 7568 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 7569 cmd->frame->hdr.pad_0 = 0; 7570 7571 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 7572 7573 if (instance->consistent_mask_64bit) 7574 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 7575 MFI_FRAME_SENSE64)); 7576 else 7577 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 7578 MFI_FRAME_SENSE64)); 7579 7580 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 7581 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 7582 7583 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 7584 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 7585 megasas_return_cmd(instance, cmd); 7586 return -1; 7587 } 7588 } 7589 7590 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 7591 error = megasas_set_crash_dump_params_ioctl(cmd); 7592 megasas_return_cmd(instance, cmd); 7593 return error; 7594 } 7595 7596 /* 7597 * The management interface between applications and the fw uses 7598 * MFI frames. E.g, RAID configuration changes, LD property changes 7599 * etc are accomplishes through different kinds of MFI frames. The 7600 * driver needs to care only about substituting user buffers with 7601 * kernel buffers in SGLs. The location of SGL is embedded in the 7602 * struct iocpacket itself. 7603 */ 7604 if (instance->consistent_mask_64bit) 7605 kern_sge64 = (struct megasas_sge64 *) 7606 ((unsigned long)cmd->frame + ioc->sgl_off); 7607 else 7608 kern_sge32 = (struct megasas_sge32 *) 7609 ((unsigned long)cmd->frame + ioc->sgl_off); 7610 7611 /* 7612 * For each user buffer, create a mirror buffer and copy in 7613 */ 7614 for (i = 0; i < ioc->sge_count; i++) { 7615 if (!ioc->sgl[i].iov_len) 7616 continue; 7617 7618 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 7619 ioc->sgl[i].iov_len, 7620 &buf_handle, GFP_KERNEL); 7621 if (!kbuff_arr[i]) { 7622 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 7623 "kernel SGL buffer for IOCTL\n"); 7624 error = -ENOMEM; 7625 goto out; 7626 } 7627 7628 /* 7629 * We don't change the dma_coherent_mask, so 7630 * dma_alloc_coherent only returns 32bit addresses 7631 */ 7632 if (instance->consistent_mask_64bit) { 7633 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 7634 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7635 } else { 7636 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 7637 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7638 } 7639 7640 /* 7641 * We created a kernel buffer corresponding to the 7642 * user buffer. 
	if (ioc->sense_len) {
		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
					   &sense_handle, GFP_KERNEL);
		if (!sense) {
			error = -ENOMEM;
			goto out;
		}

		sense_ptr =
		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
		if (instance->consistent_mask_64bit)
			*sense_ptr = cpu_to_le64(sense_handle);
		else
			*sense_ptr = cpu_to_le32(sense_handle);
	}

	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
		cmd->sync_cmd = 0;
		dev_err(&instance->pdev->dev,
			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
			cmd->cmd_status_drv);
		return -EBUSY;
	}

	cmd->sync_cmd = 0;

	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress "
			"don't submit data to application\n");
		goto out;
	}
	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < ioc->sge_count; i++) {
		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
				 ioc->sgl[i].iov_len)) {
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (ioc->sense_len) {
		/*
		 * sense_ptr points to the location that has the user
		 * sense buffer address
		 */
		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
				ioc->sense_off);

		if (copy_to_user((void __user *)((unsigned long)
				 get_unaligned((unsigned long *)sense_ptr)),
				 sense, ioc->sense_len)) {
			dev_err(&instance->pdev->dev, "Failed to copy out to user "
				"sense data\n");
			error = -EFAULT;
			goto out;
		}
	}

	/*
	 * copy the status codes returned by the fw
	 */
	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
		error = -EFAULT;
	}

out:
	if (sense) {
		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
				  sense, sense_handle);
	}

	for (i = 0; i < ioc->sge_count; i++) {
		if (kbuff_arr[i]) {
			if (instance->consistent_mask_64bit)
				dma_free_coherent(&instance->pdev->dev,
					le32_to_cpu(kern_sge64[i].length),
					kbuff_arr[i],
					le64_to_cpu(kern_sge64[i].phys_addr));
			else
				dma_free_coherent(&instance->pdev->dev,
					le32_to_cpu(kern_sge32[i].length),
					kbuff_arr[i],
					le32_to_cpu(kern_sge32[i].phys_addr));
			kbuff_arr[i] = NULL;
		}
	}

	megasas_return_cmd(instance, cmd);
	return error;
}

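/*
 * Illustrative sketch only (not part of the driver): an application reaches
 * the handlers below through a character device node created by user space
 * on the major number registered in megasas_init(), doing roughly:
 *
 *	fd = open(<megaraid_sas_ioctl node>, O_RDWR);
 *	...fill a struct megasas_iocpacket with host_no, frame, SGL...
 *	ret = ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc_packet);
 *
 * The node name and the exact packet contents depend on the management
 * application; the driver itself only registers the "megaraid_sas_ioctl"
 * character device.
 */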
static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
{
	struct megasas_iocpacket __user *user_ioc =
		(struct megasas_iocpacket __user *)arg;
	struct megasas_iocpacket *ioc;
	struct megasas_instance *instance;
	int error;

	ioc = memdup_user(user_ioc, sizeof(*ioc));
	if (IS_ERR(ioc))
		return PTR_ERR(ioc);

	instance = megasas_lookup_instance(ioc->host_no);
	if (!instance) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	/* Block ioctls in VF mode */
	if (instance->requestorId && !allow_vf_ioctls) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Controller in crit error\n");
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (instance->unload == 1) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (down_interruptible(&instance->ioctl_sem)) {
		error = -ERESTARTSYS;
		goto out_kfree_ioc;
	}

	if (megasas_wait_for_adapter_operational(instance)) {
		error = -ENODEV;
		goto out_up;
	}

	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
	up(&instance->ioctl_sem);

out_kfree_ioc:
	kfree(ioc);
	return error;
}

static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
{
	struct megasas_instance *instance;
	struct megasas_aen aen;
	int error;

	if (file->private_data != file) {
		printk(KERN_DEBUG "megasas: fasync_helper was not "
		       "called first\n");
		return -EINVAL;
	}

	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
		return -EFAULT;

	instance = megasas_lookup_instance(aen.host_no);

	if (!instance)
		return -ENODEV;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
		return -ENODEV;

	if (instance->unload == 1)
		return -ENODEV;

	if (megasas_wait_for_adapter_operational(instance))
		return -ENODEV;

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, aen.seq_num,
				     aen.class_locale_word);
	mutex_unlock(&instance->reset_mutex);
	return error;
}

/**
 * megasas_mgmt_ioctl -	char node ioctl entry point
 * @file:	file pointer for the management character node
 * @cmd:	ioctl command, MEGASAS_IOC_FIRMWARE or MEGASAS_IOC_GET_AEN
 * @arg:	user pointer to the command-specific packet
 */
static long
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE:
		return megasas_mgmt_ioctl_fw(file, arg);

	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}

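/*
 * 32-bit compat path: in a 32-bit application's view of struct
 * megasas_iocpacket the iovec entries carry 32-bit pointers and sizes, so
 * the structure layout does not match the native 64-bit one. The handler
 * below therefore rebuilds a native packet in compat-allocated user space,
 * copying the fixed fields over and converting each user pointer with
 * compat_ptr(), before reusing the regular megasas_mgmt_ioctl_fw() path.
 */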
#ifdef CONFIG_COMPAT
static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
{
	struct compat_megasas_iocpacket __user *cioc =
		(struct compat_megasas_iocpacket __user *)arg;
	struct megasas_iocpacket __user *ioc =
		compat_alloc_user_space(sizeof(struct megasas_iocpacket));
	int i;
	int error = 0;
	compat_uptr_t ptr;
	u32 local_sense_off;
	u32 local_sense_len;
	u32 user_sense_off;

	if (clear_user(ioc, sizeof(*ioc)))
		return -EFAULT;

	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
		return -EFAULT;

	/*
	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
	 * sense_len is not null, so prepare the 64bit value under
	 * the same condition.
	 */
	if (get_user(local_sense_off, &ioc->sense_off) ||
	    get_user(local_sense_len, &ioc->sense_len) ||
	    get_user(user_sense_off, &cioc->sense_off))
		return -EFAULT;

	if (local_sense_off != user_sense_off)
		return -EINVAL;

	if (local_sense_len) {
		void __user **sense_ioc_ptr =
			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
		compat_uptr_t *sense_cioc_ptr =
			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
		if (get_user(ptr, sense_cioc_ptr) ||
		    put_user(compat_ptr(ptr), sense_ioc_ptr))
			return -EFAULT;
	}

	for (i = 0; i < MAX_IOCTL_SGE; i++) {
		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
		    copy_in_user(&ioc->sgl[i].iov_len,
				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
			return -EFAULT;
	}

	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);

	if (copy_in_user(&cioc->frame.hdr.cmd_status,
			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
		return -EFAULT;
	}
	return error;
}

static long
megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case MEGASAS_IOC_FIRMWARE32:
		return megasas_mgmt_compat_ioctl_fw(file, arg);
	case MEGASAS_IOC_GET_AEN:
		return megasas_mgmt_ioctl_aen(file, arg);
	}

	return -ENOTTY;
}
#endif

/*
 * File operations structure for management interface
 */
static const struct file_operations megasas_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = megasas_mgmt_open,
	.fasync = megasas_mgmt_fasync,
	.unlocked_ioctl = megasas_mgmt_ioctl,
	.poll = megasas_mgmt_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = megasas_mgmt_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

/*
 * PCI hotplug support registration structure
 */
static struct pci_driver megasas_pci_driver = {

	.name = "megaraid_sas",
	.id_table = megasas_pci_table,
	.probe = megasas_probe_one,
	.remove = megasas_detach_one,
	.suspend = megasas_suspend,
	.resume = megasas_resume,
	.shutdown = megasas_shutdown,
};

/*
 * Sysfs driver attributes
 */
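/*
 * These driver attributes show up under the megaraid_sas driver's sysfs
 * directory once the module is loaded (typically something like
 * /sys/bus/pci/drivers/megaraid_sas/); for example, reading "version" or
 * writing "dbg_lvl" from a shell would look roughly like:
 *
 *	cat .../megaraid_sas/version
 *	echo 1 > .../megaraid_sas/dbg_lvl
 *
 * Only dbg_lvl is writable; the remaining attributes are read-only.
 */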
static ssize_t version_show(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
			MEGASAS_VERSION);
}
static DRIVER_ATTR_RO(version);

static ssize_t release_date_show(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
			MEGASAS_RELDATE);
}
static DRIVER_ATTR_RO(release_date);

static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_poll_for_event);
}
static DRIVER_ATTR_RO(support_poll_for_event);

static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_device_change);
}
static DRIVER_ATTR_RO(support_device_change);

static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", megasas_dbg_lvl);
}

static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
			     size_t count)
{
	int retval = count;

	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
		retval = -EINVAL;
	}
	return retval;
}
static DRIVER_ATTR_RW(dbg_lvl);

static ssize_t
support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_nvme_encapsulation);
}

static DRIVER_ATTR_RO(support_nvme_encapsulation);

static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
	scsi_remove_device(sdev);
	scsi_device_put(sdev);
}

/**
 * megasas_update_device_list -	Update the PD and LD device list from FW
 *				after an AEN event notification
 * @instance:			Adapter soft state
 * @event_type:			Indicates type of event (PD or LD event)
 *
 * Return: Success or failure
 *
 * Issue DCMDs to Firmware to update the internal device list in driver.
 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
 */
static
int megasas_update_device_list(struct megasas_instance *instance,
			       int event_type)
{
	int dcmd_ret = DCMD_SUCCESS;

	if (instance->enable_fw_dev_list) {
		dcmd_ret = megasas_host_device_list_query(instance, false);
		if (dcmd_ret != DCMD_SUCCESS)
			goto out;
	} else {
		if (event_type & SCAN_PD_CHANNEL) {
			dcmd_ret = megasas_get_pd_list(instance);

			if (dcmd_ret != DCMD_SUCCESS)
				goto out;
		}

		if (event_type & SCAN_VD_CHANNEL) {
			if (!instance->requestorId ||
			    (instance->requestorId &&
			     megasas_get_ld_vf_affiliation(instance, 0))) {
				dcmd_ret = megasas_ld_list_query(instance,
						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
				if (dcmd_ret != DCMD_SUCCESS)
					goto out;
			}
		}
	}

out:
	return dcmd_ret;
}

/**
 * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
 *				after an AEN event notification
 * @instance:			Adapter soft state
 * @scan_type:			Indicates type of devices (PD/LD) to add
 *
 * Return: void
 */
static
void megasas_add_remove_devices(struct megasas_instance *instance,
				int scan_type)
{
	int i, j;
	u16 pd_index = 0;
	u16 ld_index = 0;
	u16 channel = 0, id = 0;
	struct Scsi_Host *host;
	struct scsi_device *sdev1;
	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;

	host = instance->host;

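	/*
	 * Firmware reports a flat target_id which is folded into the
	 * (channel, id) pair the SCSI mid-layer expects: system PDs occupy
	 * the first MEGASAS_MAX_PD_CHANNELS channels and LDs follow. As a
	 * worked example, assuming MEGASAS_MAX_DEV_PER_CHANNEL is 128, a
	 * system PD with target_id 130 maps to channel 1/id 2, while an LD
	 * with target_id 2 maps to channel MEGASAS_MAX_PD_CHANNELS/id 2.
	 */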
	if (instance->enable_fw_dev_list) {
		targetid_list = instance->host_device_list_buf;
		for (i = 0; i < targetid_list->count; i++) {
			targetid_entry = &targetid_list->host_device_list[i];
			if (targetid_entry->flags.u.bits.is_sys_pd) {
				channel = le16_to_cpu(targetid_entry->target_id) /
						MEGASAS_MAX_DEV_PER_CHANNEL;
				id = le16_to_cpu(targetid_entry->target_id) %
						MEGASAS_MAX_DEV_PER_CHANNEL;
			} else {
				channel = MEGASAS_MAX_PD_CHANNELS +
						(le16_to_cpu(targetid_entry->target_id) /
						MEGASAS_MAX_DEV_PER_CHANNEL);
				id = le16_to_cpu(targetid_entry->target_id) %
						MEGASAS_MAX_DEV_PER_CHANNEL;
			}
			sdev1 = scsi_device_lookup(host, channel, id, 0);
			if (!sdev1) {
				scsi_add_device(host, channel, id, 0);
			} else {
				scsi_device_put(sdev1);
			}
		}
	}

	if (scan_type & SCAN_PD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
				sdev1 = scsi_device_lookup(host, i, j, 0);
				if (instance->pd_list[pd_index].driveState ==
							MR_PD_STATE_SYSTEM) {
					if (!sdev1)
						scsi_add_device(host, i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

	if (scan_type & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host,
						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

}

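/**
 * megasas_aen_polling -	deferred AEN (asynchronous event notification)
 *				handler
 * @work:			work item embedded in struct megasas_aen_event
 *
 * Runs from the event work queue once firmware has delivered an AEN: decodes
 * the event, refreshes the PD/LD device list where the event requires it,
 * adds or removes SCSI devices accordingly, and finally re-registers for the
 * next AEN using the last seen sequence number plus one.
 */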
static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	int event_type = 0;
	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
	int error;
	u8 dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Adjust event workqueue thread wait time for VF mode */
	if (instance->requestorId)
		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			event_type = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			event_type = SCAN_VD_CHANNEL;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				 instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			event_type = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

	if (event_type)
		dcmd_ret = megasas_update_device_list(instance, event_type);

	mutex_unlock(&instance->reset_mutex);

	if (event_type && dcmd_ret == DCMD_SUCCESS)
		megasas_add_remove_devices(instance, event_type);

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
				     class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

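/*
 * Module load/unload: megasas_init() registers the management character
 * device, the PCI driver and the driver sysfs attributes, unwinding through
 * the goto labels in reverse order of creation on failure; megasas_exit()
 * removes the attributes and unregisters the PCI driver and the character
 * device.
 */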
/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Booted in kdump kernel, minimize memory footprints by
	 * disabling few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	return rval;

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

	pci_unregister_driver(&megasas_pci_driver);
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);