// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

extern struct dentry *megasas_debugfs_root;
extern void megasas_init_debugfs(void);
extern void megasas_exit_debugfs(void);
extern void megasas_setup_debugfs(struct megasas_instance *instance);
extern void megasas_destroy_debugfs(struct megasas_instance *instance);

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to three times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 3);
		return ret_val;
	} else {
		return readl(addr);
	}
}

/**
 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
 * @instance:		Adapter soft state
 * @dcmd:		DCMD frame inside MFI command
 * @dma_addr:		DMA address of buffer to be passed to FW
 * @dma_len:		Length of DMA buffer to be passed to FW
 * @return:		void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
				cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}

static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
						  *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}

/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
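		/*
		 * Fusion adapters pair each MFI frame with a fusion command
		 * whose tag sits just past the SCSI command range, so return
		 * that fusion command to its pool as well.
		 */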
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:		Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
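	/*
	 * MFI frames are at least 64-byte aligned, so the low bits of the
	 * frame address are free to carry the frame count (shifted up by
	 * one) plus a "valid" bit when posting to the inbound queue port.
	 */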
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
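	/*
	 * The write to the low queue port posts the command, so publish the
	 * upper 32 bits of the frame address first.
	 */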
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_gen2 -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}
/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

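	/*
	 * Write the reset key sequence to the sequence register to unlock
	 * write access to the host diagnostic register.
	 */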
	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
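	/*
	 * Preset the driver-side status to an invalid value; the completion
	 * path overwrites it, which is what the waits below look for.
	 */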
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev,
				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
				cmd->frame->dcmd.opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort a previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;
	u32 opcode;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			opcode = cmd_to_abort->frame->dcmd.opcode;
			dev_err(&instance->pdev->dev,
				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
				opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame- io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for the number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGE for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp->device);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if ((scp->request->timeout / HZ) > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

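		/* A 6-byte CDB carries only a 21-bit LBA; mask off the rest. */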
		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);

		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);

	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type -	Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @scmd:		SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}

/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u16 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
				" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
				instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
				le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
				le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
				"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
				instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
				pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
				le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command -	Queue entry point
 * @scmd:			SCSI command to be queued
 * @done:			Callback entry point
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_REQUEUE << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd->device) &&
	    (scmd->device->id >= instance->fw_supported_vd_count ||
		scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

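	/*
	 * Firmware that does not support a host-issued SYNCHRONIZE_CACHE for
	 * logical drives handles cache flushes on its own, so complete the
	 * command as successful instead of passing it down.
	 */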
	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
	    MEGASAS_IS_LOGICAL(scmd->device) &&
	    (!instance->fw_sync_cache_support)) {
		scmd->result = DID_OK << 16;
		goto out_done;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

 out_done:
	scmd->scsi_done(scmd);
	return 0;
}

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
 * megasas_set_dynamic_target_properties -
 * Device property set by driver may not be static and it is required to be
 * updated after OCR
 *
 * set tm_capable.
 * set dma alignment (only for eedp protection enable vd).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
					   bool is_target_prop)
{
	u16 pd_index = 0, ld;
	u32 device_id;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	if (!fusion || !mr_device_priv_data)
		return;

	if (MEGASAS_IS_LOGICAL(sdev)) {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		if (ld >= instance->fw_supported_vd_count)
			return;
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	} else if (instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	}

	if (is_target_prop && instance->tgt_prop->reset_tmo) {
		/*
		 * If FW provides a target reset timeout value, driver will use
		 * it. If not set, fallback to default values.
		 */
		mr_device_priv_data->target_reset_tmo =
			min_t(u8, instance->max_reset_tmo,
			      instance->tgt_prop->reset_tmo);
		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
	} else {
		mr_device_priv_data->target_reset_tmo =
						MEGASAS_DEFAULT_TM_TIMEOUT;
		mr_device_priv_data->task_abort_tmo =
						MEGASAS_DEFAULT_TM_TIMEOUT;
	}
}

/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
 *
 * MR firmware provides value in KB. Caller of this function converts
 * kb into bytes.
 *
 * e.g. MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
 * MR firmware provides value 128 as (32 * 4K) = 128K.
 *
 * @sdev:		scsi device
 * @max_io_size:	maximum io transfer size
 *
 */
static inline void
megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
{
	struct megasas_instance *instance;
	u32 mr_nvme_pg_size;

	instance = (struct megasas_instance *)sdev->host->hostdata;
	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
				MR_DEFAULT_NVME_PAGE_SIZE);

	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));

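	/*
	 * Keep scatter-gather segments from straddling the NVMe device page
	 * boundary so the firmware can hand the IO to the drive unchanged.
	 */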
1923 * 1924 * @sdev: scsi device 1925 * @max_io_size: maximum io transfer size 1926 * 1927 */ 1928 static inline void 1929 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1930 { 1931 struct megasas_instance *instance; 1932 u32 mr_nvme_pg_size; 1933 1934 instance = (struct megasas_instance *)sdev->host->hostdata; 1935 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1936 MR_DEFAULT_NVME_PAGE_SIZE); 1937 1938 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1939 1940 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1941 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1942 } 1943 1944 1945 /* 1946 * megasas_set_static_target_properties - 1947 * Device property set by driver are static and it is not required to be 1948 * updated after OCR. 1949 * 1950 * set io timeout 1951 * set device queue depth 1952 * set nvme device properties. see - megasas_set_nvme_device_properties 1953 * 1954 * @sdev: scsi device 1955 * @is_target_prop true, if fw provided target properties. 1956 */ 1957 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1958 bool is_target_prop) 1959 { 1960 u8 interface_type; 1961 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1962 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1963 u32 tgt_device_qd; 1964 struct megasas_instance *instance; 1965 struct MR_PRIV_DEVICE *mr_device_priv_data; 1966 1967 instance = megasas_lookup_instance(sdev->host->host_no); 1968 mr_device_priv_data = sdev->hostdata; 1969 interface_type = mr_device_priv_data->interface_type; 1970 1971 /* 1972 * The RAID firmware may require extended timeouts. 1973 */ 1974 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1975 1976 switch (interface_type) { 1977 case SAS_PD: 1978 device_qd = MEGASAS_SAS_QD; 1979 break; 1980 case SATA_PD: 1981 device_qd = MEGASAS_SATA_QD; 1982 break; 1983 case NVME_PD: 1984 device_qd = MEGASAS_NVME_QD; 1985 break; 1986 } 1987 1988 if (is_target_prop) { 1989 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1990 if (tgt_device_qd && 1991 (tgt_device_qd <= instance->host->can_queue)) 1992 device_qd = tgt_device_qd; 1993 1994 /* max_io_size_kb will be set to non zero for 1995 * nvme based vd and syspd. 1996 */ 1997 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1998 } 1999 2000 if (instance->nvme_page_size && max_io_size_kb) 2001 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 2002 2003 scsi_change_queue_depth(sdev, device_qd); 2004 2005 } 2006 2007 2008 static int megasas_slave_configure(struct scsi_device *sdev) 2009 { 2010 u16 pd_index = 0; 2011 struct megasas_instance *instance; 2012 int ret_target_prop = DCMD_FAILED; 2013 bool is_target_prop = false; 2014 2015 instance = megasas_lookup_instance(sdev->host->host_no); 2016 if (instance->pd_list_not_supported) { 2017 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2018 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2019 sdev->id; 2020 if (instance->pd_list[pd_index].driveState != 2021 MR_PD_STATE_SYSTEM) 2022 return -ENXIO; 2023 } 2024 } 2025 2026 mutex_lock(&instance->reset_mutex); 2027 /* Send DCMD to Firmware and cache the information */ 2028 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2029 megasas_get_pd_info(instance, sdev); 2030 2031 /* Some ventura firmware may not have instance->nvme_page_size set. 
2032 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2033 */ 2034 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2035 ret_target_prop = megasas_get_target_prop(instance, sdev); 2036 2037 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2038 megasas_set_static_target_properties(sdev, is_target_prop); 2039 2040 /* This sdev property may change post OCR */ 2041 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2042 2043 mutex_unlock(&instance->reset_mutex); 2044 2045 return 0; 2046 } 2047 2048 static int megasas_slave_alloc(struct scsi_device *sdev) 2049 { 2050 u16 pd_index = 0; 2051 struct megasas_instance *instance ; 2052 struct MR_PRIV_DEVICE *mr_device_priv_data; 2053 2054 instance = megasas_lookup_instance(sdev->host->host_no); 2055 if (!MEGASAS_IS_LOGICAL(sdev)) { 2056 /* 2057 * Open the OS scan to the SYSTEM PD 2058 */ 2059 pd_index = 2060 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2061 sdev->id; 2062 if ((instance->pd_list_not_supported || 2063 instance->pd_list[pd_index].driveState == 2064 MR_PD_STATE_SYSTEM)) { 2065 goto scan_target; 2066 } 2067 return -ENXIO; 2068 } 2069 2070 scan_target: 2071 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2072 GFP_KERNEL); 2073 if (!mr_device_priv_data) 2074 return -ENOMEM; 2075 sdev->hostdata = mr_device_priv_data; 2076 2077 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2078 instance->r1_ldio_hint_default); 2079 return 0; 2080 } 2081 2082 static void megasas_slave_destroy(struct scsi_device *sdev) 2083 { 2084 kfree(sdev->hostdata); 2085 sdev->hostdata = NULL; 2086 } 2087 2088 /* 2089 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2090 * kill adapter 2091 * @instance: Adapter soft state 2092 * 2093 */ 2094 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2095 { 2096 int i; 2097 struct megasas_cmd *cmd_mfi; 2098 struct megasas_cmd_fusion *cmd_fusion; 2099 struct fusion_context *fusion = instance->ctrl_context; 2100 2101 /* Find all outstanding ioctls */ 2102 if (fusion) { 2103 for (i = 0; i < instance->max_fw_cmds; i++) { 2104 cmd_fusion = fusion->cmd_list[i]; 2105 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2106 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2107 if (cmd_mfi->sync_cmd && 2108 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2109 cmd_mfi->frame->hdr.cmd_status = 2110 MFI_STAT_WRONG_STATE; 2111 megasas_complete_cmd(instance, 2112 cmd_mfi, DID_OK); 2113 } 2114 } 2115 } 2116 } else { 2117 for (i = 0; i < instance->max_fw_cmds; i++) { 2118 cmd_mfi = instance->cmd_list[i]; 2119 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2120 MFI_CMD_ABORT) 2121 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2122 } 2123 } 2124 } 2125 2126 2127 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2128 { 2129 /* Set critical error to block I/O & ioctls in case caller didn't */ 2130 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2131 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2132 msleep(1000); 2133 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2134 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2135 (instance->adapter_type != MFI_SERIES)) { 2136 if (!instance->requestorId) { 2137 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2138 /* Flush */ 2139 readl(&instance->reg_set->doorbell); 2140 } 2141 if (instance->requestorId && instance->peerIsPresent) 2142 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2143 } else { 2144 
writel(MFI_STOP_ADP, 2145 &instance->reg_set->inbound_doorbell); 2146 } 2147 /* Complete outstanding ioctls when adapter is killed */ 2148 megasas_complete_outstanding_ioctls(instance); 2149 } 2150 2151 /** 2152 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2153 * restored to max value 2154 * @instance: Adapter soft state 2155 * 2156 */ 2157 void 2158 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2159 { 2160 unsigned long flags; 2161 2162 if (instance->flag & MEGASAS_FW_BUSY 2163 && time_after(jiffies, instance->last_time + 5 * HZ) 2164 && atomic_read(&instance->fw_outstanding) < 2165 instance->throttlequeuedepth + 1) { 2166 2167 spin_lock_irqsave(instance->host->host_lock, flags); 2168 instance->flag &= ~MEGASAS_FW_BUSY; 2169 2170 instance->host->can_queue = instance->cur_can_queue; 2171 spin_unlock_irqrestore(instance->host->host_lock, flags); 2172 } 2173 } 2174 2175 /** 2176 * megasas_complete_cmd_dpc - Returns FW's controller structure 2177 * @instance_addr: Address of adapter soft state 2178 * 2179 * Tasklet to complete cmds 2180 */ 2181 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2182 { 2183 u32 producer; 2184 u32 consumer; 2185 u32 context; 2186 struct megasas_cmd *cmd; 2187 struct megasas_instance *instance = 2188 (struct megasas_instance *)instance_addr; 2189 unsigned long flags; 2190 2191 /* If we have already declared adapter dead, donot complete cmds */ 2192 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2193 return; 2194 2195 spin_lock_irqsave(&instance->completion_lock, flags); 2196 2197 producer = le32_to_cpu(*instance->producer); 2198 consumer = le32_to_cpu(*instance->consumer); 2199 2200 while (consumer != producer) { 2201 context = le32_to_cpu(instance->reply_queue[consumer]); 2202 if (context >= instance->max_fw_cmds) { 2203 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2204 context); 2205 BUG(); 2206 } 2207 2208 cmd = instance->cmd_list[context]; 2209 2210 megasas_complete_cmd(instance, cmd, DID_OK); 2211 2212 consumer++; 2213 if (consumer == (instance->max_fw_cmds + 1)) { 2214 consumer = 0; 2215 } 2216 } 2217 2218 *instance->consumer = cpu_to_le32(producer); 2219 2220 spin_unlock_irqrestore(&instance->completion_lock, flags); 2221 2222 /* 2223 * Check if we can restore can_queue 2224 */ 2225 megasas_check_and_restore_queue_depth(instance); 2226 } 2227 2228 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2229 2230 /** 2231 * megasas_start_timer - Initializes sriov heartbeat timer object 2232 * @instance: Adapter soft state 2233 * 2234 */ 2235 void megasas_start_timer(struct megasas_instance *instance) 2236 { 2237 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2238 2239 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2240 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2241 add_timer(timer); 2242 } 2243 2244 static void 2245 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2246 2247 static void 2248 process_fw_state_change_wq(struct work_struct *work); 2249 2250 static void megasas_do_ocr(struct megasas_instance *instance) 2251 { 2252 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2253 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2254 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2255 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2256 } 2257 instance->instancet->disable_intr(instance); 2258 
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2259 instance->issuepend_done = 0; 2260 2261 atomic_set(&instance->fw_outstanding, 0); 2262 megasas_internal_reset_defer_cmds(instance); 2263 process_fw_state_change_wq(&instance->work_init); 2264 } 2265 2266 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2267 int initial) 2268 { 2269 struct megasas_cmd *cmd; 2270 struct megasas_dcmd_frame *dcmd; 2271 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2272 dma_addr_t new_affiliation_111_h; 2273 int ld, retval = 0; 2274 u8 thisVf; 2275 2276 cmd = megasas_get_cmd(instance); 2277 2278 if (!cmd) { 2279 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2280 "Failed to get cmd for scsi%d\n", 2281 instance->host->host_no); 2282 return -ENOMEM; 2283 } 2284 2285 dcmd = &cmd->frame->dcmd; 2286 2287 if (!instance->vf_affiliation_111) { 2288 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2289 "affiliation for scsi%d\n", instance->host->host_no); 2290 megasas_return_cmd(instance, cmd); 2291 return -ENOMEM; 2292 } 2293 2294 if (initial) 2295 memset(instance->vf_affiliation_111, 0, 2296 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2297 else { 2298 new_affiliation_111 = 2299 dma_alloc_coherent(&instance->pdev->dev, 2300 sizeof(struct MR_LD_VF_AFFILIATION_111), 2301 &new_affiliation_111_h, GFP_KERNEL); 2302 if (!new_affiliation_111) { 2303 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2304 "memory for new affiliation for scsi%d\n", 2305 instance->host->host_no); 2306 megasas_return_cmd(instance, cmd); 2307 return -ENOMEM; 2308 } 2309 } 2310 2311 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2312 2313 dcmd->cmd = MFI_CMD_DCMD; 2314 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2315 dcmd->sge_count = 1; 2316 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2317 dcmd->timeout = 0; 2318 dcmd->pad_0 = 0; 2319 dcmd->data_xfer_len = 2320 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2321 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2322 2323 if (initial) 2324 dcmd->sgl.sge32[0].phys_addr = 2325 cpu_to_le32(instance->vf_affiliation_111_h); 2326 else 2327 dcmd->sgl.sge32[0].phys_addr = 2328 cpu_to_le32(new_affiliation_111_h); 2329 2330 dcmd->sgl.sge32[0].length = cpu_to_le32( 2331 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2332 2333 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2334 "scsi%d\n", instance->host->host_no); 2335 2336 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2337 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2338 " failed with status 0x%x for scsi%d\n", 2339 dcmd->cmd_status, instance->host->host_no); 2340 retval = 1; /* Do a scan if we couldn't get affiliation */ 2341 goto out; 2342 } 2343 2344 if (!initial) { 2345 thisVf = new_affiliation_111->thisVf; 2346 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2347 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2348 new_affiliation_111->map[ld].policy[thisVf]) { 2349 dev_warn(&instance->pdev->dev, "SR-IOV: " 2350 "Got new LD/VF affiliation for scsi%d\n", 2351 instance->host->host_no); 2352 memcpy(instance->vf_affiliation_111, 2353 new_affiliation_111, 2354 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2355 retval = 1; 2356 goto out; 2357 } 2358 } 2359 out: 2360 if (new_affiliation_111) { 2361 dma_free_coherent(&instance->pdev->dev, 2362 sizeof(struct MR_LD_VF_AFFILIATION_111), 2363 new_affiliation_111, 2364 new_affiliation_111_h); 
2365 } 2366 2367 megasas_return_cmd(instance, cmd); 2368 2369 return retval; 2370 } 2371 2372 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2373 int initial) 2374 { 2375 struct megasas_cmd *cmd; 2376 struct megasas_dcmd_frame *dcmd; 2377 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2378 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2379 dma_addr_t new_affiliation_h; 2380 int i, j, retval = 0, found = 0, doscan = 0; 2381 u8 thisVf; 2382 2383 cmd = megasas_get_cmd(instance); 2384 2385 if (!cmd) { 2386 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2387 "Failed to get cmd for scsi%d\n", 2388 instance->host->host_no); 2389 return -ENOMEM; 2390 } 2391 2392 dcmd = &cmd->frame->dcmd; 2393 2394 if (!instance->vf_affiliation) { 2395 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2396 "affiliation for scsi%d\n", instance->host->host_no); 2397 megasas_return_cmd(instance, cmd); 2398 return -ENOMEM; 2399 } 2400 2401 if (initial) 2402 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2403 sizeof(struct MR_LD_VF_AFFILIATION)); 2404 else { 2405 new_affiliation = 2406 dma_alloc_coherent(&instance->pdev->dev, 2407 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2408 &new_affiliation_h, GFP_KERNEL); 2409 if (!new_affiliation) { 2410 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2411 "memory for new affiliation for scsi%d\n", 2412 instance->host->host_no); 2413 megasas_return_cmd(instance, cmd); 2414 return -ENOMEM; 2415 } 2416 } 2417 2418 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2419 2420 dcmd->cmd = MFI_CMD_DCMD; 2421 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2422 dcmd->sge_count = 1; 2423 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2424 dcmd->timeout = 0; 2425 dcmd->pad_0 = 0; 2426 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2427 sizeof(struct MR_LD_VF_AFFILIATION)); 2428 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2429 2430 if (initial) 2431 dcmd->sgl.sge32[0].phys_addr = 2432 cpu_to_le32(instance->vf_affiliation_h); 2433 else 2434 dcmd->sgl.sge32[0].phys_addr = 2435 cpu_to_le32(new_affiliation_h); 2436 2437 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2438 sizeof(struct MR_LD_VF_AFFILIATION)); 2439 2440 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2441 "scsi%d\n", instance->host->host_no); 2442 2443 2444 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2445 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2446 " failed with status 0x%x for scsi%d\n", 2447 dcmd->cmd_status, instance->host->host_no); 2448 retval = 1; /* Do a scan if we couldn't get affiliation */ 2449 goto out; 2450 } 2451 2452 if (!initial) { 2453 if (!new_affiliation->ldCount) { 2454 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2455 "affiliation for passive path for scsi%d\n", 2456 instance->host->host_no); 2457 retval = 1; 2458 goto out; 2459 } 2460 newmap = new_affiliation->map; 2461 savedmap = instance->vf_affiliation->map; 2462 thisVf = new_affiliation->thisVf; 2463 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2464 found = 0; 2465 for (j = 0; j < instance->vf_affiliation->ldCount; 2466 j++) { 2467 if (newmap->ref.targetId == 2468 savedmap->ref.targetId) { 2469 found = 1; 2470 if (newmap->policy[thisVf] != 2471 savedmap->policy[thisVf]) { 2472 doscan = 1; 2473 goto out; 2474 } 2475 } 2476 savedmap = (struct MR_LD_VF_MAP *) 2477 ((unsigned char 
*)savedmap + 2478 savedmap->size); 2479 } 2480 if (!found && newmap->policy[thisVf] != 2481 MR_LD_ACCESS_HIDDEN) { 2482 doscan = 1; 2483 goto out; 2484 } 2485 newmap = (struct MR_LD_VF_MAP *) 2486 ((unsigned char *)newmap + newmap->size); 2487 } 2488 2489 newmap = new_affiliation->map; 2490 savedmap = instance->vf_affiliation->map; 2491 2492 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2493 found = 0; 2494 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2495 if (savedmap->ref.targetId == 2496 newmap->ref.targetId) { 2497 found = 1; 2498 if (savedmap->policy[thisVf] != 2499 newmap->policy[thisVf]) { 2500 doscan = 1; 2501 goto out; 2502 } 2503 } 2504 newmap = (struct MR_LD_VF_MAP *) 2505 ((unsigned char *)newmap + 2506 newmap->size); 2507 } 2508 if (!found && savedmap->policy[thisVf] != 2509 MR_LD_ACCESS_HIDDEN) { 2510 doscan = 1; 2511 goto out; 2512 } 2513 savedmap = (struct MR_LD_VF_MAP *) 2514 ((unsigned char *)savedmap + 2515 savedmap->size); 2516 } 2517 } 2518 out: 2519 if (doscan) { 2520 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2521 "affiliation for scsi%d\n", instance->host->host_no); 2522 memcpy(instance->vf_affiliation, new_affiliation, 2523 new_affiliation->size); 2524 retval = 1; 2525 } 2526 2527 if (new_affiliation) 2528 dma_free_coherent(&instance->pdev->dev, 2529 (MAX_LOGICAL_DRIVES + 1) * 2530 sizeof(struct MR_LD_VF_AFFILIATION), 2531 new_affiliation, new_affiliation_h); 2532 megasas_return_cmd(instance, cmd); 2533 2534 return retval; 2535 } 2536 2537 /* This function will get the current SR-IOV LD/VF affiliation */ 2538 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2539 int initial) 2540 { 2541 int retval; 2542 2543 if (instance->PlasmaFW111) 2544 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2545 else 2546 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2547 return retval; 2548 } 2549 2550 /* This function will tell FW to start the SR-IOV heartbeat */ 2551 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2552 int initial) 2553 { 2554 struct megasas_cmd *cmd; 2555 struct megasas_dcmd_frame *dcmd; 2556 int retval = 0; 2557 2558 cmd = megasas_get_cmd(instance); 2559 2560 if (!cmd) { 2561 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2562 "Failed to get cmd for scsi%d\n", 2563 instance->host->host_no); 2564 return -ENOMEM; 2565 } 2566 2567 dcmd = &cmd->frame->dcmd; 2568 2569 if (initial) { 2570 instance->hb_host_mem = 2571 dma_alloc_coherent(&instance->pdev->dev, 2572 sizeof(struct MR_CTRL_HB_HOST_MEM), 2573 &instance->hb_host_mem_h, 2574 GFP_KERNEL); 2575 if (!instance->hb_host_mem) { 2576 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2577 " memory for heartbeat host memory for scsi%d\n", 2578 instance->host->host_no); 2579 retval = -ENOMEM; 2580 goto out; 2581 } 2582 } 2583 2584 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2585 2586 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2587 dcmd->cmd = MFI_CMD_DCMD; 2588 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2589 dcmd->sge_count = 1; 2590 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2591 dcmd->timeout = 0; 2592 dcmd->pad_0 = 0; 2593 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2594 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2595 2596 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2597 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2598 2599 dev_warn(&instance->pdev->dev, "SR-IOV: 
Starting heartbeat for scsi%d\n", 2600 instance->host->host_no); 2601 2602 if ((instance->adapter_type != MFI_SERIES) && 2603 !instance->mask_interrupts) 2604 retval = megasas_issue_blocked_cmd(instance, cmd, 2605 MEGASAS_ROUTINE_WAIT_TIME_VF); 2606 else 2607 retval = megasas_issue_polled(instance, cmd); 2608 2609 if (retval) { 2610 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2611 "_MEM_ALLOC DCMD %s for scsi%d\n", 2612 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2613 "timed out" : "failed", instance->host->host_no); 2614 retval = 1; 2615 } 2616 2617 out: 2618 megasas_return_cmd(instance, cmd); 2619 2620 return retval; 2621 } 2622 2623 /* Handler for SR-IOV heartbeat */ 2624 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2625 { 2626 struct megasas_instance *instance = 2627 from_timer(instance, t, sriov_heartbeat_timer); 2628 2629 if (instance->hb_host_mem->HB.fwCounter != 2630 instance->hb_host_mem->HB.driverCounter) { 2631 instance->hb_host_mem->HB.driverCounter = 2632 instance->hb_host_mem->HB.fwCounter; 2633 mod_timer(&instance->sriov_heartbeat_timer, 2634 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2635 } else { 2636 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2637 "completed for scsi%d\n", instance->host->host_no); 2638 schedule_work(&instance->work_init); 2639 } 2640 } 2641 2642 /** 2643 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2644 * @instance: Adapter soft state 2645 * 2646 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2647 * complete all its outstanding commands. Returns error if one or more IOs 2648 * are pending after this time period. It also marks the controller dead. 2649 */ 2650 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2651 { 2652 int i, sl, outstanding; 2653 u32 reset_index; 2654 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2655 unsigned long flags; 2656 struct list_head clist_local; 2657 struct megasas_cmd *reset_cmd; 2658 u32 fw_state; 2659 2660 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2661 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2662 __func__, __LINE__); 2663 return FAILED; 2664 } 2665 2666 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2667 2668 INIT_LIST_HEAD(&clist_local); 2669 spin_lock_irqsave(&instance->hba_lock, flags); 2670 list_splice_init(&instance->internal_reset_pending_q, 2671 &clist_local); 2672 spin_unlock_irqrestore(&instance->hba_lock, flags); 2673 2674 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2675 for (i = 0; i < wait_time; i++) { 2676 msleep(1000); 2677 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2678 break; 2679 } 2680 2681 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2682 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2683 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2684 return FAILED; 2685 } 2686 2687 reset_index = 0; 2688 while (!list_empty(&clist_local)) { 2689 reset_cmd = list_entry((&clist_local)->next, 2690 struct megasas_cmd, list); 2691 list_del_init(&reset_cmd->list); 2692 if (reset_cmd->scmd) { 2693 reset_cmd->scmd->result = DID_REQUEUE << 16; 2694 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2695 reset_index, reset_cmd, 2696 reset_cmd->scmd->cmnd[0]); 2697 2698 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2699 megasas_return_cmd(instance, reset_cmd); 2700 } else if (reset_cmd->sync_cmd) { 2701 
dev_notice(&instance->pdev->dev, "%p synch cmds" 2702 "reset queue\n", 2703 reset_cmd); 2704 2705 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2706 instance->instancet->fire_cmd(instance, 2707 reset_cmd->frame_phys_addr, 2708 0, instance->reg_set); 2709 } else { 2710 dev_notice(&instance->pdev->dev, "%p unexpected" 2711 "cmds lst\n", 2712 reset_cmd); 2713 } 2714 reset_index++; 2715 } 2716 2717 return SUCCESS; 2718 } 2719 2720 for (i = 0; i < resetwaittime; i++) { 2721 outstanding = atomic_read(&instance->fw_outstanding); 2722 2723 if (!outstanding) 2724 break; 2725 2726 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2727 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2728 "commands to complete\n",i,outstanding); 2729 /* 2730 * Call cmd completion routine. Cmd to be 2731 * be completed directly without depending on isr. 2732 */ 2733 megasas_complete_cmd_dpc((unsigned long)instance); 2734 } 2735 2736 msleep(1000); 2737 } 2738 2739 i = 0; 2740 outstanding = atomic_read(&instance->fw_outstanding); 2741 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2742 2743 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2744 goto no_outstanding; 2745 2746 if (instance->disableOnlineCtrlReset) 2747 goto kill_hba_and_failed; 2748 do { 2749 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2750 dev_info(&instance->pdev->dev, 2751 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2752 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2753 if (i == 3) 2754 goto kill_hba_and_failed; 2755 megasas_do_ocr(instance); 2756 2757 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2758 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2759 __func__, __LINE__); 2760 return FAILED; 2761 } 2762 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2763 __func__, __LINE__); 2764 2765 for (sl = 0; sl < 10; sl++) 2766 msleep(500); 2767 2768 outstanding = atomic_read(&instance->fw_outstanding); 2769 2770 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2771 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2772 goto no_outstanding; 2773 } 2774 i++; 2775 } while (i <= 3); 2776 2777 no_outstanding: 2778 2779 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2780 __func__, __LINE__); 2781 return SUCCESS; 2782 2783 kill_hba_and_failed: 2784 2785 /* Reset not supported, kill adapter */ 2786 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2787 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2788 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2789 atomic_read(&instance->fw_outstanding)); 2790 megasas_dump_pending_frames(instance); 2791 megaraid_sas_kill_hba(instance); 2792 2793 return FAILED; 2794 } 2795 2796 /** 2797 * megasas_generic_reset - Generic reset routine 2798 * @scmd: Mid-layer SCSI command 2799 * 2800 * This routine implements a generic reset handler for device, bus and host 2801 * reset requests. Device, bus and host specific reset handlers can use this 2802 * function after they do their specific tasks. 
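 *
 * Returns SUCCESS when the adapter is operational and all outstanding
 * commands have been dealt with within the reset wait time, FAILED
 * otherwise.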
2803 */
2804 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2805 {
2806 	int ret_val;
2807 	struct megasas_instance *instance;
2808 
2809 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2810 
2811 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2812 		scmd->cmnd[0], scmd->retries);
2813 
2814 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2815 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2816 		return FAILED;
2817 	}
2818 
2819 	ret_val = megasas_wait_for_outstanding(instance);
2820 	if (ret_val == SUCCESS)
2821 		dev_notice(&instance->pdev->dev, "reset successful\n");
2822 	else
2823 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2824 
2825 	return ret_val;
2826 }
2827 
2828 /**
2829 * megasas_reset_timer - quiesce the adapter if required
2830 * @scmd: scsi cmnd
2831 *
2832 * Sets the FW busy flag and reduces the host->can_queue if the
2833 * cmd has not been completed within the timeout period.
2834 */
2835 static enum
2836 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2837 {
2838 	struct megasas_instance *instance;
2839 	unsigned long flags;
2840 
2841 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2842 		(scmd_timeout * 2) * HZ)) {
2843 		return BLK_EH_DONE;
2844 	}
2845 
2846 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2847 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2848 		/* FW is busy, throttle IO */
2849 		spin_lock_irqsave(instance->host->host_lock, flags);
2850 
2851 		instance->host->can_queue = instance->throttlequeuedepth;
2852 		instance->last_time = jiffies;
2853 		instance->flag |= MEGASAS_FW_BUSY;
2854 
2855 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2856 	}
2857 	return BLK_EH_RESET_TIMER;
2858 }
2859 
2860 /**
2861 * megasas_dump - This function will print hexdump of provided buffer.
2862 * @buf: Buffer to be dumped
2863 * @sz: Size in bytes
2864 * @format: Different formats of dumping e.g. format=n will
2865 * cause only 'n' 32 bit words to be dumped in a single
2866 * line.
2867 */
2868 inline void
2869 megasas_dump(void *buf, int sz, int format)
2870 {
2871 	int i;
2872 	__le32 *buf_loc = (__le32 *)buf;
2873 
2874 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2875 		if ((i % format) == 0) {
2876 			if (i != 0)
2877 				printk(KERN_CONT "\n");
2878 			printk(KERN_CONT "%08x: ", (i * 4));
2879 		}
2880 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2881 	}
2882 	printk(KERN_CONT "\n");
2883 }
2884 
2885 /**
2886 * megasas_dump_reg_set - This function will print hexdump of register set
2887 * @reg_set: Pointer to the register set to be dumped
2888 *
2889 * The first 256 bytes of the register set are dumped as 32 bit
2890 * words, one word per line, using readl() to fetch each value
2891 * from the hardware.
2892 */
2893 inline void
2894 megasas_dump_reg_set(void __iomem *reg_set)
2895 {
2896 	unsigned int i, sz = 256;
2897 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2898 
2899 	for (i = 0; i < (sz / sizeof(u32)); i++)
2900 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2901 }
2902 
2903 /**
2904 * megasas_dump_fusion_io - This function will print key details
2905 * of SCSI IO
2906 * @scmd: SCSI command pointer of SCSI IO
2907 */
2908 void
2909 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2910 {
2911 	struct megasas_cmd_fusion *cmd;
2912 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2913 	struct megasas_instance *instance;
2914 
2915 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2916 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2917 
2918 	scmd_printk(KERN_INFO, scmd,
2919 		"scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
2920 		scmd, scmd->retries, scmd->allowed);
2921 	scsi_print_command(scmd);
2922 
2923 	if (cmd) {
2924 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2925 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2926 		scmd_printk(KERN_INFO, scmd,
2927 			"RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
2928 			req_desc->SCSIIO.RequestFlags,
2929 			req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2930 			req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2931 
2932 		printk(KERN_INFO "IO request frame:\n");
2933 		megasas_dump(cmd->io_request,
2934 			MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2935 		printk(KERN_INFO "Chain frame:\n");
2936 		megasas_dump(cmd->sg_frame,
2937 			instance->max_chain_frame_sz, 8);
2938 	}
2939 
2940 }
2941 
2942 /*
2943 * megasas_dump_sys_regs - This function will dump system registers through
2944 * sysfs.
2945 * @reg_set: Pointer to System register set.
2946 * @buf: Buffer to which output is to be written.
2947 * @return: Number of bytes written to buffer.
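 *
 * Note: only the first 256 bytes of the register set are dumped, formatted
 * as one 32 bit word per line.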
2948 */
2949 static inline ssize_t
2950 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2951 {
2952 	unsigned int i, sz = 256;
2953 	int bytes_wrote = 0;
2954 	char *loc = (char *)buf;
2955 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2956 
2957 	for (i = 0; i < sz / sizeof(u32); i++) {
2958 		bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
2959 			"%08x: %08x\n", (i * 4),
2960 			readl(&reg[i]));
2961 	}
2962 	return bytes_wrote;
2963 }
2964 
2965 /**
2966 * megasas_reset_bus_host - Bus & host reset handler entry point
2967 */
2968 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2969 {
2970 	int ret;
2971 	struct megasas_instance *instance;
2972 
2973 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2974 
2975 	scmd_printk(KERN_INFO, scmd,
2976 		"OCR is requested due to IO timeout!!\n");
2977 
2978 	scmd_printk(KERN_INFO, scmd,
2979 		"SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
2980 		scmd->device->host->shost_state,
2981 		scsi_host_busy(scmd->device->host),
2982 		atomic_read(&instance->fw_outstanding));
2983 	/*
2984 	 * First wait for all commands to complete
2985 	 */
2986 	if (instance->adapter_type == MFI_SERIES) {
2987 		ret = megasas_generic_reset(scmd);
2988 	} else {
2989 		megasas_dump_fusion_io(scmd);
2990 		ret = megasas_reset_fusion(scmd->device->host,
2991 			SCSIIO_TIMEOUT_OCR);
2992 	}
2993 
2994 	return ret;
2995 }
2996 
2997 /**
2998 * megasas_task_abort - Issues task abort request to firmware
2999 * (supported only for fusion adapters)
3000 * @scmd: SCSI command pointer
3001 */
3002 static int megasas_task_abort(struct scsi_cmnd *scmd)
3003 {
3004 	int ret;
3005 	struct megasas_instance *instance;
3006 
3007 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3008 
3009 	if (instance->adapter_type != MFI_SERIES)
3010 		ret = megasas_task_abort_fusion(scmd);
3011 	else {
3012 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3013 		ret = FAILED;
3014 	}
3015 
3016 	return ret;
3017 }
3018 
3019 /**
3020 * megasas_reset_target: Issues target reset request to firmware
3021 * (supported only for fusion adapters)
3022 * @scmd: SCSI command pointer
3023 */
3024 static int megasas_reset_target(struct scsi_cmnd *scmd)
3025 {
3026 	int ret;
3027 	struct megasas_instance *instance;
3028 
3029 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3030 
3031 	if (instance->adapter_type != MFI_SERIES)
3032 		ret = megasas_reset_target_fusion(scmd);
3033 	else {
3034 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3035 		ret = FAILED;
3036 	}
3037 
3038 	return ret;
3039 }
3040 
3041 /**
3042 * megasas_bios_param - Returns disk geometry for a disk
3043 * @sdev: device handle
3044 * @bdev: block device
3045 * @capacity: drive capacity
3046 * @geom: geometry parameters
3047 */
3048 static int
3049 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3050 	sector_t capacity, int geom[])
3051 {
3052 	int heads;
3053 	int sectors;
3054 	sector_t cylinders;
3055 	unsigned long tmp;
3056 
3057 	/* Default heads (64) & sectors (32) */
3058 	heads = 64;
3059 	sectors = 32;
3060 
3061 	tmp = heads * sectors;
3062 	cylinders = capacity;
3063 
3064 	sector_div(cylinders, tmp);
3065 
3066 	/*
3067 	 * Handle extended translation size for logical drives > 1Gb
3068 	 */
3069 
3070 	if (capacity >= 0x200000) {
3071 		heads = 255;
3072 		sectors = 63;
3073 		tmp = heads*sectors;
3074 		cylinders = capacity;
3075 		sector_div(cylinders, tmp);
3076 	}
3077 
3078 	geom[0] = heads;
3079 	geom[1] = sectors;
3080 	geom[2] = cylinders;
3081 
3082 	return 0;
3083 }
3084 
3085
static void megasas_aen_polling(struct work_struct *work); 3086 3087 /** 3088 * megasas_service_aen - Processes an event notification 3089 * @instance: Adapter soft state 3090 * @cmd: AEN command completed by the ISR 3091 * 3092 * For AEN, driver sends a command down to FW that is held by the FW till an 3093 * event occurs. When an event of interest occurs, FW completes the command 3094 * that it was previously holding. 3095 * 3096 * This routines sends SIGIO signal to processes that have registered with the 3097 * driver for AEN. 3098 */ 3099 static void 3100 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3101 { 3102 unsigned long flags; 3103 3104 /* 3105 * Don't signal app if it is just an aborted previously registered aen 3106 */ 3107 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3108 spin_lock_irqsave(&poll_aen_lock, flags); 3109 megasas_poll_wait_aen = 1; 3110 spin_unlock_irqrestore(&poll_aen_lock, flags); 3111 wake_up(&megasas_poll_wait); 3112 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3113 } 3114 else 3115 cmd->abort_aen = 0; 3116 3117 instance->aen_cmd = NULL; 3118 3119 megasas_return_cmd(instance, cmd); 3120 3121 if ((instance->unload == 0) && 3122 ((instance->issuepend_done == 1))) { 3123 struct megasas_aen_event *ev; 3124 3125 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3126 if (!ev) { 3127 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3128 } else { 3129 ev->instance = instance; 3130 instance->ev = ev; 3131 INIT_DELAYED_WORK(&ev->hotplug_work, 3132 megasas_aen_polling); 3133 schedule_delayed_work(&ev->hotplug_work, 0); 3134 } 3135 } 3136 } 3137 3138 static ssize_t 3139 fw_crash_buffer_store(struct device *cdev, 3140 struct device_attribute *attr, const char *buf, size_t count) 3141 { 3142 struct Scsi_Host *shost = class_to_shost(cdev); 3143 struct megasas_instance *instance = 3144 (struct megasas_instance *) shost->hostdata; 3145 int val = 0; 3146 unsigned long flags; 3147 3148 if (kstrtoint(buf, 0, &val) != 0) 3149 return -EINVAL; 3150 3151 spin_lock_irqsave(&instance->crashdump_lock, flags); 3152 instance->fw_crash_buffer_offset = val; 3153 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3154 return strlen(buf); 3155 } 3156 3157 static ssize_t 3158 fw_crash_buffer_show(struct device *cdev, 3159 struct device_attribute *attr, char *buf) 3160 { 3161 struct Scsi_Host *shost = class_to_shost(cdev); 3162 struct megasas_instance *instance = 3163 (struct megasas_instance *) shost->hostdata; 3164 u32 size; 3165 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3166 unsigned long chunk_left_bytes; 3167 unsigned long src_addr; 3168 unsigned long flags; 3169 u32 buff_offset; 3170 3171 spin_lock_irqsave(&instance->crashdump_lock, flags); 3172 buff_offset = instance->fw_crash_buffer_offset; 3173 if (!instance->crash_dump_buf && 3174 !((instance->fw_crash_state == AVAILABLE) || 3175 (instance->fw_crash_state == COPYING))) { 3176 dev_err(&instance->pdev->dev, 3177 "Firmware crash dump is not available\n"); 3178 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3179 return -EINVAL; 3180 } 3181 3182 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3183 dev_err(&instance->pdev->dev, 3184 "Firmware crash dump offset is out of range\n"); 3185 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3186 return 0; 3187 } 3188 3189 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3190 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3191 size = (size > chunk_left_bytes) ? 
chunk_left_bytes : size; 3192 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3193 3194 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3195 (buff_offset % dmachunk); 3196 memcpy(buf, (void *)src_addr, size); 3197 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3198 3199 return size; 3200 } 3201 3202 static ssize_t 3203 fw_crash_buffer_size_show(struct device *cdev, 3204 struct device_attribute *attr, char *buf) 3205 { 3206 struct Scsi_Host *shost = class_to_shost(cdev); 3207 struct megasas_instance *instance = 3208 (struct megasas_instance *) shost->hostdata; 3209 3210 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3211 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3212 } 3213 3214 static ssize_t 3215 fw_crash_state_store(struct device *cdev, 3216 struct device_attribute *attr, const char *buf, size_t count) 3217 { 3218 struct Scsi_Host *shost = class_to_shost(cdev); 3219 struct megasas_instance *instance = 3220 (struct megasas_instance *) shost->hostdata; 3221 int val = 0; 3222 unsigned long flags; 3223 3224 if (kstrtoint(buf, 0, &val) != 0) 3225 return -EINVAL; 3226 3227 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3228 dev_err(&instance->pdev->dev, "application updates invalid " 3229 "firmware crash state\n"); 3230 return -EINVAL; 3231 } 3232 3233 instance->fw_crash_state = val; 3234 3235 if ((val == COPIED) || (val == COPY_ERROR)) { 3236 spin_lock_irqsave(&instance->crashdump_lock, flags); 3237 megasas_free_host_crash_buffer(instance); 3238 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3239 if (val == COPY_ERROR) 3240 dev_info(&instance->pdev->dev, "application failed to " 3241 "copy Firmware crash dump\n"); 3242 else 3243 dev_info(&instance->pdev->dev, "Firmware crash dump " 3244 "copied successfully\n"); 3245 } 3246 return strlen(buf); 3247 } 3248 3249 static ssize_t 3250 fw_crash_state_show(struct device *cdev, 3251 struct device_attribute *attr, char *buf) 3252 { 3253 struct Scsi_Host *shost = class_to_shost(cdev); 3254 struct megasas_instance *instance = 3255 (struct megasas_instance *) shost->hostdata; 3256 3257 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3258 } 3259 3260 static ssize_t 3261 page_size_show(struct device *cdev, 3262 struct device_attribute *attr, char *buf) 3263 { 3264 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3265 } 3266 3267 static ssize_t 3268 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3269 char *buf) 3270 { 3271 struct Scsi_Host *shost = class_to_shost(cdev); 3272 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3273 3274 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3275 } 3276 3277 static ssize_t 3278 fw_cmds_outstanding_show(struct device *cdev, 3279 struct device_attribute *attr, char *buf) 3280 { 3281 struct Scsi_Host *shost = class_to_shost(cdev); 3282 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3283 3284 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3285 } 3286 3287 static ssize_t 3288 dump_system_regs_show(struct device *cdev, 3289 struct device_attribute *attr, char *buf) 3290 { 3291 struct Scsi_Host *shost = class_to_shost(cdev); 3292 struct megasas_instance *instance = 3293 (struct megasas_instance *)shost->hostdata; 3294 3295 return megasas_dump_sys_regs(instance->reg_set, buf); 3296 } 3297 3298 static ssize_t 3299 raid_map_id_show(struct device *cdev, 
struct device_attribute *attr, 3300 char *buf) 3301 { 3302 struct Scsi_Host *shost = class_to_shost(cdev); 3303 struct megasas_instance *instance = 3304 (struct megasas_instance *)shost->hostdata; 3305 3306 return snprintf(buf, PAGE_SIZE, "%ld\n", 3307 (unsigned long)instance->map_id); 3308 } 3309 3310 static DEVICE_ATTR_RW(fw_crash_buffer); 3311 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3312 static DEVICE_ATTR_RW(fw_crash_state); 3313 static DEVICE_ATTR_RO(page_size); 3314 static DEVICE_ATTR_RO(ldio_outstanding); 3315 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3316 static DEVICE_ATTR_RO(dump_system_regs); 3317 static DEVICE_ATTR_RO(raid_map_id); 3318 3319 static struct device_attribute *megaraid_host_attrs[] = { 3320 &dev_attr_fw_crash_buffer_size, 3321 &dev_attr_fw_crash_buffer, 3322 &dev_attr_fw_crash_state, 3323 &dev_attr_page_size, 3324 &dev_attr_ldio_outstanding, 3325 &dev_attr_fw_cmds_outstanding, 3326 &dev_attr_dump_system_regs, 3327 &dev_attr_raid_map_id, 3328 NULL, 3329 }; 3330 3331 /* 3332 * Scsi host template for megaraid_sas driver 3333 */ 3334 static struct scsi_host_template megasas_template = { 3335 3336 .module = THIS_MODULE, 3337 .name = "Avago SAS based MegaRAID driver", 3338 .proc_name = "megaraid_sas", 3339 .slave_configure = megasas_slave_configure, 3340 .slave_alloc = megasas_slave_alloc, 3341 .slave_destroy = megasas_slave_destroy, 3342 .queuecommand = megasas_queue_command, 3343 .eh_target_reset_handler = megasas_reset_target, 3344 .eh_abort_handler = megasas_task_abort, 3345 .eh_host_reset_handler = megasas_reset_bus_host, 3346 .eh_timed_out = megasas_reset_timer, 3347 .shost_attrs = megaraid_host_attrs, 3348 .bios_param = megasas_bios_param, 3349 .change_queue_depth = scsi_change_queue_depth, 3350 .max_segment_size = 0xffffffff, 3351 .no_write_same = 1, 3352 }; 3353 3354 /** 3355 * megasas_complete_int_cmd - Completes an internal command 3356 * @instance: Adapter soft state 3357 * @cmd: Command to be completed 3358 * 3359 * The megasas_issue_blocked_cmd() function waits for a command to complete 3360 * after it issues a command. This function wakes up that waiting routine by 3361 * calling wake_up() on the wait queue. 3362 */ 3363 static void 3364 megasas_complete_int_cmd(struct megasas_instance *instance, 3365 struct megasas_cmd *cmd) 3366 { 3367 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3368 wake_up(&instance->int_cmd_wait_q); 3369 } 3370 3371 /** 3372 * megasas_complete_abort - Completes aborting a command 3373 * @instance: Adapter soft state 3374 * @cmd: Cmd that was issued to abort another cmd 3375 * 3376 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3377 * after it issues an abort on a previously issued command. This function 3378 * wakes up all functions waiting on the same wait queue. 3379 */ 3380 static void 3381 megasas_complete_abort(struct megasas_instance *instance, 3382 struct megasas_cmd *cmd) 3383 { 3384 if (cmd->sync_cmd) { 3385 cmd->sync_cmd = 0; 3386 cmd->cmd_status_drv = 0; 3387 wake_up(&instance->abort_cmd_wait_q); 3388 } 3389 } 3390 3391 /** 3392 * megasas_complete_cmd - Completes a command 3393 * @instance: Adapter soft state 3394 * @cmd: Command to be completed 3395 * @alt_status: If non-zero, use this value as status to 3396 * SCSI mid-layer instead of the value returned 3397 * by the FW. 
This should be used if caller wants 3398 * an alternate status (as in the case of aborted 3399 * commands) 3400 */ 3401 void 3402 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3403 u8 alt_status) 3404 { 3405 int exception = 0; 3406 struct megasas_header *hdr = &cmd->frame->hdr; 3407 unsigned long flags; 3408 struct fusion_context *fusion = instance->ctrl_context; 3409 u32 opcode, status; 3410 3411 /* flag for the retry reset */ 3412 cmd->retry_for_fw_reset = 0; 3413 3414 if (cmd->scmd) 3415 cmd->scmd->SCp.ptr = NULL; 3416 3417 switch (hdr->cmd) { 3418 case MFI_CMD_INVALID: 3419 /* Some older 1068 controller FW may keep a pended 3420 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3421 when booting the kdump kernel. Ignore this command to 3422 prevent a kernel panic on shutdown of the kdump kernel. */ 3423 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3424 "completed\n"); 3425 dev_warn(&instance->pdev->dev, "If you have a controller " 3426 "other than PERC5, please upgrade your firmware\n"); 3427 break; 3428 case MFI_CMD_PD_SCSI_IO: 3429 case MFI_CMD_LD_SCSI_IO: 3430 3431 /* 3432 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3433 * issued either through an IO path or an IOCTL path. If it 3434 * was via IOCTL, we will send it to internal completion. 3435 */ 3436 if (cmd->sync_cmd) { 3437 cmd->sync_cmd = 0; 3438 megasas_complete_int_cmd(instance, cmd); 3439 break; 3440 } 3441 /* fall through */ 3442 3443 case MFI_CMD_LD_READ: 3444 case MFI_CMD_LD_WRITE: 3445 3446 if (alt_status) { 3447 cmd->scmd->result = alt_status << 16; 3448 exception = 1; 3449 } 3450 3451 if (exception) { 3452 3453 atomic_dec(&instance->fw_outstanding); 3454 3455 scsi_dma_unmap(cmd->scmd); 3456 cmd->scmd->scsi_done(cmd->scmd); 3457 megasas_return_cmd(instance, cmd); 3458 3459 break; 3460 } 3461 3462 switch (hdr->cmd_status) { 3463 3464 case MFI_STAT_OK: 3465 cmd->scmd->result = DID_OK << 16; 3466 break; 3467 3468 case MFI_STAT_SCSI_IO_FAILED: 3469 case MFI_STAT_LD_INIT_IN_PROGRESS: 3470 cmd->scmd->result = 3471 (DID_ERROR << 16) | hdr->scsi_status; 3472 break; 3473 3474 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3475 3476 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3477 3478 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3479 memset(cmd->scmd->sense_buffer, 0, 3480 SCSI_SENSE_BUFFERSIZE); 3481 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3482 hdr->sense_len); 3483 3484 cmd->scmd->result |= DRIVER_SENSE << 24; 3485 } 3486 3487 break; 3488 3489 case MFI_STAT_LD_OFFLINE: 3490 case MFI_STAT_DEVICE_NOT_FOUND: 3491 cmd->scmd->result = DID_BAD_TARGET << 16; 3492 break; 3493 3494 default: 3495 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3496 hdr->cmd_status); 3497 cmd->scmd->result = DID_ERROR << 16; 3498 break; 3499 } 3500 3501 atomic_dec(&instance->fw_outstanding); 3502 3503 scsi_dma_unmap(cmd->scmd); 3504 cmd->scmd->scsi_done(cmd->scmd); 3505 megasas_return_cmd(instance, cmd); 3506 3507 break; 3508 3509 case MFI_CMD_SMP: 3510 case MFI_CMD_STP: 3511 case MFI_CMD_NVME: 3512 case MFI_CMD_TOOLBOX: 3513 megasas_complete_int_cmd(instance, cmd); 3514 break; 3515 3516 case MFI_CMD_DCMD: 3517 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3518 /* Check for LD map update */ 3519 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3520 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3521 fusion->fast_path_io = 0; 3522 spin_lock_irqsave(instance->host->host_lock, flags); 3523 status = cmd->frame->hdr.cmd_status; 3524 instance->map_update_cmd = NULL; 3525 
if (status != MFI_STAT_OK) { 3526 if (status != MFI_STAT_NOT_FOUND) 3527 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3528 cmd->frame->hdr.cmd_status); 3529 else { 3530 megasas_return_cmd(instance, cmd); 3531 spin_unlock_irqrestore( 3532 instance->host->host_lock, 3533 flags); 3534 break; 3535 } 3536 } 3537 3538 megasas_return_cmd(instance, cmd); 3539 3540 /* 3541 * Set fast path IO to ZERO. 3542 * Validate Map will set proper value. 3543 * Meanwhile all IOs will go as LD IO. 3544 */ 3545 if (status == MFI_STAT_OK && 3546 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3547 instance->map_id++; 3548 fusion->fast_path_io = 1; 3549 } else { 3550 fusion->fast_path_io = 0; 3551 } 3552 3553 megasas_sync_map_info(instance); 3554 spin_unlock_irqrestore(instance->host->host_lock, 3555 flags); 3556 break; 3557 } 3558 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3559 opcode == MR_DCMD_CTRL_EVENT_GET) { 3560 spin_lock_irqsave(&poll_aen_lock, flags); 3561 megasas_poll_wait_aen = 0; 3562 spin_unlock_irqrestore(&poll_aen_lock, flags); 3563 } 3564 3565 /* FW has an updated PD sequence */ 3566 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3567 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3568 3569 spin_lock_irqsave(instance->host->host_lock, flags); 3570 status = cmd->frame->hdr.cmd_status; 3571 instance->jbod_seq_cmd = NULL; 3572 megasas_return_cmd(instance, cmd); 3573 3574 if (status == MFI_STAT_OK) { 3575 instance->pd_seq_map_id++; 3576 /* Re-register a pd sync seq num cmd */ 3577 if (megasas_sync_pd_seq_num(instance, true)) 3578 instance->use_seqnum_jbod_fp = false; 3579 } else 3580 instance->use_seqnum_jbod_fp = false; 3581 3582 spin_unlock_irqrestore(instance->host->host_lock, flags); 3583 break; 3584 } 3585 3586 /* 3587 * See if got an event notification 3588 */ 3589 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3590 megasas_service_aen(instance, cmd); 3591 else 3592 megasas_complete_int_cmd(instance, cmd); 3593 3594 break; 3595 3596 case MFI_CMD_ABORT: 3597 /* 3598 * Cmd issued to abort another cmd returned 3599 */ 3600 megasas_complete_abort(instance, cmd); 3601 break; 3602 3603 default: 3604 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3605 hdr->cmd); 3606 megasas_complete_int_cmd(instance, cmd); 3607 break; 3608 } 3609 } 3610 3611 /** 3612 * megasas_issue_pending_cmds_again - issue all pending cmds 3613 * in FW again because of the fw reset 3614 * @instance: Adapter soft state 3615 */ 3616 static inline void 3617 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3618 { 3619 struct megasas_cmd *cmd; 3620 struct list_head clist_local; 3621 union megasas_evt_class_locale class_locale; 3622 unsigned long flags; 3623 u32 seq_num; 3624 3625 INIT_LIST_HEAD(&clist_local); 3626 spin_lock_irqsave(&instance->hba_lock, flags); 3627 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3628 spin_unlock_irqrestore(&instance->hba_lock, flags); 3629 3630 while (!list_empty(&clist_local)) { 3631 cmd = list_entry((&clist_local)->next, 3632 struct megasas_cmd, list); 3633 list_del_init(&cmd->list); 3634 3635 if (cmd->sync_cmd || cmd->scmd) { 3636 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3637 "detected to be pending while HBA reset\n", 3638 cmd, cmd->scmd, cmd->sync_cmd); 3639 3640 cmd->retry_for_fw_reset++; 3641 3642 if (cmd->retry_for_fw_reset == 3) { 3643 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3644 "was tried multiple times during reset." 
3645 "Shutting down the HBA\n", 3646 cmd, cmd->scmd, cmd->sync_cmd); 3647 instance->instancet->disable_intr(instance); 3648 atomic_set(&instance->fw_reset_no_pci_access, 1); 3649 megaraid_sas_kill_hba(instance); 3650 return; 3651 } 3652 } 3653 3654 if (cmd->sync_cmd == 1) { 3655 if (cmd->scmd) { 3656 dev_notice(&instance->pdev->dev, "unexpected" 3657 "cmd attached to internal command!\n"); 3658 } 3659 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3660 "on the internal reset queue," 3661 "issue it again.\n", cmd); 3662 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3663 instance->instancet->fire_cmd(instance, 3664 cmd->frame_phys_addr, 3665 0, instance->reg_set); 3666 } else if (cmd->scmd) { 3667 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3668 "detected on the internal queue, issue again.\n", 3669 cmd, cmd->scmd->cmnd[0]); 3670 3671 atomic_inc(&instance->fw_outstanding); 3672 instance->instancet->fire_cmd(instance, 3673 cmd->frame_phys_addr, 3674 cmd->frame_count-1, instance->reg_set); 3675 } else { 3676 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3677 "internal reset defer list while re-issue!!\n", 3678 cmd); 3679 } 3680 } 3681 3682 if (instance->aen_cmd) { 3683 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3684 megasas_return_cmd(instance, instance->aen_cmd); 3685 3686 instance->aen_cmd = NULL; 3687 } 3688 3689 /* 3690 * Initiate AEN (Asynchronous Event Notification) 3691 */ 3692 seq_num = instance->last_seq_num; 3693 class_locale.members.reserved = 0; 3694 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3695 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3696 3697 megasas_register_aen(instance, seq_num, class_locale.word); 3698 } 3699 3700 /** 3701 * Move the internal reset pending commands to a deferred queue. 3702 * 3703 * We move the commands pending at internal reset time to a 3704 * pending queue. This queue would be flushed after successful 3705 * completion of the internal reset sequence. if the internal reset 3706 * did not complete in time, the kernel reset handler would flush 3707 * these commands. 
3708 **/ 3709 static void 3710 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3711 { 3712 struct megasas_cmd *cmd; 3713 int i; 3714 u16 max_cmd = instance->max_fw_cmds; 3715 u32 defer_index; 3716 unsigned long flags; 3717 3718 defer_index = 0; 3719 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3720 for (i = 0; i < max_cmd; i++) { 3721 cmd = instance->cmd_list[i]; 3722 if (cmd->sync_cmd == 1 || cmd->scmd) { 3723 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3724 "on the defer queue as internal\n", 3725 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3726 3727 if (!list_empty(&cmd->list)) { 3728 dev_notice(&instance->pdev->dev, "ERROR while" 3729 " moving this cmd:%p, %d %p, it was" 3730 "discovered on some list?\n", 3731 cmd, cmd->sync_cmd, cmd->scmd); 3732 3733 list_del_init(&cmd->list); 3734 } 3735 defer_index++; 3736 list_add_tail(&cmd->list, 3737 &instance->internal_reset_pending_q); 3738 } 3739 } 3740 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3741 } 3742 3743 3744 static void 3745 process_fw_state_change_wq(struct work_struct *work) 3746 { 3747 struct megasas_instance *instance = 3748 container_of(work, struct megasas_instance, work_init); 3749 u32 wait; 3750 unsigned long flags; 3751 3752 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3753 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3754 atomic_read(&instance->adprecovery)); 3755 return ; 3756 } 3757 3758 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3759 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3760 "state, restarting it...\n"); 3761 3762 instance->instancet->disable_intr(instance); 3763 atomic_set(&instance->fw_outstanding, 0); 3764 3765 atomic_set(&instance->fw_reset_no_pci_access, 1); 3766 instance->instancet->adp_reset(instance, instance->reg_set); 3767 atomic_set(&instance->fw_reset_no_pci_access, 0); 3768 3769 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3770 "initiating next stage...\n"); 3771 3772 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3773 "state 2 starting...\n"); 3774 3775 /* waiting for about 20 second before start the second init */ 3776 for (wait = 0; wait < 30; wait++) { 3777 msleep(1000); 3778 } 3779 3780 if (megasas_transition_to_ready(instance, 1)) { 3781 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3782 3783 atomic_set(&instance->fw_reset_no_pci_access, 1); 3784 megaraid_sas_kill_hba(instance); 3785 return ; 3786 } 3787 3788 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3789 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3790 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3791 ) { 3792 *instance->consumer = *instance->producer; 3793 } else { 3794 *instance->consumer = 0; 3795 *instance->producer = 0; 3796 } 3797 3798 megasas_issue_init_mfi(instance); 3799 3800 spin_lock_irqsave(&instance->hba_lock, flags); 3801 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3802 spin_unlock_irqrestore(&instance->hba_lock, flags); 3803 instance->instancet->enable_intr(instance); 3804 3805 megasas_issue_pending_cmds_again(instance); 3806 instance->issuepend_done = 1; 3807 } 3808 } 3809 3810 /** 3811 * megasas_deplete_reply_queue - Processes all completed commands 3812 * @instance: Adapter soft state 3813 * @alt_status: Alternate status to be returned to 3814 * SCSI mid-layer instead of the status 3815 * returned by the FW 3816 * Note: this must be called with hba lock held 3817 */ 3818 static 
int 3819 megasas_deplete_reply_queue(struct megasas_instance *instance, 3820 u8 alt_status) 3821 { 3822 u32 mfiStatus; 3823 u32 fw_state; 3824 3825 if ((mfiStatus = instance->instancet->check_reset(instance, 3826 instance->reg_set)) == 1) { 3827 return IRQ_HANDLED; 3828 } 3829 3830 mfiStatus = instance->instancet->clear_intr(instance); 3831 if (mfiStatus == 0) { 3832 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3833 if (!instance->msix_vectors) 3834 return IRQ_NONE; 3835 } 3836 3837 instance->mfiStatus = mfiStatus; 3838 3839 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3840 fw_state = instance->instancet->read_fw_status_reg( 3841 instance) & MFI_STATE_MASK; 3842 3843 if (fw_state != MFI_STATE_FAULT) { 3844 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3845 fw_state); 3846 } 3847 3848 if ((fw_state == MFI_STATE_FAULT) && 3849 (instance->disableOnlineCtrlReset == 0)) { 3850 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3851 3852 if ((instance->pdev->device == 3853 PCI_DEVICE_ID_LSI_SAS1064R) || 3854 (instance->pdev->device == 3855 PCI_DEVICE_ID_DELL_PERC5) || 3856 (instance->pdev->device == 3857 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3858 3859 *instance->consumer = 3860 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3861 } 3862 3863 3864 instance->instancet->disable_intr(instance); 3865 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3866 instance->issuepend_done = 0; 3867 3868 atomic_set(&instance->fw_outstanding, 0); 3869 megasas_internal_reset_defer_cmds(instance); 3870 3871 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3872 fw_state, atomic_read(&instance->adprecovery)); 3873 3874 schedule_work(&instance->work_init); 3875 return IRQ_HANDLED; 3876 3877 } else { 3878 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3879 fw_state, instance->disableOnlineCtrlReset); 3880 } 3881 } 3882 3883 tasklet_schedule(&instance->isr_tasklet); 3884 return IRQ_HANDLED; 3885 } 3886 /** 3887 * megasas_isr - isr entry point 3888 */ 3889 static irqreturn_t megasas_isr(int irq, void *devp) 3890 { 3891 struct megasas_irq_context *irq_context = devp; 3892 struct megasas_instance *instance = irq_context->instance; 3893 unsigned long flags; 3894 irqreturn_t rc; 3895 3896 if (atomic_read(&instance->fw_reset_no_pci_access)) 3897 return IRQ_HANDLED; 3898 3899 spin_lock_irqsave(&instance->hba_lock, flags); 3900 rc = megasas_deplete_reply_queue(instance, DID_OK); 3901 spin_unlock_irqrestore(&instance->hba_lock, flags); 3902 3903 return rc; 3904 } 3905 3906 /** 3907 * megasas_transition_to_ready - Move the FW to READY state 3908 * @instance: Adapter soft state 3909 * 3910 * During the initialization, FW passes can potentially be in any one of 3911 * several possible states. If the FW in operational, waiting-for-handshake 3912 * states, driver must take steps to bring it to ready state. Otherwise, it 3913 * has to wait for the ready state. 
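 *
 * Sketch (condensed from the loop below, not a verbatim quote) of the wait
 * pattern applied to every intermediate state: 50 polls of 20 ms give
 * roughly one second per unit of max_wait.
 *
 *	for (i = 0; i < max_wait * 50; i++) {
 *		curr_abs_state =
 *			instance->instancet->read_fw_status_reg(instance);
 *		if (curr_abs_state != abs_state)
 *			break;
 *		msleep(20);
 *	}
 *	if (curr_abs_state == abs_state)
 *		return -ENODEV;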
3914 */ 3915 int 3916 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3917 { 3918 int i; 3919 u8 max_wait; 3920 u32 fw_state; 3921 u32 abs_state, curr_abs_state; 3922 3923 abs_state = instance->instancet->read_fw_status_reg(instance); 3924 fw_state = abs_state & MFI_STATE_MASK; 3925 3926 if (fw_state != MFI_STATE_READY) 3927 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3928 " state\n"); 3929 3930 while (fw_state != MFI_STATE_READY) { 3931 3932 switch (fw_state) { 3933 3934 case MFI_STATE_FAULT: 3935 dev_printk(KERN_ERR, &instance->pdev->dev, 3936 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 3937 abs_state & MFI_STATE_FAULT_CODE, 3938 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 3939 if (ocr) { 3940 max_wait = MEGASAS_RESET_WAIT_TIME; 3941 break; 3942 } else { 3943 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 3944 megasas_dump_reg_set(instance->reg_set); 3945 return -ENODEV; 3946 } 3947 3948 case MFI_STATE_WAIT_HANDSHAKE: 3949 /* 3950 * Set the CLR bit in inbound doorbell 3951 */ 3952 if ((instance->pdev->device == 3953 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3954 (instance->pdev->device == 3955 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3956 (instance->adapter_type != MFI_SERIES)) 3957 writel( 3958 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3959 &instance->reg_set->doorbell); 3960 else 3961 writel( 3962 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3963 &instance->reg_set->inbound_doorbell); 3964 3965 max_wait = MEGASAS_RESET_WAIT_TIME; 3966 break; 3967 3968 case MFI_STATE_BOOT_MESSAGE_PENDING: 3969 if ((instance->pdev->device == 3970 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3971 (instance->pdev->device == 3972 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3973 (instance->adapter_type != MFI_SERIES)) 3974 writel(MFI_INIT_HOTPLUG, 3975 &instance->reg_set->doorbell); 3976 else 3977 writel(MFI_INIT_HOTPLUG, 3978 &instance->reg_set->inbound_doorbell); 3979 3980 max_wait = MEGASAS_RESET_WAIT_TIME; 3981 break; 3982 3983 case MFI_STATE_OPERATIONAL: 3984 /* 3985 * Bring it to READY state; assuming max wait 10 secs 3986 */ 3987 instance->instancet->disable_intr(instance); 3988 if ((instance->pdev->device == 3989 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3990 (instance->pdev->device == 3991 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3992 (instance->adapter_type != MFI_SERIES)) { 3993 writel(MFI_RESET_FLAGS, 3994 &instance->reg_set->doorbell); 3995 3996 if (instance->adapter_type != MFI_SERIES) { 3997 for (i = 0; i < (10 * 1000); i += 20) { 3998 if (megasas_readl( 3999 instance, 4000 &instance-> 4001 reg_set-> 4002 doorbell) & 1) 4003 msleep(20); 4004 else 4005 break; 4006 } 4007 } 4008 } else 4009 writel(MFI_RESET_FLAGS, 4010 &instance->reg_set->inbound_doorbell); 4011 4012 max_wait = MEGASAS_RESET_WAIT_TIME; 4013 break; 4014 4015 case MFI_STATE_UNDEFINED: 4016 /* 4017 * This state should not last for more than 2 seconds 4018 */ 4019 max_wait = MEGASAS_RESET_WAIT_TIME; 4020 break; 4021 4022 case MFI_STATE_BB_INIT: 4023 max_wait = MEGASAS_RESET_WAIT_TIME; 4024 break; 4025 4026 case MFI_STATE_FW_INIT: 4027 max_wait = MEGASAS_RESET_WAIT_TIME; 4028 break; 4029 4030 case MFI_STATE_FW_INIT_2: 4031 max_wait = MEGASAS_RESET_WAIT_TIME; 4032 break; 4033 4034 case MFI_STATE_DEVICE_SCAN: 4035 max_wait = MEGASAS_RESET_WAIT_TIME; 4036 break; 4037 4038 case MFI_STATE_FLUSH_CACHE: 4039 max_wait = MEGASAS_RESET_WAIT_TIME; 4040 break; 4041 4042 default: 4043 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4044 fw_state); 4045 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4046 megasas_dump_reg_set(instance->reg_set); 4047 return -ENODEV; 4048 } 4049 4050 /* 4051 * The cur_state should not last for more than max_wait secs 4052 */ 4053 for (i = 0; i < max_wait * 50; i++) { 4054 curr_abs_state = instance->instancet-> 4055 read_fw_status_reg(instance); 4056 4057 if (abs_state == curr_abs_state) { 4058 msleep(20); 4059 } else 4060 break; 4061 } 4062 4063 /* 4064 * Return error if fw_state hasn't changed after max_wait 4065 */ 4066 if (curr_abs_state == abs_state) { 4067 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4068 "in %d secs\n", fw_state, max_wait); 4069 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4070 megasas_dump_reg_set(instance->reg_set); 4071 return -ENODEV; 4072 } 4073 4074 abs_state = curr_abs_state; 4075 fw_state = curr_abs_state & MFI_STATE_MASK; 4076 } 4077 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4078 4079 return 0; 4080 } 4081 4082 /** 4083 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4084 * @instance: Adapter soft state 4085 */ 4086 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4087 { 4088 int i; 4089 u16 max_cmd = instance->max_mfi_cmds; 4090 struct megasas_cmd *cmd; 4091 4092 if (!instance->frame_dma_pool) 4093 return; 4094 4095 /* 4096 * Return all frames to pool 4097 */ 4098 for (i = 0; i < max_cmd; i++) { 4099 4100 cmd = instance->cmd_list[i]; 4101 4102 if (cmd->frame) 4103 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4104 cmd->frame_phys_addr); 4105 4106 if (cmd->sense) 4107 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4108 cmd->sense_phys_addr); 4109 } 4110 4111 /* 4112 * Now destroy the pool itself 4113 */ 4114 dma_pool_destroy(instance->frame_dma_pool); 4115 dma_pool_destroy(instance->sense_dma_pool); 4116 4117 instance->frame_dma_pool = NULL; 4118 instance->sense_dma_pool = NULL; 4119 } 4120 4121 /** 4122 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4123 * @instance: Adapter soft state 4124 * 4125 * Each command packet has an embedded DMA memory buffer that is used for 4126 * filling MFI frame and the SG list that immediately follows the frame. This 4127 * function creates those DMA memory buffers for each command packet by using 4128 * PCI pool facility. 4129 */ 4130 static int megasas_create_frame_pool(struct megasas_instance *instance) 4131 { 4132 int i; 4133 u16 max_cmd; 4134 u32 frame_count; 4135 struct megasas_cmd *cmd; 4136 4137 max_cmd = instance->max_mfi_cmds; 4138 4139 /* 4140 * For MFI controllers. 4141 * max_num_sge = 60 4142 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4143 * Total 960 byte (15 MFI frame of 64 byte) 4144 * 4145 * Fusion adapter require only 3 extra frame. 4146 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4147 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4148 * Total 192 byte (3 MFI frame of 64 byte) 4149 */ 4150 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4151 (15 + 1) : (3 + 1); 4152 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4153 /* 4154 * Use DMA pool facility provided by PCI layer 4155 */ 4156 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4157 &instance->pdev->dev, 4158 instance->mfi_frame_size, 256, 0); 4159 4160 if (!instance->frame_dma_pool) { 4161 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4162 return -ENOMEM; 4163 } 4164 4165 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4166 &instance->pdev->dev, 128, 4167 4, 0); 4168 4169 if (!instance->sense_dma_pool) { 4170 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4171 4172 dma_pool_destroy(instance->frame_dma_pool); 4173 instance->frame_dma_pool = NULL; 4174 4175 return -ENOMEM; 4176 } 4177 4178 /* 4179 * Allocate and attach a frame to each of the commands in cmd_list. 4180 * By making cmd->index as the context instead of the &cmd, we can 4181 * always use 32bit context regardless of the architecture 4182 */ 4183 for (i = 0; i < max_cmd; i++) { 4184 4185 cmd = instance->cmd_list[i]; 4186 4187 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4188 GFP_KERNEL, &cmd->frame_phys_addr); 4189 4190 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4191 GFP_KERNEL, &cmd->sense_phys_addr); 4192 4193 /* 4194 * megasas_teardown_frame_pool() takes care of freeing 4195 * whatever has been allocated 4196 */ 4197 if (!cmd->frame || !cmd->sense) { 4198 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4199 megasas_teardown_frame_pool(instance); 4200 return -ENOMEM; 4201 } 4202 4203 cmd->frame->io.context = cpu_to_le32(cmd->index); 4204 cmd->frame->io.pad_0 = 0; 4205 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4206 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4207 } 4208 4209 return 0; 4210 } 4211 4212 /** 4213 * megasas_free_cmds - Free all the cmds in the free cmd pool 4214 * @instance: Adapter soft state 4215 */ 4216 void megasas_free_cmds(struct megasas_instance *instance) 4217 { 4218 int i; 4219 4220 /* First free the MFI frame pool */ 4221 megasas_teardown_frame_pool(instance); 4222 4223 /* Free all the commands in the cmd_list */ 4224 for (i = 0; i < instance->max_mfi_cmds; i++) 4225 4226 kfree(instance->cmd_list[i]); 4227 4228 /* Free the cmd_list buffer itself */ 4229 kfree(instance->cmd_list); 4230 instance->cmd_list = NULL; 4231 4232 INIT_LIST_HEAD(&instance->cmd_pool); 4233 } 4234 4235 /** 4236 * megasas_alloc_cmds - Allocates the command packets 4237 * @instance: Adapter soft state 4238 * 4239 * Each command that is issued to the FW, whether IO commands from the OS or 4240 * internal commands like IOCTLs, are wrapped in local data structure called 4241 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4242 * the FW. 4243 * 4244 * Each frame has a 32-bit field called context (tag). This context is used 4245 * to get back the megasas_cmd from the frame when a frame gets completed in 4246 * the ISR. Typically the address of the megasas_cmd itself would be used as 4247 * the context. But we wanted to keep the differences between 32 and 64 bit 4248 * systems to the mininum. We always use 32 bit integers for the context. In 4249 * this driver, the 32 bit values are the indices into an array cmd_list. 4250 * This array is used only to look up the megasas_cmd given the context. The 4251 * free commands themselves are maintained in a linked list called cmd_pool. 
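 *
 * A minimal sketch of the lookup this indexing enables on the completion
 * path (the reply_entry name is illustrative only; the ISR/tasklet reads
 * the context value that FW posted to the reply queue), roughly:
 *
 *	u32 context = le32_to_cpu(reply_entry);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 *
 *	megasas_complete_cmd(instance, cmd, DID_OK);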
4252 */ 4253 int megasas_alloc_cmds(struct megasas_instance *instance) 4254 { 4255 int i; 4256 int j; 4257 u16 max_cmd; 4258 struct megasas_cmd *cmd; 4259 4260 max_cmd = instance->max_mfi_cmds; 4261 4262 /* 4263 * instance->cmd_list is an array of struct megasas_cmd pointers. 4264 * Allocate the dynamic array first and then allocate individual 4265 * commands. 4266 */ 4267 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL); 4268 4269 if (!instance->cmd_list) { 4270 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4271 return -ENOMEM; 4272 } 4273 4274 4275 4276 for (i = 0; i < max_cmd; i++) { 4277 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4278 GFP_KERNEL); 4279 4280 if (!instance->cmd_list[i]) { 4281 4282 for (j = 0; j < i; j++) 4283 kfree(instance->cmd_list[j]); 4284 4285 kfree(instance->cmd_list); 4286 instance->cmd_list = NULL; 4287 4288 return -ENOMEM; 4289 } 4290 } 4291 4292 for (i = 0; i < max_cmd; i++) { 4293 cmd = instance->cmd_list[i]; 4294 memset(cmd, 0, sizeof(struct megasas_cmd)); 4295 cmd->index = i; 4296 cmd->scmd = NULL; 4297 cmd->instance = instance; 4298 4299 list_add_tail(&cmd->list, &instance->cmd_pool); 4300 } 4301 4302 /* 4303 * Create a frame pool and assign one frame to each cmd 4304 */ 4305 if (megasas_create_frame_pool(instance)) { 4306 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4307 megasas_free_cmds(instance); 4308 return -ENOMEM; 4309 } 4310 4311 return 0; 4312 } 4313 4314 /* 4315 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4316 * @instance: Adapter soft state 4317 * 4318 * Return INITIATE_OCR only for Fusion adapters, and only when driver load/unload 4319 * is not in progress and the FW is not already under reset; MFI adapters get KILL_ADAPTER, and an unload or in-progress reset gets IGNORE_TIMEOUT.
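 *
 * Typical caller pattern for a timed-out DCMD, mirroring the DCMD_TIMEOUT
 * handling used throughout this file:
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		mutex_unlock(&instance->reset_mutex);
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		mutex_lock(&instance->reset_mutex);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}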
4320 */ 4321 inline int 4322 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4323 4324 if (instance->adapter_type == MFI_SERIES) 4325 return KILL_ADAPTER; 4326 else if (instance->unload || 4327 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 4328 return IGNORE_TIMEOUT; 4329 else 4330 return INITIATE_OCR; 4331 } 4332 4333 static void 4334 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4335 { 4336 int ret; 4337 struct megasas_cmd *cmd; 4338 struct megasas_dcmd_frame *dcmd; 4339 4340 struct MR_PRIV_DEVICE *mr_device_priv_data; 4341 u16 device_id = 0; 4342 4343 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4344 cmd = megasas_get_cmd(instance); 4345 4346 if (!cmd) { 4347 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4348 return; 4349 } 4350 4351 dcmd = &cmd->frame->dcmd; 4352 4353 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4354 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4355 4356 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4357 dcmd->cmd = MFI_CMD_DCMD; 4358 dcmd->cmd_status = 0xFF; 4359 dcmd->sge_count = 1; 4360 dcmd->flags = MFI_FRAME_DIR_READ; 4361 dcmd->timeout = 0; 4362 dcmd->pad_0 = 0; 4363 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4364 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4365 4366 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4367 sizeof(struct MR_PD_INFO)); 4368 4369 if ((instance->adapter_type != MFI_SERIES) && 4370 !instance->mask_interrupts) 4371 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4372 else 4373 ret = megasas_issue_polled(instance, cmd); 4374 4375 switch (ret) { 4376 case DCMD_SUCCESS: 4377 mr_device_priv_data = sdev->hostdata; 4378 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4379 mr_device_priv_data->interface_type = 4380 instance->pd_info->state.ddf.pdType.intf; 4381 break; 4382 4383 case DCMD_TIMEOUT: 4384 4385 switch (dcmd_timeout_ocr_possible(instance)) { 4386 case INITIATE_OCR: 4387 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4388 mutex_unlock(&instance->reset_mutex); 4389 megasas_reset_fusion(instance->host, 4390 MFI_IO_TIMEOUT_OCR); 4391 mutex_lock(&instance->reset_mutex); 4392 break; 4393 case KILL_ADAPTER: 4394 megaraid_sas_kill_hba(instance); 4395 break; 4396 case IGNORE_TIMEOUT: 4397 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4398 __func__, __LINE__); 4399 break; 4400 } 4401 4402 break; 4403 } 4404 4405 if (ret != DCMD_TIMEOUT) 4406 megasas_return_cmd(instance, cmd); 4407 4408 return; 4409 } 4410 /* 4411 * megasas_get_pd_list_info - Returns FW's pd_list structure 4412 * @instance: Adapter soft state 4413 * @pd_list: pd_list structure 4414 * 4415 * Issues an internal command (DCMD) to get the FW's controller PD 4416 * list structure. This information is mainly used to find out SYSTEM 4417 * supported by the FW. 
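 *
 * Illustrative sketch only: other paths in the driver can consult the
 * cached copy by target id along these lines (the exact consumer code
 * lives in the SCSI device setup paths):
 *
 *	u16 pd_index = sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL + sdev->id;
 *	bool is_syspd =
 *		instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM;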
4418 */ 4419 static int 4420 megasas_get_pd_list(struct megasas_instance *instance) 4421 { 4422 int ret = 0, pd_index = 0; 4423 struct megasas_cmd *cmd; 4424 struct megasas_dcmd_frame *dcmd; 4425 struct MR_PD_LIST *ci; 4426 struct MR_PD_ADDRESS *pd_addr; 4427 4428 if (instance->pd_list_not_supported) { 4429 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4430 "not supported by firmware\n"); 4431 return ret; 4432 } 4433 4434 ci = instance->pd_list_buf; 4435 4436 cmd = megasas_get_cmd(instance); 4437 4438 if (!cmd) { 4439 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4440 return -ENOMEM; 4441 } 4442 4443 dcmd = &cmd->frame->dcmd; 4444 4445 memset(ci, 0, sizeof(*ci)); 4446 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4447 4448 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4449 dcmd->mbox.b[1] = 0; 4450 dcmd->cmd = MFI_CMD_DCMD; 4451 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4452 dcmd->sge_count = 1; 4453 dcmd->flags = MFI_FRAME_DIR_READ; 4454 dcmd->timeout = 0; 4455 dcmd->pad_0 = 0; 4456 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4457 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4458 4459 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4460 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4461 4462 if ((instance->adapter_type != MFI_SERIES) && 4463 !instance->mask_interrupts) 4464 ret = megasas_issue_blocked_cmd(instance, cmd, 4465 MFI_IO_TIMEOUT_SECS); 4466 else 4467 ret = megasas_issue_polled(instance, cmd); 4468 4469 switch (ret) { 4470 case DCMD_FAILED: 4471 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4472 "failed/not supported by firmware\n"); 4473 4474 if (instance->adapter_type != MFI_SERIES) 4475 megaraid_sas_kill_hba(instance); 4476 else 4477 instance->pd_list_not_supported = 1; 4478 break; 4479 case DCMD_TIMEOUT: 4480 4481 switch (dcmd_timeout_ocr_possible(instance)) { 4482 case INITIATE_OCR: 4483 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4484 /* 4485 * DCMD failed from AEN path. 4486 * AEN path already hold reset_mutex to avoid PCI access 4487 * while OCR is in progress. 
4488 */ 4489 mutex_unlock(&instance->reset_mutex); 4490 megasas_reset_fusion(instance->host, 4491 MFI_IO_TIMEOUT_OCR); 4492 mutex_lock(&instance->reset_mutex); 4493 break; 4494 case KILL_ADAPTER: 4495 megaraid_sas_kill_hba(instance); 4496 break; 4497 case IGNORE_TIMEOUT: 4498 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4499 __func__, __LINE__); 4500 break; 4501 } 4502 4503 break; 4504 4505 case DCMD_SUCCESS: 4506 pd_addr = ci->addr; 4507 if (megasas_dbg_lvl & LD_PD_DEBUG) 4508 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4509 __func__, le32_to_cpu(ci->count)); 4510 4511 if ((le32_to_cpu(ci->count) > 4512 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4513 break; 4514 4515 memset(instance->local_pd_list, 0, 4516 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4517 4518 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4519 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4520 le16_to_cpu(pd_addr->deviceId); 4521 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4522 pd_addr->scsiDevType; 4523 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4524 MR_PD_STATE_SYSTEM; 4525 if (megasas_dbg_lvl & LD_PD_DEBUG) 4526 dev_info(&instance->pdev->dev, 4527 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4528 pd_index, le16_to_cpu(pd_addr->deviceId), 4529 pd_addr->scsiDevType); 4530 pd_addr++; 4531 } 4532 4533 memcpy(instance->pd_list, instance->local_pd_list, 4534 sizeof(instance->pd_list)); 4535 break; 4536 4537 } 4538 4539 if (ret != DCMD_TIMEOUT) 4540 megasas_return_cmd(instance, cmd); 4541 4542 return ret; 4543 } 4544 4545 /* 4546 * megasas_get_ld_list - Returns FW's ld_list structure 4547 * @instance: Adapter soft state 4548 * 4549 * Issues an internal command (DCMD) to get the FW's controller LD 4550 * list structure. This information is mainly used to find out the 4551 * logical drives (LDs) that are currently configured and exported 4552 * by the FW.
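 *
 * On success the result is folded into instance->ld_ids[], which is first
 * filled with 0xff (meaning "no LD at this target id") and then gets the
 * target id written back for every configured LD, roughly:
 *
 *	memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
 *	for (i = 0; i < ld_count; i++)
 *		if (ci->ldList[i].state != 0)
 *			instance->ld_ids[ci->ldList[i].ref.targetId] =
 *				ci->ldList[i].ref.targetId;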
4553 */ 4554 static int 4555 megasas_get_ld_list(struct megasas_instance *instance) 4556 { 4557 int ret = 0, ld_index = 0, ids = 0; 4558 struct megasas_cmd *cmd; 4559 struct megasas_dcmd_frame *dcmd; 4560 struct MR_LD_LIST *ci; 4561 dma_addr_t ci_h = 0; 4562 u32 ld_count; 4563 4564 ci = instance->ld_list_buf; 4565 ci_h = instance->ld_list_buf_h; 4566 4567 cmd = megasas_get_cmd(instance); 4568 4569 if (!cmd) { 4570 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4571 return -ENOMEM; 4572 } 4573 4574 dcmd = &cmd->frame->dcmd; 4575 4576 memset(ci, 0, sizeof(*ci)); 4577 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4578 4579 if (instance->supportmax256vd) 4580 dcmd->mbox.b[0] = 1; 4581 dcmd->cmd = MFI_CMD_DCMD; 4582 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4583 dcmd->sge_count = 1; 4584 dcmd->flags = MFI_FRAME_DIR_READ; 4585 dcmd->timeout = 0; 4586 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4587 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4588 dcmd->pad_0 = 0; 4589 4590 megasas_set_dma_settings(instance, dcmd, ci_h, 4591 sizeof(struct MR_LD_LIST)); 4592 4593 if ((instance->adapter_type != MFI_SERIES) && 4594 !instance->mask_interrupts) 4595 ret = megasas_issue_blocked_cmd(instance, cmd, 4596 MFI_IO_TIMEOUT_SECS); 4597 else 4598 ret = megasas_issue_polled(instance, cmd); 4599 4600 ld_count = le32_to_cpu(ci->ldCount); 4601 4602 switch (ret) { 4603 case DCMD_FAILED: 4604 megaraid_sas_kill_hba(instance); 4605 break; 4606 case DCMD_TIMEOUT: 4607 4608 switch (dcmd_timeout_ocr_possible(instance)) { 4609 case INITIATE_OCR: 4610 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4611 /* 4612 * DCMD failed from AEN path. 4613 * AEN path already hold reset_mutex to avoid PCI access 4614 * while OCR is in progress. 4615 */ 4616 mutex_unlock(&instance->reset_mutex); 4617 megasas_reset_fusion(instance->host, 4618 MFI_IO_TIMEOUT_OCR); 4619 mutex_lock(&instance->reset_mutex); 4620 break; 4621 case KILL_ADAPTER: 4622 megaraid_sas_kill_hba(instance); 4623 break; 4624 case IGNORE_TIMEOUT: 4625 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4626 __func__, __LINE__); 4627 break; 4628 } 4629 4630 break; 4631 4632 case DCMD_SUCCESS: 4633 if (megasas_dbg_lvl & LD_PD_DEBUG) 4634 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4635 __func__, ld_count); 4636 4637 if (ld_count > instance->fw_supported_vd_count) 4638 break; 4639 4640 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4641 4642 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4643 if (ci->ldList[ld_index].state != 0) { 4644 ids = ci->ldList[ld_index].ref.targetId; 4645 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4646 if (megasas_dbg_lvl & LD_PD_DEBUG) 4647 dev_info(&instance->pdev->dev, 4648 "LD%d: targetID: 0x%03x\n", 4649 ld_index, ids); 4650 } 4651 } 4652 4653 break; 4654 } 4655 4656 if (ret != DCMD_TIMEOUT) 4657 megasas_return_cmd(instance, cmd); 4658 4659 return ret; 4660 } 4661 4662 /** 4663 * megasas_ld_list_query - Returns FW's ld_list structure 4664 * @instance: Adapter soft state 4665 * @ld_list: ld_list structure 4666 * 4667 * Issues an internal command (DCMD) to get the FW's controller PD 4668 * list structure. This information is mainly used to find out SYSTEM 4669 * supported by the FW. 
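 *
 * Callers normally request only the LDs visible to the host, e.g. (as in
 * megasas_get_device_list() later in this file):
 *
 *	if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
 *		return FAILED;
 *
 * If the firmware does not support MR_DCMD_LD_LIST_QUERY (DCMD_FAILED),
 * this routine falls back to the older megasas_get_ld_list() above.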
4670 */ 4671 static int 4672 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4673 { 4674 int ret = 0, ld_index = 0, ids = 0; 4675 struct megasas_cmd *cmd; 4676 struct megasas_dcmd_frame *dcmd; 4677 struct MR_LD_TARGETID_LIST *ci; 4678 dma_addr_t ci_h = 0; 4679 u32 tgtid_count; 4680 4681 ci = instance->ld_targetid_list_buf; 4682 ci_h = instance->ld_targetid_list_buf_h; 4683 4684 cmd = megasas_get_cmd(instance); 4685 4686 if (!cmd) { 4687 dev_warn(&instance->pdev->dev, 4688 "megasas_ld_list_query: Failed to get cmd\n"); 4689 return -ENOMEM; 4690 } 4691 4692 dcmd = &cmd->frame->dcmd; 4693 4694 memset(ci, 0, sizeof(*ci)); 4695 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4696 4697 dcmd->mbox.b[0] = query_type; 4698 if (instance->supportmax256vd) 4699 dcmd->mbox.b[2] = 1; 4700 4701 dcmd->cmd = MFI_CMD_DCMD; 4702 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4703 dcmd->sge_count = 1; 4704 dcmd->flags = MFI_FRAME_DIR_READ; 4705 dcmd->timeout = 0; 4706 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4707 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4708 dcmd->pad_0 = 0; 4709 4710 megasas_set_dma_settings(instance, dcmd, ci_h, 4711 sizeof(struct MR_LD_TARGETID_LIST)); 4712 4713 if ((instance->adapter_type != MFI_SERIES) && 4714 !instance->mask_interrupts) 4715 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4716 else 4717 ret = megasas_issue_polled(instance, cmd); 4718 4719 switch (ret) { 4720 case DCMD_FAILED: 4721 dev_info(&instance->pdev->dev, 4722 "DCMD not supported by firmware - %s %d\n", 4723 __func__, __LINE__); 4724 ret = megasas_get_ld_list(instance); 4725 break; 4726 case DCMD_TIMEOUT: 4727 switch (dcmd_timeout_ocr_possible(instance)) { 4728 case INITIATE_OCR: 4729 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4730 /* 4731 * DCMD failed from AEN path. 4732 * AEN path already hold reset_mutex to avoid PCI access 4733 * while OCR is in progress. 
4734 */ 4735 mutex_unlock(&instance->reset_mutex); 4736 megasas_reset_fusion(instance->host, 4737 MFI_IO_TIMEOUT_OCR); 4738 mutex_lock(&instance->reset_mutex); 4739 break; 4740 case KILL_ADAPTER: 4741 megaraid_sas_kill_hba(instance); 4742 break; 4743 case IGNORE_TIMEOUT: 4744 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4745 __func__, __LINE__); 4746 break; 4747 } 4748 4749 break; 4750 case DCMD_SUCCESS: 4751 tgtid_count = le32_to_cpu(ci->count); 4752 4753 if (megasas_dbg_lvl & LD_PD_DEBUG) 4754 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4755 __func__, tgtid_count); 4756 4757 if ((tgtid_count > (instance->fw_supported_vd_count))) 4758 break; 4759 4760 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4761 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4762 ids = ci->targetId[ld_index]; 4763 instance->ld_ids[ids] = ci->targetId[ld_index]; 4764 if (megasas_dbg_lvl & LD_PD_DEBUG) 4765 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4766 ld_index, ci->targetId[ld_index]); 4767 } 4768 4769 break; 4770 } 4771 4772 if (ret != DCMD_TIMEOUT) 4773 megasas_return_cmd(instance, cmd); 4774 4775 return ret; 4776 } 4777 4778 /** 4779 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4780 * dcmd.mbox - reserved 4781 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4782 * Desc: This DCMD will return the combined device list 4783 * Status: MFI_STAT_OK - List returned successfully 4784 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4785 * disabled 4786 * @instance: Adapter soft state 4787 * @is_probe: Driver probe check 4788 * Return: 0 if DCMD succeeded 4789 * non-zero if failed 4790 */ 4791 static int 4792 megasas_host_device_list_query(struct megasas_instance *instance, 4793 bool is_probe) 4794 { 4795 int ret, i, target_id; 4796 struct megasas_cmd *cmd; 4797 struct megasas_dcmd_frame *dcmd; 4798 struct MR_HOST_DEVICE_LIST *ci; 4799 u32 count; 4800 dma_addr_t ci_h; 4801 4802 ci = instance->host_device_list_buf; 4803 ci_h = instance->host_device_list_buf_h; 4804 4805 cmd = megasas_get_cmd(instance); 4806 4807 if (!cmd) { 4808 dev_warn(&instance->pdev->dev, 4809 "%s: failed to get cmd\n", 4810 __func__); 4811 return -ENOMEM; 4812 } 4813 4814 dcmd = &cmd->frame->dcmd; 4815 4816 memset(ci, 0, sizeof(*ci)); 4817 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4818 4819 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 4820 dcmd->cmd = MFI_CMD_DCMD; 4821 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4822 dcmd->sge_count = 1; 4823 dcmd->flags = MFI_FRAME_DIR_READ; 4824 dcmd->timeout = 0; 4825 dcmd->pad_0 = 0; 4826 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 4827 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 4828 4829 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 4830 4831 if (!instance->mask_interrupts) { 4832 ret = megasas_issue_blocked_cmd(instance, cmd, 4833 MFI_IO_TIMEOUT_SECS); 4834 } else { 4835 ret = megasas_issue_polled(instance, cmd); 4836 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4837 } 4838 4839 switch (ret) { 4840 case DCMD_SUCCESS: 4841 /* Fill the internal pd_list and ld_ids array based on 4842 * targetIds returned by FW 4843 */ 4844 count = le32_to_cpu(ci->count); 4845 4846 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 4847 break; 4848 4849 if (megasas_dbg_lvl & LD_PD_DEBUG) 4850 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 4851 __func__, count); 4852 4853 memset(instance->local_pd_list, 0, 4854 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4855 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4856 for (i = 0; i < count; i++) { 4857 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 4858 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 4859 instance->local_pd_list[target_id].tid = target_id; 4860 instance->local_pd_list[target_id].driveType = 4861 ci->host_device_list[i].scsi_type; 4862 instance->local_pd_list[target_id].driveState = 4863 MR_PD_STATE_SYSTEM; 4864 if (megasas_dbg_lvl & LD_PD_DEBUG) 4865 dev_info(&instance->pdev->dev, 4866 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 4867 i, target_id, ci->host_device_list[i].scsi_type); 4868 } else { 4869 instance->ld_ids[target_id] = target_id; 4870 if (megasas_dbg_lvl & LD_PD_DEBUG) 4871 dev_info(&instance->pdev->dev, 4872 "Device %d: LD targetID: 0x%03x\n", 4873 i, target_id); 4874 } 4875 } 4876 4877 memcpy(instance->pd_list, instance->local_pd_list, 4878 sizeof(instance->pd_list)); 4879 break; 4880 4881 case DCMD_TIMEOUT: 4882 switch (dcmd_timeout_ocr_possible(instance)) { 4883 case INITIATE_OCR: 4884 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4885 mutex_unlock(&instance->reset_mutex); 4886 megasas_reset_fusion(instance->host, 4887 MFI_IO_TIMEOUT_OCR); 4888 mutex_lock(&instance->reset_mutex); 4889 break; 4890 case KILL_ADAPTER: 4891 megaraid_sas_kill_hba(instance); 4892 break; 4893 case IGNORE_TIMEOUT: 4894 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4895 __func__, __LINE__); 4896 break; 4897 } 4898 break; 4899 case DCMD_FAILED: 4900 dev_err(&instance->pdev->dev, 4901 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 4902 __func__); 4903 break; 4904 } 4905 4906 if (ret != DCMD_TIMEOUT) 4907 megasas_return_cmd(instance, cmd); 4908 4909 return ret; 4910 } 4911 4912 /* 4913 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4914 * instance : Controller's instance 4915 */ 4916 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4917 { 4918 struct fusion_context *fusion; 4919 u32 ventura_map_sz = 0; 4920 4921 fusion = instance->ctrl_context; 4922 /* For MFI based controllers return dummy success */ 4923 if (!fusion) 4924 return; 4925 4926 instance->supportmax256vd = 4927 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 4928 /* Below is additional check to address future FW enhancement */ 4929 if (instance->ctrl_info_buf->max_lds > 64) 4930 instance->supportmax256vd = 1; 4931 4932 
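/*
 * Worked example (illustrative only): a FW that sets supportMaxExtLDs,
 * or reports max_lds > 64, ends up with supportmax256vd = 1, so
 * fw_supported_vd_count below becomes MAX_LOGICAL_DRIVES_EXT instead of
 * the legacy MAX_LOGICAL_DRIVES limit.
 */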
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4933 * MEGASAS_MAX_DEV_PER_CHANNEL; 4934 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4935 * MEGASAS_MAX_DEV_PER_CHANNEL; 4936 if (instance->supportmax256vd) { 4937 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4938 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4939 } else { 4940 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4941 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4942 } 4943 4944 dev_info(&instance->pdev->dev, 4945 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 4946 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 4947 instance->ctrl_info_buf->max_lds); 4948 4949 if (instance->max_raid_mapsize) { 4950 ventura_map_sz = instance->max_raid_mapsize * 4951 MR_MIN_MAP_SIZE; /* 64k */ 4952 fusion->current_map_sz = ventura_map_sz; 4953 fusion->max_map_sz = ventura_map_sz; 4954 } else { 4955 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4956 (sizeof(struct MR_LD_SPAN_MAP) * 4957 (instance->fw_supported_vd_count - 1)); 4958 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4959 4960 fusion->max_map_sz = 4961 max(fusion->old_map_sz, fusion->new_map_sz); 4962 4963 if (instance->supportmax256vd) 4964 fusion->current_map_sz = fusion->new_map_sz; 4965 else 4966 fusion->current_map_sz = fusion->old_map_sz; 4967 } 4968 /* irrespective of FW raid maps, driver raid map is constant */ 4969 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4970 } 4971 4972 /* 4973 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 4974 * dcmd.hdr.length - number of bytes to read 4975 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 4976 * Desc: Fill in snapdump properties 4977 * Status: MFI_STAT_OK- Command successful 4978 */ 4979 void megasas_get_snapdump_properties(struct megasas_instance *instance) 4980 { 4981 int ret = 0; 4982 struct megasas_cmd *cmd; 4983 struct megasas_dcmd_frame *dcmd; 4984 struct MR_SNAPDUMP_PROPERTIES *ci; 4985 dma_addr_t ci_h = 0; 4986 4987 ci = instance->snapdump_prop; 4988 ci_h = instance->snapdump_prop_h; 4989 4990 if (!ci) 4991 return; 4992 4993 cmd = megasas_get_cmd(instance); 4994 4995 if (!cmd) { 4996 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 4997 return; 4998 } 4999 5000 dcmd = &cmd->frame->dcmd; 5001 5002 memset(ci, 0, sizeof(*ci)); 5003 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5004 5005 dcmd->cmd = MFI_CMD_DCMD; 5006 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5007 dcmd->sge_count = 1; 5008 dcmd->flags = MFI_FRAME_DIR_READ; 5009 dcmd->timeout = 0; 5010 dcmd->pad_0 = 0; 5011 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5012 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5013 5014 megasas_set_dma_settings(instance, dcmd, ci_h, 5015 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5016 5017 if (!instance->mask_interrupts) { 5018 ret = megasas_issue_blocked_cmd(instance, cmd, 5019 MFI_IO_TIMEOUT_SECS); 5020 } else { 5021 ret = megasas_issue_polled(instance, cmd); 5022 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5023 } 5024 5025 switch (ret) { 5026 case DCMD_SUCCESS: 5027 instance->snapdump_wait_time = 5028 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5029 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5030 break; 5031 5032 case DCMD_TIMEOUT: 5033 switch (dcmd_timeout_ocr_possible(instance)) { 5034 case INITIATE_OCR: 5035 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5036 mutex_unlock(&instance->reset_mutex); 5037 megasas_reset_fusion(instance->host, 5038 MFI_IO_TIMEOUT_OCR); 5039 
mutex_lock(&instance->reset_mutex); 5040 break; 5041 case KILL_ADAPTER: 5042 megaraid_sas_kill_hba(instance); 5043 break; 5044 case IGNORE_TIMEOUT: 5045 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5046 __func__, __LINE__); 5047 break; 5048 } 5049 } 5050 5051 if (ret != DCMD_TIMEOUT) 5052 megasas_return_cmd(instance, cmd); 5053 } 5054 5055 /** 5056 * megasas_get_controller_info - Returns FW's controller structure 5057 * @instance: Adapter soft state 5058 * 5059 * Issues an internal command (DCMD) to get the FW's controller structure. 5060 * This information is mainly used to find out the maximum IO transfer per 5061 * command supported by the FW. 5062 */ 5063 int 5064 megasas_get_ctrl_info(struct megasas_instance *instance) 5065 { 5066 int ret = 0; 5067 struct megasas_cmd *cmd; 5068 struct megasas_dcmd_frame *dcmd; 5069 struct megasas_ctrl_info *ci; 5070 dma_addr_t ci_h = 0; 5071 5072 ci = instance->ctrl_info_buf; 5073 ci_h = instance->ctrl_info_buf_h; 5074 5075 cmd = megasas_get_cmd(instance); 5076 5077 if (!cmd) { 5078 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5079 return -ENOMEM; 5080 } 5081 5082 dcmd = &cmd->frame->dcmd; 5083 5084 memset(ci, 0, sizeof(*ci)); 5085 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5086 5087 dcmd->cmd = MFI_CMD_DCMD; 5088 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5089 dcmd->sge_count = 1; 5090 dcmd->flags = MFI_FRAME_DIR_READ; 5091 dcmd->timeout = 0; 5092 dcmd->pad_0 = 0; 5093 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5094 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5095 dcmd->mbox.b[0] = 1; 5096 5097 megasas_set_dma_settings(instance, dcmd, ci_h, 5098 sizeof(struct megasas_ctrl_info)); 5099 5100 if ((instance->adapter_type != MFI_SERIES) && 5101 !instance->mask_interrupts) { 5102 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5103 } else { 5104 ret = megasas_issue_polled(instance, cmd); 5105 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5106 } 5107 5108 switch (ret) { 5109 case DCMD_SUCCESS: 5110 /* Save required controller information in 5111 * CPU endianness format. 5112 */ 5113 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5114 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5115 le32_to_cpus((u32 *)&ci->adapterOperations2); 5116 le32_to_cpus((u32 *)&ci->adapterOperations3); 5117 le16_to_cpus((u16 *)&ci->adapter_operations4); 5118 le32_to_cpus((u32 *)&ci->adapter_operations5); 5119 5120 /* Update the latest Ext VD info. 5121 * From Init path, store current firmware details. 5122 * From OCR path, detect any firmware properties changes. 5123 * in case of Firmware upgrade without system reboot. 5124 */ 5125 megasas_update_ext_vd_details(instance); 5126 instance->support_seqnum_jbod_fp = 5127 ci->adapterOperations3.useSeqNumJbodFP; 5128 instance->support_morethan256jbod = 5129 ci->adapter_operations4.support_pd_map_target_id; 5130 instance->support_nvme_passthru = 5131 ci->adapter_operations4.support_nvme_passthru; 5132 instance->support_pci_lane_margining = 5133 ci->adapter_operations5.support_pci_lane_margining; 5134 instance->task_abort_tmo = ci->TaskAbortTO; 5135 instance->max_reset_tmo = ci->MaxResetTO; 5136 5137 /*Check whether controller is iMR or MR */ 5138 instance->is_imr = (ci->memory_size ? 0 : 1); 5139 5140 instance->snapdump_wait_time = 5141 (ci->properties.on_off_properties2.enable_snap_dump ? 
5142 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5143 5144 instance->enable_fw_dev_list = 5145 ci->properties.on_off_properties2.enable_fw_dev_list; 5146 5147 dev_info(&instance->pdev->dev, 5148 "controller type\t: %s(%dMB)\n", 5149 instance->is_imr ? "iMR" : "MR", 5150 le16_to_cpu(ci->memory_size)); 5151 5152 instance->disableOnlineCtrlReset = 5153 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5154 instance->secure_jbod_support = 5155 ci->adapterOperations3.supportSecurityonJBOD; 5156 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5157 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5158 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5159 instance->secure_jbod_support ? "Yes" : "No"); 5160 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5161 instance->support_nvme_passthru ? "Yes" : "No"); 5162 dev_info(&instance->pdev->dev, 5163 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5164 instance->task_abort_tmo, instance->max_reset_tmo); 5165 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5166 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5167 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5168 instance->support_pci_lane_margining ? "Yes" : "No"); 5169 5170 break; 5171 5172 case DCMD_TIMEOUT: 5173 switch (dcmd_timeout_ocr_possible(instance)) { 5174 case INITIATE_OCR: 5175 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5176 mutex_unlock(&instance->reset_mutex); 5177 megasas_reset_fusion(instance->host, 5178 MFI_IO_TIMEOUT_OCR); 5179 mutex_lock(&instance->reset_mutex); 5180 break; 5181 case KILL_ADAPTER: 5182 megaraid_sas_kill_hba(instance); 5183 break; 5184 case IGNORE_TIMEOUT: 5185 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5186 __func__, __LINE__); 5187 break; 5188 } 5189 break; 5190 case DCMD_FAILED: 5191 megaraid_sas_kill_hba(instance); 5192 break; 5193 5194 } 5195 5196 if (ret != DCMD_TIMEOUT) 5197 megasas_return_cmd(instance, cmd); 5198 5199 return ret; 5200 } 5201 5202 /* 5203 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5204 * to firmware 5205 * 5206 * @instance: Adapter soft state 5207 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5208 MR_CRASH_BUF_TURN_OFF = 0 5209 MR_CRASH_BUF_TURN_ON = 1 5210 * @return 0 on success non-zero on failure. 5211 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5212 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5213 * that driver supports crash dump feature. This DCMD will be sent only if 5214 * crash dump feature is supported by the FW. 
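 *
 * Illustrative call, using the two states named above (the real callers
 * sit in the crash dump/OCR handling paths of this driver):
 *
 *	megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_ON);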
5215 * 5216 */ 5217 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5218 u8 crash_buf_state) 5219 { 5220 int ret = 0; 5221 struct megasas_cmd *cmd; 5222 struct megasas_dcmd_frame *dcmd; 5223 5224 cmd = megasas_get_cmd(instance); 5225 5226 if (!cmd) { 5227 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5228 return -ENOMEM; 5229 } 5230 5231 5232 dcmd = &cmd->frame->dcmd; 5233 5234 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5235 dcmd->mbox.b[0] = crash_buf_state; 5236 dcmd->cmd = MFI_CMD_DCMD; 5237 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5238 dcmd->sge_count = 1; 5239 dcmd->flags = MFI_FRAME_DIR_NONE; 5240 dcmd->timeout = 0; 5241 dcmd->pad_0 = 0; 5242 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5243 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5244 5245 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5246 CRASH_DMA_BUF_SIZE); 5247 5248 if ((instance->adapter_type != MFI_SERIES) && 5249 !instance->mask_interrupts) 5250 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5251 else 5252 ret = megasas_issue_polled(instance, cmd); 5253 5254 if (ret == DCMD_TIMEOUT) { 5255 switch (dcmd_timeout_ocr_possible(instance)) { 5256 case INITIATE_OCR: 5257 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5258 megasas_reset_fusion(instance->host, 5259 MFI_IO_TIMEOUT_OCR); 5260 break; 5261 case KILL_ADAPTER: 5262 megaraid_sas_kill_hba(instance); 5263 break; 5264 case IGNORE_TIMEOUT: 5265 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5266 __func__, __LINE__); 5267 break; 5268 } 5269 } else 5270 megasas_return_cmd(instance, cmd); 5271 5272 return ret; 5273 } 5274 5275 /** 5276 * megasas_issue_init_mfi - Initializes the FW 5277 * @instance: Adapter soft state 5278 * 5279 * Issues the INIT MFI cmd 5280 */ 5281 static int 5282 megasas_issue_init_mfi(struct megasas_instance *instance) 5283 { 5284 __le32 context; 5285 struct megasas_cmd *cmd; 5286 struct megasas_init_frame *init_frame; 5287 struct megasas_init_queue_info *initq_info; 5288 dma_addr_t init_frame_h; 5289 dma_addr_t initq_info_h; 5290 5291 /* 5292 * Prepare a init frame. Note the init frame points to queue info 5293 * structure. Each frame has SGL allocated after first 64 bytes. For 5294 * this frame - since we don't need any SGL - we use SGL's space as 5295 * queue info structure 5296 * 5297 * We will not get a NULL command below. We just created the pool. 
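 *
 * Resulting frame layout (sketch of the address arithmetic used below):
 *
 *	cmd->frame_phys_addr + 0  : struct megasas_init_frame (first 64 bytes)
 *	cmd->frame_phys_addr + 64 : struct megasas_init_queue_info
 *	                            (carved out of the unused SGL space)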
5298 */ 5299 cmd = megasas_get_cmd(instance); 5300 5301 init_frame = (struct megasas_init_frame *)cmd->frame; 5302 initq_info = (struct megasas_init_queue_info *) 5303 ((unsigned long)init_frame + 64); 5304 5305 init_frame_h = cmd->frame_phys_addr; 5306 initq_info_h = init_frame_h + 64; 5307 5308 context = init_frame->context; 5309 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5310 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5311 init_frame->context = context; 5312 5313 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5314 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5315 5316 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5317 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5318 5319 init_frame->cmd = MFI_CMD_INIT; 5320 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5321 init_frame->queue_info_new_phys_addr_lo = 5322 cpu_to_le32(lower_32_bits(initq_info_h)); 5323 init_frame->queue_info_new_phys_addr_hi = 5324 cpu_to_le32(upper_32_bits(initq_info_h)); 5325 5326 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5327 5328 /* 5329 * disable the intr before firing the init frame to FW 5330 */ 5331 instance->instancet->disable_intr(instance); 5332 5333 /* 5334 * Issue the init frame in polled mode 5335 */ 5336 5337 if (megasas_issue_polled(instance, cmd)) { 5338 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5339 megasas_return_cmd(instance, cmd); 5340 goto fail_fw_init; 5341 } 5342 5343 megasas_return_cmd(instance, cmd); 5344 5345 return 0; 5346 5347 fail_fw_init: 5348 return -EINVAL; 5349 } 5350 5351 static u32 5352 megasas_init_adapter_mfi(struct megasas_instance *instance) 5353 { 5354 u32 context_sz; 5355 u32 reply_q_sz; 5356 5357 /* 5358 * Get various operational parameters from status register 5359 */ 5360 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5361 /* 5362 * Reduce the max supported cmds by 1. This is to ensure that the 5363 * reply_q_sz (1 more than the max cmd that driver may send) 5364 * does not exceed max cmds that the FW can support 5365 */ 5366 instance->max_fw_cmds = instance->max_fw_cmds-1; 5367 instance->max_mfi_cmds = instance->max_fw_cmds; 5368 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5369 0x10; 5370 /* 5371 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5372 * are reserved for IOCTL + driver's internal DCMDs. 5373 */ 5374 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5375 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5376 instance->max_scsi_cmds = (instance->max_fw_cmds - 5377 MEGASAS_SKINNY_INT_CMDS); 5378 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5379 } else { 5380 instance->max_scsi_cmds = (instance->max_fw_cmds - 5381 MEGASAS_INT_CMDS); 5382 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5383 } 5384 5385 instance->cur_can_queue = instance->max_scsi_cmds; 5386 /* 5387 * Create a pool of commands 5388 */ 5389 if (megasas_alloc_cmds(instance)) 5390 goto fail_alloc_cmds; 5391 5392 /* 5393 * Allocate memory for reply queue. Length of reply queue should 5394 * be _one_ more than the maximum commands handled by the firmware. 5395 * 5396 * Note: When FW completes commands, it places corresponding contex 5397 * values in this circular reply queue. This circular queue is a fairly 5398 * typical producer-consumer queue. 
FW is the producer (of completed 5399 * commands) and the driver is the consumer. 5400 */ 5401 context_sz = sizeof(u32); 5402 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5403 5404 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5405 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5406 5407 if (!instance->reply_queue) { 5408 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5409 goto fail_reply_queue; 5410 } 5411 5412 if (megasas_issue_init_mfi(instance)) 5413 goto fail_fw_init; 5414 5415 if (megasas_get_ctrl_info(instance)) { 5416 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5417 "Fail from %s %d\n", instance->unique_id, 5418 __func__, __LINE__); 5419 goto fail_fw_init; 5420 } 5421 5422 instance->fw_support_ieee = 0; 5423 instance->fw_support_ieee = 5424 (instance->instancet->read_fw_status_reg(instance) & 5425 0x04000000); 5426 5427 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5428 instance->fw_support_ieee); 5429 5430 if (instance->fw_support_ieee) 5431 instance->flag_ieee = 1; 5432 5433 return 0; 5434 5435 fail_fw_init: 5436 5437 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5438 instance->reply_queue, instance->reply_queue_h); 5439 fail_reply_queue: 5440 megasas_free_cmds(instance); 5441 5442 fail_alloc_cmds: 5443 return 1; 5444 } 5445 5446 static 5447 void megasas_setup_irq_poll(struct megasas_instance *instance) 5448 { 5449 struct megasas_irq_context *irq_ctx; 5450 u32 count, i; 5451 5452 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5453 5454 /* Initialize IRQ poll */ 5455 for (i = 0; i < count; i++) { 5456 irq_ctx = &instance->irq_context[i]; 5457 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5458 irq_ctx->irq_poll_scheduled = false; 5459 irq_poll_init(&irq_ctx->irqpoll, 5460 instance->threshold_reply_count, 5461 megasas_irqpoll); 5462 } 5463 } 5464 5465 /* 5466 * megasas_setup_irqs_ioapic - register legacy interrupts. 5467 * @instance: Adapter soft state 5468 * 5469 * Do not enable interrupt, only setup ISRs. 5470 * 5471 * Return 0 on success. 5472 */ 5473 static int 5474 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5475 { 5476 struct pci_dev *pdev; 5477 5478 pdev = instance->pdev; 5479 instance->irq_context[0].instance = instance; 5480 instance->irq_context[0].MSIxIndex = 0; 5481 if (request_irq(pci_irq_vector(pdev, 0), 5482 instance->instancet->service_isr, IRQF_SHARED, 5483 "megasas", &instance->irq_context[0])) { 5484 dev_err(&instance->pdev->dev, 5485 "Failed to register IRQ from %s %d\n", 5486 __func__, __LINE__); 5487 return -1; 5488 } 5489 instance->perf_mode = MR_LATENCY_PERF_MODE; 5490 instance->low_latency_index_start = 0; 5491 return 0; 5492 } 5493 5494 /** 5495 * megasas_setup_irqs_msix - register MSI-x interrupts. 5496 * @instance: Adapter soft state 5497 * @is_probe: Driver probe check 5498 * 5499 * Do not enable interrupt, only setup ISRs. 5500 * 5501 * Return 0 on success. 
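 *
 * Sketch of the intended call site (the error label is illustrative only):
 *
 *	if (instance->msix_vectors ?
 *	    megasas_setup_irqs_msix(instance, 1) :
 *	    megasas_setup_irqs_ioapic(instance))
 *		goto fail_init_irqs;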
5502 */ 5503 static int 5504 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5505 { 5506 int i, j; 5507 struct pci_dev *pdev; 5508 5509 pdev = instance->pdev; 5510 5511 /* Try MSI-x */ 5512 for (i = 0; i < instance->msix_vectors; i++) { 5513 instance->irq_context[i].instance = instance; 5514 instance->irq_context[i].MSIxIndex = i; 5515 if (request_irq(pci_irq_vector(pdev, i), 5516 instance->instancet->service_isr, 0, "megasas", 5517 &instance->irq_context[i])) { 5518 dev_err(&instance->pdev->dev, 5519 "Failed to register IRQ for vector %d.\n", i); 5520 for (j = 0; j < i; j++) 5521 free_irq(pci_irq_vector(pdev, j), 5522 &instance->irq_context[j]); 5523 /* Retry irq register for IO_APIC*/ 5524 instance->msix_vectors = 0; 5525 instance->msix_load_balance = false; 5526 if (is_probe) { 5527 pci_free_irq_vectors(instance->pdev); 5528 return megasas_setup_irqs_ioapic(instance); 5529 } else { 5530 return -1; 5531 } 5532 } 5533 } 5534 5535 return 0; 5536 } 5537 5538 /* 5539 * megasas_destroy_irqs- unregister interrupts. 5540 * @instance: Adapter soft state 5541 * return: void 5542 */ 5543 static void 5544 megasas_destroy_irqs(struct megasas_instance *instance) { 5545 5546 int i; 5547 int count; 5548 struct megasas_irq_context *irq_ctx; 5549 5550 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5551 if (instance->adapter_type != MFI_SERIES) { 5552 for (i = 0; i < count; i++) { 5553 irq_ctx = &instance->irq_context[i]; 5554 irq_poll_disable(&irq_ctx->irqpoll); 5555 } 5556 } 5557 5558 if (instance->msix_vectors) 5559 for (i = 0; i < instance->msix_vectors; i++) { 5560 free_irq(pci_irq_vector(instance->pdev, i), 5561 &instance->irq_context[i]); 5562 } 5563 else 5564 free_irq(pci_irq_vector(instance->pdev, 0), 5565 &instance->irq_context[0]); 5566 } 5567 5568 /** 5569 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5570 * @instance: Adapter soft state 5571 * @is_probe: Driver probe check 5572 * 5573 * Return 0 on success. 
5574 */ 5575 void 5576 megasas_setup_jbod_map(struct megasas_instance *instance) 5577 { 5578 int i; 5579 struct fusion_context *fusion = instance->ctrl_context; 5580 u32 pd_seq_map_sz; 5581 5582 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5583 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5584 5585 instance->use_seqnum_jbod_fp = 5586 instance->support_seqnum_jbod_fp; 5587 if (reset_devices || !fusion || 5588 !instance->support_seqnum_jbod_fp) { 5589 dev_info(&instance->pdev->dev, 5590 "JBOD sequence map is disabled %s %d\n", 5591 __func__, __LINE__); 5592 instance->use_seqnum_jbod_fp = false; 5593 return; 5594 } 5595 5596 if (fusion->pd_seq_sync[0]) 5597 goto skip_alloc; 5598 5599 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5600 fusion->pd_seq_sync[i] = dma_alloc_coherent 5601 (&instance->pdev->dev, pd_seq_map_sz, 5602 &fusion->pd_seq_phys[i], GFP_KERNEL); 5603 if (!fusion->pd_seq_sync[i]) { 5604 dev_err(&instance->pdev->dev, 5605 "Failed to allocate memory from %s %d\n", 5606 __func__, __LINE__); 5607 if (i == 1) { 5608 dma_free_coherent(&instance->pdev->dev, 5609 pd_seq_map_sz, fusion->pd_seq_sync[0], 5610 fusion->pd_seq_phys[0]); 5611 fusion->pd_seq_sync[0] = NULL; 5612 } 5613 instance->use_seqnum_jbod_fp = false; 5614 return; 5615 } 5616 } 5617 5618 skip_alloc: 5619 if (!megasas_sync_pd_seq_num(instance, false) && 5620 !megasas_sync_pd_seq_num(instance, true)) 5621 instance->use_seqnum_jbod_fp = true; 5622 else 5623 instance->use_seqnum_jbod_fp = false; 5624 } 5625 5626 static void megasas_setup_reply_map(struct megasas_instance *instance) 5627 { 5628 const struct cpumask *mask; 5629 unsigned int queue, cpu, low_latency_index_start; 5630 5631 low_latency_index_start = instance->low_latency_index_start; 5632 5633 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5634 mask = pci_irq_get_affinity(instance->pdev, queue); 5635 if (!mask) 5636 goto fallback; 5637 5638 for_each_cpu(cpu, mask) 5639 instance->reply_map[cpu] = queue; 5640 } 5641 return; 5642 5643 fallback: 5644 queue = low_latency_index_start; 5645 for_each_possible_cpu(cpu) { 5646 instance->reply_map[cpu] = queue; 5647 if (queue == (instance->msix_vectors - 1)) 5648 queue = low_latency_index_start; 5649 else 5650 queue++; 5651 } 5652 } 5653 5654 /** 5655 * megasas_get_device_list - Get the PD and LD device list from FW. 5656 * @instance: Adapter soft state 5657 * @return: Success or failure 5658 * 5659 * Issue DCMDs to Firmware to get the PD and LD list. 5660 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5661 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
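 *
 * Condensed view of the decision made below (err is illustrative; the
 * function itself returns SUCCESS or FAILED):
 *
 *	if (instance->enable_fw_dev_list)
 *		err = megasas_host_device_list_query(instance, true);
 *	else
 *		err = megasas_get_pd_list(instance) ||
 *		      megasas_ld_list_query(instance,
 *					    MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);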
5662 */ 5663 static 5664 int megasas_get_device_list(struct megasas_instance *instance) 5665 { 5666 memset(instance->pd_list, 0, 5667 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5668 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5669 5670 if (instance->enable_fw_dev_list) { 5671 if (megasas_host_device_list_query(instance, true)) 5672 return FAILED; 5673 } else { 5674 if (megasas_get_pd_list(instance) < 0) { 5675 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5676 return FAILED; 5677 } 5678 5679 if (megasas_ld_list_query(instance, 5680 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5681 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5682 return FAILED; 5683 } 5684 } 5685 5686 return SUCCESS; 5687 } 5688 5689 /** 5690 * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues 5691 * @instance: Adapter soft state 5692 * return: void 5693 */ 5694 static inline void 5695 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance) 5696 { 5697 int i; 5698 int local_numa_node; 5699 5700 if (instance->perf_mode == MR_BALANCED_PERF_MODE) { 5701 local_numa_node = dev_to_node(&instance->pdev->dev); 5702 5703 for (i = 0; i < instance->low_latency_index_start; i++) 5704 irq_set_affinity_hint(pci_irq_vector(instance->pdev, i), 5705 cpumask_of_node(local_numa_node)); 5706 } 5707 } 5708 5709 static int 5710 __megasas_alloc_irq_vectors(struct megasas_instance *instance) 5711 { 5712 int i, irq_flags; 5713 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; 5714 struct irq_affinity *descp = &desc; 5715 5716 irq_flags = PCI_IRQ_MSIX; 5717 5718 if (instance->smp_affinity_enable) 5719 irq_flags |= PCI_IRQ_AFFINITY; 5720 else 5721 descp = NULL; 5722 5723 i = pci_alloc_irq_vectors_affinity(instance->pdev, 5724 instance->low_latency_index_start, 5725 instance->msix_vectors, irq_flags, descp); 5726 5727 return i; 5728 } 5729 5730 /** 5731 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors 5732 * @instance: Adapter soft state 5733 * return: void 5734 */ 5735 static void 5736 megasas_alloc_irq_vectors(struct megasas_instance *instance) 5737 { 5738 int i; 5739 unsigned int num_msix_req; 5740 5741 i = __megasas_alloc_irq_vectors(instance); 5742 5743 if ((instance->perf_mode == MR_BALANCED_PERF_MODE) && 5744 (i != instance->msix_vectors)) { 5745 if (instance->msix_vectors) 5746 pci_free_irq_vectors(instance->pdev); 5747 /* Disable Balanced IOPS mode and try realloc vectors */ 5748 instance->perf_mode = MR_LATENCY_PERF_MODE; 5749 instance->low_latency_index_start = 1; 5750 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5751 5752 instance->msix_vectors = min(num_msix_req, 5753 instance->msix_vectors); 5754 5755 i = __megasas_alloc_irq_vectors(instance); 5756 5757 } 5758 5759 dev_info(&instance->pdev->dev, 5760 "requested/available msix %d/%d\n", instance->msix_vectors, i); 5761 5762 if (i > 0) 5763 instance->msix_vectors = i; 5764 else 5765 instance->msix_vectors = 0; 5766 5767 if (instance->smp_affinity_enable) 5768 megasas_set_high_iops_queue_affinity_hint(instance); 5769 } 5770 5771 /** 5772 * megasas_init_fw - Initializes the FW 5773 * @instance: Adapter soft state 5774 * 5775 * This is the main function for initializing firmware 5776 */ 5777 5778 static int megasas_init_fw(struct megasas_instance *instance) 5779 { 5780 u32 max_sectors_1; 5781 u32 max_sectors_2, tmp_sectors, msix_enable; 5782 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 5783 resource_size_t 
base_addr; 5784 void *base_addr_phys; 5785 struct megasas_ctrl_info *ctrl_info = NULL; 5786 unsigned long bar_list; 5787 int i, j, loop; 5788 struct IOV_111 *iovPtr; 5789 struct fusion_context *fusion; 5790 bool intr_coalescing; 5791 unsigned int num_msix_req; 5792 u16 lnksta, speed; 5793 5794 fusion = instance->ctrl_context; 5795 5796 /* Find first memory bar */ 5797 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5798 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5799 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5800 "megasas: LSI")) { 5801 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5802 return -EBUSY; 5803 } 5804 5805 base_addr = pci_resource_start(instance->pdev, instance->bar); 5806 instance->reg_set = ioremap_nocache(base_addr, 8192); 5807 5808 if (!instance->reg_set) { 5809 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5810 goto fail_ioremap; 5811 } 5812 5813 base_addr_phys = &base_addr; 5814 dev_printk(KERN_DEBUG, &instance->pdev->dev, 5815 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 5816 instance->bar, base_addr_phys, instance->reg_set); 5817 5818 if (instance->adapter_type != MFI_SERIES) 5819 instance->instancet = &megasas_instance_template_fusion; 5820 else { 5821 switch (instance->pdev->device) { 5822 case PCI_DEVICE_ID_LSI_SAS1078R: 5823 case PCI_DEVICE_ID_LSI_SAS1078DE: 5824 instance->instancet = &megasas_instance_template_ppc; 5825 break; 5826 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5827 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5828 instance->instancet = &megasas_instance_template_gen2; 5829 break; 5830 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5831 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5832 instance->instancet = &megasas_instance_template_skinny; 5833 break; 5834 case PCI_DEVICE_ID_LSI_SAS1064R: 5835 case PCI_DEVICE_ID_DELL_PERC5: 5836 default: 5837 instance->instancet = &megasas_instance_template_xscale; 5838 instance->pd_list_not_supported = 1; 5839 break; 5840 } 5841 } 5842 5843 if (megasas_transition_to_ready(instance, 0)) { 5844 dev_info(&instance->pdev->dev, 5845 "Failed to transition controller to ready from %s!\n", 5846 __func__); 5847 if (instance->adapter_type != MFI_SERIES) { 5848 status_reg = instance->instancet->read_fw_status_reg( 5849 instance); 5850 if (status_reg & MFI_RESET_ADAPTER) { 5851 if (megasas_adp_reset_wait_for_ready 5852 (instance, true, 0) == FAILED) 5853 goto fail_ready_state; 5854 } else { 5855 goto fail_ready_state; 5856 } 5857 } else { 5858 atomic_set(&instance->fw_reset_no_pci_access, 1); 5859 instance->instancet->adp_reset 5860 (instance, instance->reg_set); 5861 atomic_set(&instance->fw_reset_no_pci_access, 0); 5862 5863 /*waiting for about 30 second before retry*/ 5864 ssleep(30); 5865 5866 if (megasas_transition_to_ready(instance, 0)) 5867 goto fail_ready_state; 5868 } 5869 5870 dev_info(&instance->pdev->dev, 5871 "FW restarted successfully from %s!\n", 5872 __func__); 5873 } 5874 5875 megasas_init_ctrl_params(instance); 5876 5877 if (megasas_set_dma_mask(instance)) 5878 goto fail_ready_state; 5879 5880 if (megasas_alloc_ctrl_mem(instance)) 5881 goto fail_alloc_dma_buf; 5882 5883 if (megasas_alloc_ctrl_dma_buffers(instance)) 5884 goto fail_alloc_dma_buf; 5885 5886 fusion = instance->ctrl_context; 5887 5888 if (instance->adapter_type >= VENTURA_SERIES) { 5889 scratch_pad_2 = 5890 megasas_readl(instance, 5891 &instance->reg_set->outbound_scratch_pad_2); 5892 instance->max_raid_mapsize = ((scratch_pad_2 >> 5893 
MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5894 MR_MAX_RAID_MAP_SIZE_MASK); 5895 } 5896 5897 switch (instance->adapter_type) { 5898 case VENTURA_SERIES: 5899 fusion->pcie_bw_limitation = true; 5900 break; 5901 case AERO_SERIES: 5902 fusion->r56_div_offload = true; 5903 break; 5904 default: 5905 break; 5906 } 5907 5908 /* Check if MSI-X is supported while in ready state */ 5909 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 5910 0x4000000) >> 0x1a; 5911 if (msix_enable && !msix_disable) { 5912 5913 scratch_pad_1 = megasas_readl 5914 (instance, &instance->reg_set->outbound_scratch_pad_1); 5915 /* Check max MSI-X vectors */ 5916 if (fusion) { 5917 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5918 /* Thunderbolt Series*/ 5919 instance->msix_vectors = (scratch_pad_1 5920 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5921 } else { 5922 instance->msix_vectors = ((scratch_pad_1 5923 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5924 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5925 5926 /* 5927 * For Invader series, > 8 MSI-x vectors 5928 * supported by FW/HW implies combined 5929 * reply queue mode is enabled. 5930 * For Ventura series, > 16 MSI-x vectors 5931 * supported by FW/HW implies combined 5932 * reply queue mode is enabled. 5933 */ 5934 switch (instance->adapter_type) { 5935 case INVADER_SERIES: 5936 if (instance->msix_vectors > 8) 5937 instance->msix_combined = true; 5938 break; 5939 case AERO_SERIES: 5940 case VENTURA_SERIES: 5941 if (instance->msix_vectors > 16) 5942 instance->msix_combined = true; 5943 break; 5944 } 5945 5946 if (rdpq_enable) 5947 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 5948 1 : 0; 5949 5950 if (instance->adapter_type >= INVADER_SERIES && 5951 !instance->msix_combined) { 5952 instance->msix_load_balance = true; 5953 instance->smp_affinity_enable = false; 5954 } 5955 5956 /* Save 1-15 reply post index address to local memory 5957 * Index 0 is already saved from reg offset 5958 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5959 */ 5960 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5961 instance->reply_post_host_index_addr[loop] = 5962 (u32 __iomem *) 5963 ((u8 __iomem *)instance->reg_set + 5964 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5965 + (loop * 0x10)); 5966 } 5967 } 5968 5969 dev_info(&instance->pdev->dev, 5970 "firmware supports msix\t: (%d)", 5971 instance->msix_vectors); 5972 if (msix_vectors) 5973 instance->msix_vectors = min(msix_vectors, 5974 instance->msix_vectors); 5975 } else /* MFI adapters */ 5976 instance->msix_vectors = 1; 5977 5978 5979 /* 5980 * For Aero (if some conditions are met), driver will configure a 5981 * few additional reply queues with interrupt coalescing enabled. 5982 * These queues with interrupt coalescing enabled are called 5983 * High IOPS queues and rest of reply queues (based on number of 5984 * logical CPUs) are termed as Low latency queues. 5985 * 5986 * Total Number of reply queues = High IOPS queues + low latency queues 5987 * 5988 * For rest of fusion adapters, 1 additional reply queue will be 5989 * reserved for management commands, rest of reply queues 5990 * (based on number of logical CPUs) will be used for IOs and 5991 * referenced as IO queues. 5992 * Total Number of reply queues = 1 + IO queues 5993 * 5994 * MFI adapters supports single MSI-x so single reply queue 5995 * will be used for IO and management commands. 5996 */ 5997 5998 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? 
				true : false;
		if (intr_coalescing &&
		    (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
		    (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
			instance->perf_mode = MR_BALANCED_PERF_MODE;
		else
			instance->perf_mode = MR_LATENCY_PERF_MODE;

		if (instance->adapter_type == AERO_SERIES) {
			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
			speed = lnksta & PCI_EXP_LNKSTA_CLS;

			/*
			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
			 * in latency perf mode and enable R1 PCI bandwidth algorithm
			 */
			if (speed < 0x4) {
				instance->perf_mode = MR_LATENCY_PERF_MODE;
				fusion->pcie_bw_limitation = true;
			}

			/*
			 * Performance mode settings provided through the module parameter
			 * perf_mode take effect only for:
			 * 1. Aero family of adapters.
			 * 2. When the user sets the module parameter perf_mode in the range 0-2.
			 */
			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
			    (perf_mode <= MR_LATENCY_PERF_MODE))
				instance->perf_mode = perf_mode;
			/*
			 * If intr coalescing is not supported by controller FW, then IOPS
			 * and Balanced modes are not feasible.
			 */
			if (!intr_coalescing)
				instance->perf_mode = MR_LATENCY_PERF_MODE;

		}

		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
			instance->low_latency_index_start =
				MR_HIGH_IOPS_QUEUE_COUNT;
		else
			instance->low_latency_index_start = 1;

		num_msix_req = num_online_cpus() + instance->low_latency_index_start;

		instance->msix_vectors = min(num_msix_req,
				instance->msix_vectors);

		megasas_alloc_irq_vectors(instance);
		if (!instance->msix_vectors)
			instance->msix_load_balance = false;
	}
	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based Adapters.
	 */
	if (instance->msix_combined) {
		instance->reply_post_host_index_addr[0] =
			(u32 *)((u8 *)instance->reg_set +
			MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
	} else {
		instance->reply_post_host_index_addr[0] =
			(u32 *)((u8 *)instance->reg_set +
			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
	}

	if (!instance->msix_vectors) {
		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
		if (i < 0)
			goto fail_init_adapter;
	}

	megasas_setup_reply_map(instance);

	dev_info(&instance->pdev->dev,
		"current msix/online cpus\t: (%d/%d)\n",
		instance->msix_vectors, (unsigned int)num_online_cpus());
	dev_info(&instance->pdev->dev,
		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");

	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
		(unsigned long)instance);

	/*
	 * Below are the default values for legacy firmware,
6087 * non-fusion based controllers 6088 */ 6089 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6090 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6091 /* Get operational params, sge flags, send init cmd to controller */ 6092 if (instance->instancet->init_adapter(instance)) 6093 goto fail_init_adapter; 6094 6095 if (instance->adapter_type >= VENTURA_SERIES) { 6096 scratch_pad_3 = 6097 megasas_readl(instance, 6098 &instance->reg_set->outbound_scratch_pad_3); 6099 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6100 MR_DEFAULT_NVME_PAGE_SHIFT) 6101 instance->nvme_page_size = 6102 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6103 6104 dev_info(&instance->pdev->dev, 6105 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6106 } 6107 6108 if (instance->msix_vectors ? 6109 megasas_setup_irqs_msix(instance, 1) : 6110 megasas_setup_irqs_ioapic(instance)) 6111 goto fail_init_adapter; 6112 6113 if (instance->adapter_type != MFI_SERIES) 6114 megasas_setup_irq_poll(instance); 6115 6116 instance->instancet->enable_intr(instance); 6117 6118 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6119 6120 megasas_setup_jbod_map(instance); 6121 6122 if (megasas_get_device_list(instance) != SUCCESS) { 6123 dev_err(&instance->pdev->dev, 6124 "%s: megasas_get_device_list failed\n", 6125 __func__); 6126 goto fail_get_ld_pd_list; 6127 } 6128 6129 /* stream detection initialization */ 6130 if (instance->adapter_type >= VENTURA_SERIES) { 6131 fusion->stream_detect_by_ld = 6132 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6133 sizeof(struct LD_STREAM_DETECT *), 6134 GFP_KERNEL); 6135 if (!fusion->stream_detect_by_ld) { 6136 dev_err(&instance->pdev->dev, 6137 "unable to allocate stream detection for pool of LDs\n"); 6138 goto fail_get_ld_pd_list; 6139 } 6140 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6141 fusion->stream_detect_by_ld[i] = 6142 kzalloc(sizeof(struct LD_STREAM_DETECT), 6143 GFP_KERNEL); 6144 if (!fusion->stream_detect_by_ld[i]) { 6145 dev_err(&instance->pdev->dev, 6146 "unable to allocate stream detect by LD\n "); 6147 for (j = 0; j < i; ++j) 6148 kfree(fusion->stream_detect_by_ld[j]); 6149 kfree(fusion->stream_detect_by_ld); 6150 fusion->stream_detect_by_ld = NULL; 6151 goto fail_get_ld_pd_list; 6152 } 6153 fusion->stream_detect_by_ld[i]->mru_bit_map 6154 = MR_STREAM_BITMAP; 6155 } 6156 } 6157 6158 /* 6159 * Compute the max allowed sectors per IO: The controller info has two 6160 * limits on max sectors. Driver should use the minimum of these two. 6161 * 6162 * 1 << stripe_sz_ops.min = max sectors per strip 6163 * 6164 * Note that older firmwares ( < FW ver 30) didn't report information 6165 * to calculate max_sectors_1. So the number ended up as zero always. 
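	 *
	 * Illustrative example (hypothetical values, not read from a real
	 * controller): with stripe_sz_ops.min = 7 and max_strips_per_io = 42,
	 * max_sectors_1 = (1 << 7) * 42 = 5376 sectors; the smaller of
	 * max_sectors_1 and max_sectors_2 is used as the per-IO limit.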
6166 */ 6167 tmp_sectors = 0; 6168 ctrl_info = instance->ctrl_info_buf; 6169 6170 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6171 le16_to_cpu(ctrl_info->max_strips_per_io); 6172 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6173 6174 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6175 6176 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6177 instance->passive = ctrl_info->cluster.passive; 6178 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6179 instance->UnevenSpanSupport = 6180 ctrl_info->adapterOperations2.supportUnevenSpans; 6181 if (instance->UnevenSpanSupport) { 6182 struct fusion_context *fusion = instance->ctrl_context; 6183 if (MR_ValidateMapInfo(instance, instance->map_id)) 6184 fusion->fast_path_io = 1; 6185 else 6186 fusion->fast_path_io = 0; 6187 6188 } 6189 if (ctrl_info->host_interface.SRIOV) { 6190 instance->requestorId = ctrl_info->iov.requestorId; 6191 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6192 if (!ctrl_info->adapterOperations2.activePassive) 6193 instance->PlasmaFW111 = 1; 6194 6195 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6196 instance->PlasmaFW111 ? "1.11" : "new"); 6197 6198 if (instance->PlasmaFW111) { 6199 iovPtr = (struct IOV_111 *) 6200 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6201 instance->requestorId = iovPtr->requestorId; 6202 } 6203 } 6204 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6205 instance->requestorId); 6206 } 6207 6208 instance->crash_dump_fw_support = 6209 ctrl_info->adapterOperations3.supportCrashDump; 6210 instance->crash_dump_drv_support = 6211 (instance->crash_dump_fw_support && 6212 instance->crash_dump_buf); 6213 if (instance->crash_dump_drv_support) 6214 megasas_set_crash_dump_params(instance, 6215 MR_CRASH_BUF_TURN_OFF); 6216 6217 else { 6218 if (instance->crash_dump_buf) 6219 dma_free_coherent(&instance->pdev->dev, 6220 CRASH_DMA_BUF_SIZE, 6221 instance->crash_dump_buf, 6222 instance->crash_dump_h); 6223 instance->crash_dump_buf = NULL; 6224 } 6225 6226 if (instance->snapdump_wait_time) { 6227 megasas_get_snapdump_properties(instance); 6228 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6229 instance->snapdump_wait_time); 6230 } 6231 6232 dev_info(&instance->pdev->dev, 6233 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6234 le16_to_cpu(ctrl_info->pci.vendor_id), 6235 le16_to_cpu(ctrl_info->pci.device_id), 6236 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6237 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6238 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6239 instance->UnevenSpanSupport ? "yes" : "no"); 6240 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6241 instance->crash_dump_drv_support ? "yes" : "no"); 6242 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6243 instance->use_seqnum_jbod_fp ? 
"enabled" : "disabled"); 6244 6245 instance->max_sectors_per_req = instance->max_num_sge * 6246 SGE_BUFFER_SIZE / 512; 6247 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6248 instance->max_sectors_per_req = tmp_sectors; 6249 6250 /* Check for valid throttlequeuedepth module parameter */ 6251 if (throttlequeuedepth && 6252 throttlequeuedepth <= instance->max_scsi_cmds) 6253 instance->throttlequeuedepth = throttlequeuedepth; 6254 else 6255 instance->throttlequeuedepth = 6256 MEGASAS_THROTTLE_QUEUE_DEPTH; 6257 6258 if ((resetwaittime < 1) || 6259 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6260 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6261 6262 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6263 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6264 6265 /* Launch SR-IOV heartbeat timer */ 6266 if (instance->requestorId) { 6267 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6268 megasas_start_timer(instance); 6269 } else { 6270 instance->skip_heartbeat_timer_del = 1; 6271 goto fail_get_ld_pd_list; 6272 } 6273 } 6274 6275 /* 6276 * Create and start watchdog thread which will monitor 6277 * controller state every 1 sec and trigger OCR when 6278 * it enters fault state 6279 */ 6280 if (instance->adapter_type != MFI_SERIES) 6281 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6282 goto fail_start_watchdog; 6283 6284 return 0; 6285 6286 fail_start_watchdog: 6287 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6288 del_timer_sync(&instance->sriov_heartbeat_timer); 6289 fail_get_ld_pd_list: 6290 instance->instancet->disable_intr(instance); 6291 megasas_destroy_irqs(instance); 6292 fail_init_adapter: 6293 if (instance->msix_vectors) 6294 pci_free_irq_vectors(instance->pdev); 6295 instance->msix_vectors = 0; 6296 fail_alloc_dma_buf: 6297 megasas_free_ctrl_dma_buffers(instance); 6298 megasas_free_ctrl_mem(instance); 6299 fail_ready_state: 6300 iounmap(instance->reg_set); 6301 6302 fail_ioremap: 6303 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6304 6305 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6306 __func__, __LINE__); 6307 return -EINVAL; 6308 } 6309 6310 /** 6311 * megasas_release_mfi - Reverses the FW initialization 6312 * @instance: Adapter soft state 6313 */ 6314 static void megasas_release_mfi(struct megasas_instance *instance) 6315 { 6316 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6317 6318 if (instance->reply_queue) 6319 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6320 instance->reply_queue, instance->reply_queue_h); 6321 6322 megasas_free_cmds(instance); 6323 6324 iounmap(instance->reg_set); 6325 6326 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6327 } 6328 6329 /** 6330 * megasas_get_seq_num - Gets latest event sequence numbers 6331 * @instance: Adapter soft state 6332 * @eli: FW event log sequence numbers information 6333 * 6334 * FW maintains a log of all events in a non-volatile area. Upper layers would 6335 * usually find out the latest sequence number of the events, the seq number at 6336 * the boot etc. They would "read" all the events below the latest seq number 6337 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6338 * number), they would subsribe to AEN (asynchronous event notification) and 6339 * wait for the events to happen. 
6340 */ 6341 static int 6342 megasas_get_seq_num(struct megasas_instance *instance, 6343 struct megasas_evt_log_info *eli) 6344 { 6345 struct megasas_cmd *cmd; 6346 struct megasas_dcmd_frame *dcmd; 6347 struct megasas_evt_log_info *el_info; 6348 dma_addr_t el_info_h = 0; 6349 int ret; 6350 6351 cmd = megasas_get_cmd(instance); 6352 6353 if (!cmd) { 6354 return -ENOMEM; 6355 } 6356 6357 dcmd = &cmd->frame->dcmd; 6358 el_info = dma_alloc_coherent(&instance->pdev->dev, 6359 sizeof(struct megasas_evt_log_info), 6360 &el_info_h, GFP_KERNEL); 6361 if (!el_info) { 6362 megasas_return_cmd(instance, cmd); 6363 return -ENOMEM; 6364 } 6365 6366 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6367 6368 dcmd->cmd = MFI_CMD_DCMD; 6369 dcmd->cmd_status = 0x0; 6370 dcmd->sge_count = 1; 6371 dcmd->flags = MFI_FRAME_DIR_READ; 6372 dcmd->timeout = 0; 6373 dcmd->pad_0 = 0; 6374 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6375 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6376 6377 megasas_set_dma_settings(instance, dcmd, el_info_h, 6378 sizeof(struct megasas_evt_log_info)); 6379 6380 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6381 if (ret != DCMD_SUCCESS) { 6382 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6383 __func__, __LINE__); 6384 goto dcmd_failed; 6385 } 6386 6387 /* 6388 * Copy the data back into callers buffer 6389 */ 6390 eli->newest_seq_num = el_info->newest_seq_num; 6391 eli->oldest_seq_num = el_info->oldest_seq_num; 6392 eli->clear_seq_num = el_info->clear_seq_num; 6393 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6394 eli->boot_seq_num = el_info->boot_seq_num; 6395 6396 dcmd_failed: 6397 dma_free_coherent(&instance->pdev->dev, 6398 sizeof(struct megasas_evt_log_info), 6399 el_info, el_info_h); 6400 6401 megasas_return_cmd(instance, cmd); 6402 6403 return ret; 6404 } 6405 6406 /** 6407 * megasas_register_aen - Registers for asynchronous event notification 6408 * @instance: Adapter soft state 6409 * @seq_num: The starting sequence number 6410 * @class_locale: Class of the event 6411 * 6412 * This function subscribes for AEN for events beyond the @seq_num. It requests 6413 * to be notified if and only if the event is of type @class_locale 6414 */ 6415 static int 6416 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 6417 u32 class_locale_word) 6418 { 6419 int ret_val; 6420 struct megasas_cmd *cmd; 6421 struct megasas_dcmd_frame *dcmd; 6422 union megasas_evt_class_locale curr_aen; 6423 union megasas_evt_class_locale prev_aen; 6424 6425 /* 6426 * If there an AEN pending already (aen_cmd), check if the 6427 * class_locale of that pending AEN is inclusive of the new 6428 * AEN request we currently have. If it is, then we don't have 6429 * to do anything. In other words, whichever events the current 6430 * AEN request is subscribing to, have already been subscribed 6431 * to. 
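	 *
	 * Illustrative example (hypothetical values): a pending registration
	 * for class PROGRESS (= -1) with locale bitmap 0x3 already covers a
	 * new request for class CRITICAL with locale 0x1, because the lower
	 * class value is inclusive and 0x1 is a subset of the 0x3 locale
	 * bitmap.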
6432 * 6433 * If the old_cmd is _not_ inclusive, then we have to abort 6434 * that command, form a class_locale that is superset of both 6435 * old and current and re-issue to the FW 6436 */ 6437 6438 curr_aen.word = class_locale_word; 6439 6440 if (instance->aen_cmd) { 6441 6442 prev_aen.word = 6443 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 6444 6445 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 6446 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 6447 dev_info(&instance->pdev->dev, 6448 "%s %d out of range class %d send by application\n", 6449 __func__, __LINE__, curr_aen.members.class); 6450 return 0; 6451 } 6452 6453 /* 6454 * A class whose enum value is smaller is inclusive of all 6455 * higher values. If a PROGRESS (= -1) was previously 6456 * registered, then a new registration requests for higher 6457 * classes need not be sent to FW. They are automatically 6458 * included. 6459 * 6460 * Locale numbers don't have such hierarchy. They are bitmap 6461 * values 6462 */ 6463 if ((prev_aen.members.class <= curr_aen.members.class) && 6464 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6465 curr_aen.members.locale)) { 6466 /* 6467 * Previously issued event registration includes 6468 * current request. Nothing to do. 6469 */ 6470 return 0; 6471 } else { 6472 curr_aen.members.locale |= prev_aen.members.locale; 6473 6474 if (prev_aen.members.class < curr_aen.members.class) 6475 curr_aen.members.class = prev_aen.members.class; 6476 6477 instance->aen_cmd->abort_aen = 1; 6478 ret_val = megasas_issue_blocked_abort_cmd(instance, 6479 instance-> 6480 aen_cmd, 30); 6481 6482 if (ret_val) { 6483 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6484 "previous AEN command\n"); 6485 return ret_val; 6486 } 6487 } 6488 } 6489 6490 cmd = megasas_get_cmd(instance); 6491 6492 if (!cmd) 6493 return -ENOMEM; 6494 6495 dcmd = &cmd->frame->dcmd; 6496 6497 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6498 6499 /* 6500 * Prepare DCMD for aen registration 6501 */ 6502 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6503 6504 dcmd->cmd = MFI_CMD_DCMD; 6505 dcmd->cmd_status = 0x0; 6506 dcmd->sge_count = 1; 6507 dcmd->flags = MFI_FRAME_DIR_READ; 6508 dcmd->timeout = 0; 6509 dcmd->pad_0 = 0; 6510 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6511 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6512 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6513 instance->last_seq_num = seq_num; 6514 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6515 6516 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6517 sizeof(struct megasas_evt_detail)); 6518 6519 if (instance->aen_cmd != NULL) { 6520 megasas_return_cmd(instance, cmd); 6521 return 0; 6522 } 6523 6524 /* 6525 * Store reference to the cmd used to register for AEN. When an 6526 * application wants us to register for AEN, we have to abort this 6527 * cmd and re-register with a new EVENT LOCALE supplied by that app 6528 */ 6529 instance->aen_cmd = cmd; 6530 6531 /* 6532 * Issue the aen registration frame 6533 */ 6534 instance->instancet->issue_dcmd(instance, cmd); 6535 6536 return 0; 6537 } 6538 6539 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6540 * 6541 * This DCMD will fetch few properties of LD/system PD defined 6542 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 6543 * 6544 * DCMD send by drivers whenever new target is added to the OS. 
6545 * 6546 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6547 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6548 * 0 = system PD, 1 = LD. 6549 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6550 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6551 * 6552 * @instance: Adapter soft state 6553 * @sdev: OS provided scsi device 6554 * 6555 * Returns 0 on success non-zero on failure. 6556 */ 6557 int 6558 megasas_get_target_prop(struct megasas_instance *instance, 6559 struct scsi_device *sdev) 6560 { 6561 int ret; 6562 struct megasas_cmd *cmd; 6563 struct megasas_dcmd_frame *dcmd; 6564 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6565 sdev->id; 6566 6567 cmd = megasas_get_cmd(instance); 6568 6569 if (!cmd) { 6570 dev_err(&instance->pdev->dev, 6571 "Failed to get cmd %s\n", __func__); 6572 return -ENOMEM; 6573 } 6574 6575 dcmd = &cmd->frame->dcmd; 6576 6577 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6578 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6579 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6580 6581 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6582 dcmd->cmd = MFI_CMD_DCMD; 6583 dcmd->cmd_status = 0xFF; 6584 dcmd->sge_count = 1; 6585 dcmd->flags = MFI_FRAME_DIR_READ; 6586 dcmd->timeout = 0; 6587 dcmd->pad_0 = 0; 6588 dcmd->data_xfer_len = 6589 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6590 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6591 6592 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6593 sizeof(struct MR_TARGET_PROPERTIES)); 6594 6595 if ((instance->adapter_type != MFI_SERIES) && 6596 !instance->mask_interrupts) 6597 ret = megasas_issue_blocked_cmd(instance, 6598 cmd, MFI_IO_TIMEOUT_SECS); 6599 else 6600 ret = megasas_issue_polled(instance, cmd); 6601 6602 switch (ret) { 6603 case DCMD_TIMEOUT: 6604 switch (dcmd_timeout_ocr_possible(instance)) { 6605 case INITIATE_OCR: 6606 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6607 mutex_unlock(&instance->reset_mutex); 6608 megasas_reset_fusion(instance->host, 6609 MFI_IO_TIMEOUT_OCR); 6610 mutex_lock(&instance->reset_mutex); 6611 break; 6612 case KILL_ADAPTER: 6613 megaraid_sas_kill_hba(instance); 6614 break; 6615 case IGNORE_TIMEOUT: 6616 dev_info(&instance->pdev->dev, 6617 "Ignore DCMD timeout: %s %d\n", 6618 __func__, __LINE__); 6619 break; 6620 } 6621 break; 6622 6623 default: 6624 megasas_return_cmd(instance, cmd); 6625 } 6626 if (ret != DCMD_SUCCESS) 6627 dev_err(&instance->pdev->dev, 6628 "return from %s %d return value %d\n", 6629 __func__, __LINE__, ret); 6630 6631 return ret; 6632 } 6633 6634 /** 6635 * megasas_start_aen - Subscribes to AEN during driver load time 6636 * @instance: Adapter soft state 6637 */ 6638 static int megasas_start_aen(struct megasas_instance *instance) 6639 { 6640 struct megasas_evt_log_info eli; 6641 union megasas_evt_class_locale class_locale; 6642 6643 /* 6644 * Get the latest sequence number from FW 6645 */ 6646 memset(&eli, 0, sizeof(eli)); 6647 6648 if (megasas_get_seq_num(instance, &eli)) 6649 return -1; 6650 6651 /* 6652 * Register AEN with FW for latest sequence number plus 1 6653 */ 6654 class_locale.members.reserved = 0; 6655 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6656 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6657 6658 return megasas_register_aen(instance, 6659 le32_to_cpu(eli.newest_seq_num) + 1, 6660 class_locale.word); 6661 } 6662 6663 /** 6664 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6665 * @instance: Adapter soft state 6666 */ 6667 static int megasas_io_attach(struct 
megasas_instance *instance)
{
	struct Scsi_Host *host = instance->host;

	/*
	 * Export parameters required by SCSI mid-layer
	 */
	host->unique_id = instance->unique_id;
	host->can_queue = instance->max_scsi_cmds;
	host->this_id = instance->init_id;
	host->sg_tablesize = instance->max_num_sge;

	if (instance->fw_support_ieee)
		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;

	/*
	 * Check if the module parameter value for max_sectors can be used
	 */
	if (max_sectors && max_sectors < instance->max_sectors_per_req)
		instance->max_sectors_per_req = max_sectors;
	else {
		if (max_sectors) {
			if (((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
				(max_sectors <= MEGASAS_MAX_SECTORS)) {
				instance->max_sectors_per_req = max_sectors;
			} else {
				dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
					"and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
			}
		}
	}

	host->max_sectors = instance->max_sectors_per_req;
	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
	host->max_lun = MEGASAS_MAX_LUN;
	host->max_cmd_len = 16;

	/*
	 * Notify the mid-layer about the new controller
	 */
	if (scsi_add_host(host, &instance->pdev->dev)) {
		dev_err(&instance->pdev->dev,
			"Failed to add host from %s %d\n",
			__func__, __LINE__);
		return -ENODEV;
	}

	return 0;
}

/**
 * megasas_set_dma_mask - Set DMA mask for supported controllers
 *
 * @instance: Adapter soft state
 * Description:
 *
 * For Ventura, driver/FW will operate in 63 bit DMA addresses.
 *
 * For Invader:
 * By default, driver/FW will operate in 32 bit DMA addresses
 * for consistent DMA mapping but if the 32 bit consistent
 * DMA mask fails, driver will try with a 63 bit consistent
 * mask provided FW is true 63 bit DMA capable
 *
 * For older controllers (Thunderbolt and MFI based adapters):
 * driver/FW will operate in 32 bit consistent DMA addresses.
 */
static int
megasas_set_dma_mask(struct megasas_instance *instance)
{
	u64 consistent_mask;
	struct pci_dev *pdev;
	u32 scratch_pad_1;

	pdev = instance->pdev;
	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			goto fail_set_dma_mask;

		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
			/*
			 * If the 32 bit DMA mask fails, then try a 64 bit mask
			 * for FW capable of handling 64 bit DMA.
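			 * Whether the firmware can actually handle 63/64 bit DMA
			 * is read back from outbound_scratch_pad_1
			 * (MR_CAN_HANDLE_64_BIT_DMA_OFFSET) just below.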
6762 */ 6763 scratch_pad_1 = megasas_readl 6764 (instance, &instance->reg_set->outbound_scratch_pad_1); 6765 6766 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6767 goto fail_set_dma_mask; 6768 else if (dma_set_mask_and_coherent(&pdev->dev, 6769 DMA_BIT_MASK(63))) 6770 goto fail_set_dma_mask; 6771 } 6772 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6773 goto fail_set_dma_mask; 6774 6775 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6776 instance->consistent_mask_64bit = false; 6777 else 6778 instance->consistent_mask_64bit = true; 6779 6780 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6781 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 6782 (instance->consistent_mask_64bit ? "63" : "32")); 6783 6784 return 0; 6785 6786 fail_set_dma_mask: 6787 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6788 return -1; 6789 6790 } 6791 6792 /* 6793 * megasas_set_adapter_type - Set adapter type. 6794 * Supported controllers can be divided in 6795 * different categories- 6796 * enum MR_ADAPTER_TYPE { 6797 * MFI_SERIES = 1, 6798 * THUNDERBOLT_SERIES = 2, 6799 * INVADER_SERIES = 3, 6800 * VENTURA_SERIES = 4, 6801 * AERO_SERIES = 5, 6802 * }; 6803 * @instance: Adapter soft state 6804 * return: void 6805 */ 6806 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 6807 { 6808 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 6809 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 6810 instance->adapter_type = MFI_SERIES; 6811 } else { 6812 switch (instance->pdev->device) { 6813 case PCI_DEVICE_ID_LSI_AERO_10E1: 6814 case PCI_DEVICE_ID_LSI_AERO_10E2: 6815 case PCI_DEVICE_ID_LSI_AERO_10E5: 6816 case PCI_DEVICE_ID_LSI_AERO_10E6: 6817 instance->adapter_type = AERO_SERIES; 6818 break; 6819 case PCI_DEVICE_ID_LSI_VENTURA: 6820 case PCI_DEVICE_ID_LSI_CRUSADER: 6821 case PCI_DEVICE_ID_LSI_HARPOON: 6822 case PCI_DEVICE_ID_LSI_TOMCAT: 6823 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6824 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6825 instance->adapter_type = VENTURA_SERIES; 6826 break; 6827 case PCI_DEVICE_ID_LSI_FUSION: 6828 case PCI_DEVICE_ID_LSI_PLASMA: 6829 instance->adapter_type = THUNDERBOLT_SERIES; 6830 break; 6831 case PCI_DEVICE_ID_LSI_INVADER: 6832 case PCI_DEVICE_ID_LSI_INTRUDER: 6833 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6834 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6835 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6836 case PCI_DEVICE_ID_LSI_FURY: 6837 instance->adapter_type = INVADER_SERIES; 6838 break; 6839 default: /* For all other supported controllers */ 6840 instance->adapter_type = MFI_SERIES; 6841 break; 6842 } 6843 } 6844 } 6845 6846 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6847 { 6848 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 6849 sizeof(u32), &instance->producer_h, GFP_KERNEL); 6850 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 6851 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 6852 6853 if (!instance->producer || !instance->consumer) { 6854 dev_err(&instance->pdev->dev, 6855 "Failed to allocate memory for producer, consumer\n"); 6856 return -1; 6857 } 6858 6859 *instance->producer = 0; 6860 *instance->consumer = 0; 6861 return 0; 6862 } 6863 6864 /** 6865 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 6866 * structures which are not common across MFI 6867 * adapters and fusion adapters. 6868 * For MFI based adapters, allocate producer and 6869 * consumer buffers. 
For fusion adapters, allocate
 * memory for fusion context.
 * @instance: Adapter soft state
 * return: 0 for SUCCESS
 */
static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
{
	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
				      GFP_KERNEL);
	if (!instance->reply_map)
		return -ENOMEM;

	switch (instance->adapter_type) {
	case MFI_SERIES:
		if (megasas_alloc_mfi_ctrl_mem(instance))
			goto fail;
		break;
	case AERO_SERIES:
	case VENTURA_SERIES:
	case THUNDERBOLT_SERIES:
	case INVADER_SERIES:
		if (megasas_alloc_fusion_context(instance))
			goto fail;
		break;
	}

	return 0;
fail:
	kfree(instance->reply_map);
	instance->reply_map = NULL;
	return -ENOMEM;
}

/*
 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
 *			   producer, consumer buffers for MFI adapters
 *
 * @instance - Adapter soft instance
 *
 */
static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
{
	kfree(instance->reply_map);
	if (instance->adapter_type == MFI_SERIES) {
		if (instance->producer)
			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
					  instance->producer,
					  instance->producer_h);
		if (instance->consumer)
			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
					  instance->consumer,
					  instance->consumer_h);
	} else {
		megasas_free_fusion_context(instance);
	}
}

/**
 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
 *				    driver load time
 *
 * @instance - Adapter soft instance
 * @return - 0 for SUCCESS
 */
static inline
int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
{
	struct pci_dev *pdev = instance->pdev;
	struct fusion_context *fusion = instance->ctrl_context;

	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
						  sizeof(struct megasas_evt_detail),
						  &instance->evt_detail_h, GFP_KERNEL);

	if (!instance->evt_detail) {
		dev_err(&instance->pdev->dev,
			"Failed to allocate event detail buffer\n");
		return -ENOMEM;
	}

	if (fusion) {
		fusion->ioc_init_request =
			dma_alloc_coherent(&pdev->dev,
					   sizeof(struct MPI2_IOC_INIT_REQUEST),
					   &fusion->ioc_init_request_phys,
					   GFP_KERNEL);

		if (!fusion->ioc_init_request) {
			dev_err(&pdev->dev,
				"Failed to allocate IOC INIT request buffer\n");
			return -ENOMEM;
		}

		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
						sizeof(struct MR_SNAPDUMP_PROPERTIES),
						&instance->snapdump_prop_h, GFP_KERNEL);

		if (!instance->snapdump_prop)
			dev_err(&pdev->dev,
				"Failed to allocate snapdump properties buffer\n");

		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
							HOST_DEVICE_LIST_SZ,
							&instance->host_device_list_buf_h,
							GFP_KERNEL);

		if (!instance->host_device_list_buf) {
			dev_err(&pdev->dev,
				"Failed to allocate targetid list buffer\n");
			return -ENOMEM;
		}

	}

	instance->pd_list_buf =
		dma_alloc_coherent(&pdev->dev,
				   MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
				   &instance->pd_list_buf_h, GFP_KERNEL);

	if (!instance->pd_list_buf) {
		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
		return -ENOMEM;
	}

	instance->ctrl_info_buf =
		dma_alloc_coherent(&pdev->dev,
sizeof(struct megasas_ctrl_info), 6996 &instance->ctrl_info_buf_h, GFP_KERNEL); 6997 6998 if (!instance->ctrl_info_buf) { 6999 dev_err(&pdev->dev, 7000 "Failed to allocate controller info buffer\n"); 7001 return -ENOMEM; 7002 } 7003 7004 instance->ld_list_buf = 7005 dma_alloc_coherent(&pdev->dev, 7006 sizeof(struct MR_LD_LIST), 7007 &instance->ld_list_buf_h, GFP_KERNEL); 7008 7009 if (!instance->ld_list_buf) { 7010 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7011 return -ENOMEM; 7012 } 7013 7014 instance->ld_targetid_list_buf = 7015 dma_alloc_coherent(&pdev->dev, 7016 sizeof(struct MR_LD_TARGETID_LIST), 7017 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7018 7019 if (!instance->ld_targetid_list_buf) { 7020 dev_err(&pdev->dev, 7021 "Failed to allocate LD targetid list buffer\n"); 7022 return -ENOMEM; 7023 } 7024 7025 if (!reset_devices) { 7026 instance->system_info_buf = 7027 dma_alloc_coherent(&pdev->dev, 7028 sizeof(struct MR_DRV_SYSTEM_INFO), 7029 &instance->system_info_h, GFP_KERNEL); 7030 instance->pd_info = 7031 dma_alloc_coherent(&pdev->dev, 7032 sizeof(struct MR_PD_INFO), 7033 &instance->pd_info_h, GFP_KERNEL); 7034 instance->tgt_prop = 7035 dma_alloc_coherent(&pdev->dev, 7036 sizeof(struct MR_TARGET_PROPERTIES), 7037 &instance->tgt_prop_h, GFP_KERNEL); 7038 instance->crash_dump_buf = 7039 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7040 &instance->crash_dump_h, GFP_KERNEL); 7041 7042 if (!instance->system_info_buf) 7043 dev_err(&instance->pdev->dev, 7044 "Failed to allocate system info buffer\n"); 7045 7046 if (!instance->pd_info) 7047 dev_err(&instance->pdev->dev, 7048 "Failed to allocate pd_info buffer\n"); 7049 7050 if (!instance->tgt_prop) 7051 dev_err(&instance->pdev->dev, 7052 "Failed to allocate tgt_prop buffer\n"); 7053 7054 if (!instance->crash_dump_buf) 7055 dev_err(&instance->pdev->dev, 7056 "Failed to allocate crash dump buffer\n"); 7057 } 7058 7059 return 0; 7060 } 7061 7062 /* 7063 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7064 * during driver load time 7065 * 7066 * @instance- Adapter soft instance 7067 * 7068 */ 7069 static inline 7070 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7071 { 7072 struct pci_dev *pdev = instance->pdev; 7073 struct fusion_context *fusion = instance->ctrl_context; 7074 7075 if (instance->evt_detail) 7076 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7077 instance->evt_detail, 7078 instance->evt_detail_h); 7079 7080 if (fusion && fusion->ioc_init_request) 7081 dma_free_coherent(&pdev->dev, 7082 sizeof(struct MPI2_IOC_INIT_REQUEST), 7083 fusion->ioc_init_request, 7084 fusion->ioc_init_request_phys); 7085 7086 if (instance->pd_list_buf) 7087 dma_free_coherent(&pdev->dev, 7088 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7089 instance->pd_list_buf, 7090 instance->pd_list_buf_h); 7091 7092 if (instance->ld_list_buf) 7093 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7094 instance->ld_list_buf, 7095 instance->ld_list_buf_h); 7096 7097 if (instance->ld_targetid_list_buf) 7098 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7099 instance->ld_targetid_list_buf, 7100 instance->ld_targetid_list_buf_h); 7101 7102 if (instance->ctrl_info_buf) 7103 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7104 instance->ctrl_info_buf, 7105 instance->ctrl_info_buf_h); 7106 7107 if (instance->system_info_buf) 7108 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7109 instance->system_info_buf, 7110 
instance->system_info_h); 7111 7112 if (instance->pd_info) 7113 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7114 instance->pd_info, instance->pd_info_h); 7115 7116 if (instance->tgt_prop) 7117 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7118 instance->tgt_prop, instance->tgt_prop_h); 7119 7120 if (instance->crash_dump_buf) 7121 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7122 instance->crash_dump_buf, 7123 instance->crash_dump_h); 7124 7125 if (instance->snapdump_prop) 7126 dma_free_coherent(&pdev->dev, 7127 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7128 instance->snapdump_prop, 7129 instance->snapdump_prop_h); 7130 7131 if (instance->host_device_list_buf) 7132 dma_free_coherent(&pdev->dev, 7133 HOST_DEVICE_LIST_SZ, 7134 instance->host_device_list_buf, 7135 instance->host_device_list_buf_h); 7136 7137 } 7138 7139 /* 7140 * megasas_init_ctrl_params - Initialize controller's instance 7141 * parameters before FW init 7142 * @instance - Adapter soft instance 7143 * @return - void 7144 */ 7145 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7146 { 7147 instance->fw_crash_state = UNAVAILABLE; 7148 7149 megasas_poll_wait_aen = 0; 7150 instance->issuepend_done = 1; 7151 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7152 7153 /* 7154 * Initialize locks and queues 7155 */ 7156 INIT_LIST_HEAD(&instance->cmd_pool); 7157 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7158 7159 atomic_set(&instance->fw_outstanding, 0); 7160 atomic64_set(&instance->total_io_count, 0); 7161 7162 init_waitqueue_head(&instance->int_cmd_wait_q); 7163 init_waitqueue_head(&instance->abort_cmd_wait_q); 7164 7165 spin_lock_init(&instance->crashdump_lock); 7166 spin_lock_init(&instance->mfi_pool_lock); 7167 spin_lock_init(&instance->hba_lock); 7168 spin_lock_init(&instance->stream_lock); 7169 spin_lock_init(&instance->completion_lock); 7170 7171 mutex_init(&instance->reset_mutex); 7172 7173 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7174 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7175 instance->flag_ieee = 1; 7176 7177 megasas_dbg_lvl = 0; 7178 instance->flag = 0; 7179 instance->unload = 1; 7180 instance->last_time = 0; 7181 instance->disableOnlineCtrlReset = 1; 7182 instance->UnevenSpanSupport = 0; 7183 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7184 instance->msix_load_balance = false; 7185 7186 if (instance->adapter_type != MFI_SERIES) 7187 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7188 else 7189 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7190 } 7191 7192 /** 7193 * megasas_probe_one - PCI hotplug entry point 7194 * @pdev: PCI device structure 7195 * @id: PCI ids of supported hotplugged adapter 7196 */ 7197 static int megasas_probe_one(struct pci_dev *pdev, 7198 const struct pci_device_id *id) 7199 { 7200 int rval, pos; 7201 struct Scsi_Host *host; 7202 struct megasas_instance *instance; 7203 u16 control = 0; 7204 7205 switch (pdev->device) { 7206 case PCI_DEVICE_ID_LSI_AERO_10E0: 7207 case PCI_DEVICE_ID_LSI_AERO_10E3: 7208 case PCI_DEVICE_ID_LSI_AERO_10E4: 7209 case PCI_DEVICE_ID_LSI_AERO_10E7: 7210 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7211 return 1; 7212 case PCI_DEVICE_ID_LSI_AERO_10E1: 7213 case PCI_DEVICE_ID_LSI_AERO_10E5: 7214 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7215 break; 7216 } 7217 7218 /* Reset MSI-X in the kdump kernel */ 7219 if (reset_devices) { 7220 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7221 if (pos) { 7222 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7223 &control); 7224 if (control & PCI_MSIX_FLAGS_ENABLE) { 7225 dev_info(&pdev->dev, "resetting MSI-X\n"); 7226 pci_write_config_word(pdev, 7227 pos + PCI_MSIX_FLAGS, 7228 control & 7229 ~PCI_MSIX_FLAGS_ENABLE); 7230 } 7231 } 7232 } 7233 7234 /* 7235 * PCI prepping: enable device set bus mastering and dma mask 7236 */ 7237 rval = pci_enable_device_mem(pdev); 7238 7239 if (rval) { 7240 return rval; 7241 } 7242 7243 pci_set_master(pdev); 7244 7245 host = scsi_host_alloc(&megasas_template, 7246 sizeof(struct megasas_instance)); 7247 7248 if (!host) { 7249 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7250 goto fail_alloc_instance; 7251 } 7252 7253 instance = (struct megasas_instance *)host->hostdata; 7254 memset(instance, 0, sizeof(*instance)); 7255 atomic_set(&instance->fw_reset_no_pci_access, 0); 7256 7257 /* 7258 * Initialize PCI related and misc parameters 7259 */ 7260 instance->pdev = pdev; 7261 instance->host = host; 7262 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 7263 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7264 7265 megasas_set_adapter_type(instance); 7266 7267 /* 7268 * Initialize MFI Firmware 7269 */ 7270 if (megasas_init_fw(instance)) 7271 goto fail_init_mfi; 7272 7273 if (instance->requestorId) { 7274 if (instance->PlasmaFW111) { 7275 instance->vf_affiliation_111 = 7276 dma_alloc_coherent(&pdev->dev, 7277 sizeof(struct MR_LD_VF_AFFILIATION_111), 7278 &instance->vf_affiliation_111_h, 7279 GFP_KERNEL); 7280 if (!instance->vf_affiliation_111) 7281 dev_warn(&pdev->dev, "Can't allocate " 7282 "memory for VF affiliation buffer\n"); 7283 } else { 7284 instance->vf_affiliation = 7285 dma_alloc_coherent(&pdev->dev, 7286 (MAX_LOGICAL_DRIVES + 1) * 7287 sizeof(struct MR_LD_VF_AFFILIATION), 7288 &instance->vf_affiliation_h, 7289 GFP_KERNEL); 7290 if (!instance->vf_affiliation) 7291 dev_warn(&pdev->dev, "Can't allocate " 7292 "memory for VF affiliation buffer\n"); 7293 } 7294 } 7295 7296 /* 7297 * Store instance in PCI softstate 7298 */ 7299 pci_set_drvdata(pdev, instance); 7300 7301 /* 7302 * Add this controller to megasas_mgmt_info structure so that it 7303 * can be exported to management applications 7304 */ 7305 megasas_mgmt_info.count++; 7306 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7307 
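	/*
	 * If anything after this point fails, the fail_io_attach error path
	 * below takes the instance back out of megasas_mgmt_info.
	 */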
megasas_mgmt_info.max_index++; 7308 7309 /* 7310 * Register with SCSI mid-layer 7311 */ 7312 if (megasas_io_attach(instance)) 7313 goto fail_io_attach; 7314 7315 instance->unload = 0; 7316 /* 7317 * Trigger SCSI to scan our drives 7318 */ 7319 if (!instance->enable_fw_dev_list || 7320 (instance->host_device_list_buf->count > 0)) 7321 scsi_scan_host(host); 7322 7323 /* 7324 * Initiate AEN (Asynchronous Event Notification) 7325 */ 7326 if (megasas_start_aen(instance)) { 7327 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7328 goto fail_start_aen; 7329 } 7330 7331 megasas_setup_debugfs(instance); 7332 7333 /* Get current SR-IOV LD/VF affiliation */ 7334 if (instance->requestorId) 7335 megasas_get_ld_vf_affiliation(instance, 1); 7336 7337 return 0; 7338 7339 fail_start_aen: 7340 fail_io_attach: 7341 megasas_mgmt_info.count--; 7342 megasas_mgmt_info.max_index--; 7343 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7344 7345 instance->instancet->disable_intr(instance); 7346 megasas_destroy_irqs(instance); 7347 7348 if (instance->adapter_type != MFI_SERIES) 7349 megasas_release_fusion(instance); 7350 else 7351 megasas_release_mfi(instance); 7352 if (instance->msix_vectors) 7353 pci_free_irq_vectors(instance->pdev); 7354 fail_init_mfi: 7355 scsi_host_put(host); 7356 fail_alloc_instance: 7357 pci_disable_device(pdev); 7358 7359 return -ENODEV; 7360 } 7361 7362 /** 7363 * megasas_flush_cache - Requests FW to flush all its caches 7364 * @instance: Adapter soft state 7365 */ 7366 static void megasas_flush_cache(struct megasas_instance *instance) 7367 { 7368 struct megasas_cmd *cmd; 7369 struct megasas_dcmd_frame *dcmd; 7370 7371 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7372 return; 7373 7374 cmd = megasas_get_cmd(instance); 7375 7376 if (!cmd) 7377 return; 7378 7379 dcmd = &cmd->frame->dcmd; 7380 7381 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7382 7383 dcmd->cmd = MFI_CMD_DCMD; 7384 dcmd->cmd_status = 0x0; 7385 dcmd->sge_count = 0; 7386 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7387 dcmd->timeout = 0; 7388 dcmd->pad_0 = 0; 7389 dcmd->data_xfer_len = 0; 7390 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7391 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7392 7393 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7394 != DCMD_SUCCESS) { 7395 dev_err(&instance->pdev->dev, 7396 "return from %s %d\n", __func__, __LINE__); 7397 return; 7398 } 7399 7400 megasas_return_cmd(instance, cmd); 7401 } 7402 7403 /** 7404 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7405 * @instance: Adapter soft state 7406 * @opcode: Shutdown/Hibernate 7407 */ 7408 static void megasas_shutdown_controller(struct megasas_instance *instance, 7409 u32 opcode) 7410 { 7411 struct megasas_cmd *cmd; 7412 struct megasas_dcmd_frame *dcmd; 7413 7414 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7415 return; 7416 7417 cmd = megasas_get_cmd(instance); 7418 7419 if (!cmd) 7420 return; 7421 7422 if (instance->aen_cmd) 7423 megasas_issue_blocked_abort_cmd(instance, 7424 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7425 if (instance->map_update_cmd) 7426 megasas_issue_blocked_abort_cmd(instance, 7427 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7428 if (instance->jbod_seq_cmd) 7429 megasas_issue_blocked_abort_cmd(instance, 7430 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7431 7432 dcmd = &cmd->frame->dcmd; 7433 7434 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7435 7436 dcmd->cmd = MFI_CMD_DCMD; 7437 
dcmd->cmd_status = 0x0; 7438 dcmd->sge_count = 0; 7439 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7440 dcmd->timeout = 0; 7441 dcmd->pad_0 = 0; 7442 dcmd->data_xfer_len = 0; 7443 dcmd->opcode = cpu_to_le32(opcode); 7444 7445 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7446 != DCMD_SUCCESS) { 7447 dev_err(&instance->pdev->dev, 7448 "return from %s %d\n", __func__, __LINE__); 7449 return; 7450 } 7451 7452 megasas_return_cmd(instance, cmd); 7453 } 7454 7455 #ifdef CONFIG_PM 7456 /** 7457 * megasas_suspend - driver suspend entry point 7458 * @pdev: PCI device structure 7459 * @state: PCI power state to suspend routine 7460 */ 7461 static int 7462 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 7463 { 7464 struct megasas_instance *instance; 7465 7466 instance = pci_get_drvdata(pdev); 7467 7468 if (!instance) 7469 return 0; 7470 7471 instance->unload = 1; 7472 7473 dev_info(&pdev->dev, "%s is called\n", __func__); 7474 7475 /* Shutdown SR-IOV heartbeat timer */ 7476 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7477 del_timer_sync(&instance->sriov_heartbeat_timer); 7478 7479 /* Stop the FW fault detection watchdog */ 7480 if (instance->adapter_type != MFI_SERIES) 7481 megasas_fusion_stop_watchdog(instance); 7482 7483 megasas_flush_cache(instance); 7484 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7485 7486 /* cancel the delayed work if this work still in queue */ 7487 if (instance->ev != NULL) { 7488 struct megasas_aen_event *ev = instance->ev; 7489 cancel_delayed_work_sync(&ev->hotplug_work); 7490 instance->ev = NULL; 7491 } 7492 7493 tasklet_kill(&instance->isr_tasklet); 7494 7495 pci_set_drvdata(instance->pdev, instance); 7496 instance->instancet->disable_intr(instance); 7497 7498 megasas_destroy_irqs(instance); 7499 7500 if (instance->msix_vectors) 7501 pci_free_irq_vectors(instance->pdev); 7502 7503 pci_save_state(pdev); 7504 pci_disable_device(pdev); 7505 7506 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 7507 7508 return 0; 7509 } 7510 7511 /** 7512 * megasas_resume- driver resume entry point 7513 * @pdev: PCI device structure 7514 */ 7515 static int 7516 megasas_resume(struct pci_dev *pdev) 7517 { 7518 int rval; 7519 struct Scsi_Host *host; 7520 struct megasas_instance *instance; 7521 int irq_flags = PCI_IRQ_LEGACY; 7522 7523 instance = pci_get_drvdata(pdev); 7524 7525 if (!instance) 7526 return 0; 7527 7528 host = instance->host; 7529 pci_set_power_state(pdev, PCI_D0); 7530 pci_enable_wake(pdev, PCI_D0, 0); 7531 pci_restore_state(pdev); 7532 7533 dev_info(&pdev->dev, "%s is called\n", __func__); 7534 /* 7535 * PCI prepping: enable device set bus mastering and dma mask 7536 */ 7537 rval = pci_enable_device_mem(pdev); 7538 7539 if (rval) { 7540 dev_err(&pdev->dev, "Enable device failed\n"); 7541 return rval; 7542 } 7543 7544 pci_set_master(pdev); 7545 7546 /* 7547 * We expect the FW state to be READY 7548 */ 7549 if (megasas_transition_to_ready(instance, 0)) 7550 goto fail_ready_state; 7551 7552 if (megasas_set_dma_mask(instance)) 7553 goto fail_set_dma_mask; 7554 7555 /* 7556 * Initialize MFI Firmware 7557 */ 7558 7559 atomic_set(&instance->fw_outstanding, 0); 7560 atomic_set(&instance->ldio_outstanding, 0); 7561 7562 /* Now re-enable MSI-X */ 7563 if (instance->msix_vectors) { 7564 irq_flags = PCI_IRQ_MSIX; 7565 if (instance->smp_affinity_enable) 7566 irq_flags |= PCI_IRQ_AFFINITY; 7567 } 7568 rval = pci_alloc_irq_vectors(instance->pdev, 1, 7569 instance->msix_vectors ? 
7570 instance->msix_vectors : 1, irq_flags); 7571 if (rval < 0) 7572 goto fail_reenable_msix; 7573 7574 megasas_setup_reply_map(instance); 7575 7576 if (instance->adapter_type != MFI_SERIES) { 7577 megasas_reset_reply_desc(instance); 7578 if (megasas_ioc_init_fusion(instance)) { 7579 megasas_free_cmds(instance); 7580 megasas_free_cmds_fusion(instance); 7581 goto fail_init_mfi; 7582 } 7583 if (!megasas_get_map_info(instance)) 7584 megasas_sync_map_info(instance); 7585 } else { 7586 *instance->producer = 0; 7587 *instance->consumer = 0; 7588 if (megasas_issue_init_mfi(instance)) 7589 goto fail_init_mfi; 7590 } 7591 7592 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7593 goto fail_init_mfi; 7594 7595 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7596 (unsigned long)instance); 7597 7598 if (instance->msix_vectors ? 7599 megasas_setup_irqs_msix(instance, 0) : 7600 megasas_setup_irqs_ioapic(instance)) 7601 goto fail_init_mfi; 7602 7603 if (instance->adapter_type != MFI_SERIES) 7604 megasas_setup_irq_poll(instance); 7605 7606 /* Re-launch SR-IOV heartbeat timer */ 7607 if (instance->requestorId) { 7608 if (!megasas_sriov_start_heartbeat(instance, 0)) 7609 megasas_start_timer(instance); 7610 else { 7611 instance->skip_heartbeat_timer_del = 1; 7612 goto fail_init_mfi; 7613 } 7614 } 7615 7616 instance->instancet->enable_intr(instance); 7617 megasas_setup_jbod_map(instance); 7618 instance->unload = 0; 7619 7620 /* 7621 * Initiate AEN (Asynchronous Event Notification) 7622 */ 7623 if (megasas_start_aen(instance)) 7624 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7625 7626 /* Re-launch FW fault watchdog */ 7627 if (instance->adapter_type != MFI_SERIES) 7628 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7629 goto fail_start_watchdog; 7630 7631 return 0; 7632 7633 fail_start_watchdog: 7634 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7635 del_timer_sync(&instance->sriov_heartbeat_timer); 7636 fail_init_mfi: 7637 megasas_free_ctrl_dma_buffers(instance); 7638 megasas_free_ctrl_mem(instance); 7639 scsi_host_put(host); 7640 7641 fail_reenable_msix: 7642 fail_set_dma_mask: 7643 fail_ready_state: 7644 7645 pci_disable_device(pdev); 7646 7647 return -ENODEV; 7648 } 7649 #else 7650 #define megasas_suspend NULL 7651 #define megasas_resume NULL 7652 #endif 7653 7654 static inline int 7655 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7656 { 7657 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7658 int i; 7659 u8 adp_state; 7660 7661 for (i = 0; i < wait_time; i++) { 7662 adp_state = atomic_read(&instance->adprecovery); 7663 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7664 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7665 break; 7666 7667 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7668 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7669 7670 msleep(1000); 7671 } 7672 7673 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7674 dev_info(&instance->pdev->dev, 7675 "%s HBA failed to become operational, adp_state %d\n", 7676 __func__, adp_state); 7677 return 1; 7678 } 7679 7680 return 0; 7681 } 7682 7683 /** 7684 * megasas_detach_one - PCI hot"un"plug entry point 7685 * @pdev: PCI device structure 7686 */ 7687 static void megasas_detach_one(struct pci_dev *pdev) 7688 { 7689 int i; 7690 struct Scsi_Host *host; 7691 struct megasas_instance *instance; 7692 struct fusion_context *fusion; 7693 u32 pd_seq_map_sz; 7694 7695 instance = pci_get_drvdata(pdev); 7696 7697 if (!instance) 7698 return; 7699 7700 host = 
instance->host; 7701 fusion = instance->ctrl_context; 7702 7703 /* Shutdown SR-IOV heartbeat timer */ 7704 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7705 del_timer_sync(&instance->sriov_heartbeat_timer); 7706 7707 /* Stop the FW fault detection watchdog */ 7708 if (instance->adapter_type != MFI_SERIES) 7709 megasas_fusion_stop_watchdog(instance); 7710 7711 if (instance->fw_crash_state != UNAVAILABLE) 7712 megasas_free_host_crash_buffer(instance); 7713 scsi_remove_host(instance->host); 7714 instance->unload = 1; 7715 7716 if (megasas_wait_for_adapter_operational(instance)) 7717 goto skip_firing_dcmds; 7718 7719 megasas_flush_cache(instance); 7720 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7721 7722 skip_firing_dcmds: 7723 /* cancel the delayed work if this work still in queue*/ 7724 if (instance->ev != NULL) { 7725 struct megasas_aen_event *ev = instance->ev; 7726 cancel_delayed_work_sync(&ev->hotplug_work); 7727 instance->ev = NULL; 7728 } 7729 7730 /* cancel all wait events */ 7731 wake_up_all(&instance->int_cmd_wait_q); 7732 7733 tasklet_kill(&instance->isr_tasklet); 7734 7735 /* 7736 * Take the instance off the instance array. Note that we will not 7737 * decrement the max_index. We let this array be sparse array 7738 */ 7739 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7740 if (megasas_mgmt_info.instance[i] == instance) { 7741 megasas_mgmt_info.count--; 7742 megasas_mgmt_info.instance[i] = NULL; 7743 7744 break; 7745 } 7746 } 7747 7748 instance->instancet->disable_intr(instance); 7749 7750 megasas_destroy_irqs(instance); 7751 7752 if (instance->msix_vectors) 7753 pci_free_irq_vectors(instance->pdev); 7754 7755 if (instance->adapter_type >= VENTURA_SERIES) { 7756 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7757 kfree(fusion->stream_detect_by_ld[i]); 7758 kfree(fusion->stream_detect_by_ld); 7759 fusion->stream_detect_by_ld = NULL; 7760 } 7761 7762 7763 if (instance->adapter_type != MFI_SERIES) { 7764 megasas_release_fusion(instance); 7765 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 7766 (sizeof(struct MR_PD_CFG_SEQ) * 7767 (MAX_PHYSICAL_DEVICES - 1)); 7768 for (i = 0; i < 2 ; i++) { 7769 if (fusion->ld_map[i]) 7770 dma_free_coherent(&instance->pdev->dev, 7771 fusion->max_map_sz, 7772 fusion->ld_map[i], 7773 fusion->ld_map_phys[i]); 7774 if (fusion->ld_drv_map[i]) { 7775 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 7776 vfree(fusion->ld_drv_map[i]); 7777 else 7778 free_pages((ulong)fusion->ld_drv_map[i], 7779 fusion->drv_map_pages); 7780 } 7781 7782 if (fusion->pd_seq_sync[i]) 7783 dma_free_coherent(&instance->pdev->dev, 7784 pd_seq_map_sz, 7785 fusion->pd_seq_sync[i], 7786 fusion->pd_seq_phys[i]); 7787 } 7788 } else { 7789 megasas_release_mfi(instance); 7790 } 7791 7792 if (instance->vf_affiliation) 7793 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 7794 sizeof(struct MR_LD_VF_AFFILIATION), 7795 instance->vf_affiliation, 7796 instance->vf_affiliation_h); 7797 7798 if (instance->vf_affiliation_111) 7799 dma_free_coherent(&pdev->dev, 7800 sizeof(struct MR_LD_VF_AFFILIATION_111), 7801 instance->vf_affiliation_111, 7802 instance->vf_affiliation_111_h); 7803 7804 if (instance->hb_host_mem) 7805 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 7806 instance->hb_host_mem, 7807 instance->hb_host_mem_h); 7808 7809 megasas_free_ctrl_dma_buffers(instance); 7810 7811 megasas_free_ctrl_mem(instance); 7812 7813 megasas_destroy_debugfs(instance); 7814 7815 scsi_host_put(host); 7816 7817 pci_disable_device(pdev); 
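	/*
	 * The teardown above broadly mirrors the probe path in reverse:
	 * interrupts are disabled and freed before the command pools and DMA
	 * buffers they may still reference, and the PCI device is disabled
	 * only after the Scsi_Host reference has been dropped.
	 */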
7818 }
7819
7820 /**
7821 * megasas_shutdown - Shutdown entry point
7822 * @pdev: PCI device structure
7823 */
7824 static void megasas_shutdown(struct pci_dev *pdev)
7825 {
7826 struct megasas_instance *instance = pci_get_drvdata(pdev);
7827
7828 if (!instance)
7829 return;
7830
7831 instance->unload = 1;
7832
7833 if (megasas_wait_for_adapter_operational(instance))
7834 goto skip_firing_dcmds;
7835
7836 megasas_flush_cache(instance);
7837 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7838
7839 skip_firing_dcmds:
7840 instance->instancet->disable_intr(instance);
7841 megasas_destroy_irqs(instance);
7842
7843 if (instance->msix_vectors)
7844 pci_free_irq_vectors(instance->pdev);
7845 }
7846
7847 /**
7848 * megasas_mgmt_open - char node "open" entry point
7849 */
7850 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7851 {
7852 /*
7853 * Allow only those users with admin rights
7854 */
7855 if (!capable(CAP_SYS_ADMIN))
7856 return -EACCES;
7857
7858 return 0;
7859 }
7860
7861 /**
7862 * megasas_mgmt_fasync - Async notifier registration from applications
7863 *
7864 * This function adds the calling process to a driver global queue. When an
7865 * event occurs, SIGIO will be sent to all processes in this queue.
7866 */
7867 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7868 {
7869 int rc;
7870
7871 mutex_lock(&megasas_async_queue_mutex);
7872
7873 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7874
7875 mutex_unlock(&megasas_async_queue_mutex);
7876
7877 if (rc >= 0) {
7878 /* For sanity check when we get ioctl */
7879 filep->private_data = filep;
7880 return 0;
7881 }
7882
7883 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7884
7885 return rc;
7886 }
7887
7888 /**
7889 * megasas_mgmt_poll - char node "poll" entry point
7890 */
7891 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7892 {
7893 __poll_t mask;
7894 unsigned long flags;
7895
7896 poll_wait(file, &megasas_poll_wait, wait);
7897 spin_lock_irqsave(&poll_aen_lock, flags);
7898 if (megasas_poll_wait_aen)
7899 mask = (EPOLLIN | EPOLLRDNORM);
7900 else
7901 mask = 0;
7902 megasas_poll_wait_aen = 0;
7903 spin_unlock_irqrestore(&poll_aen_lock, flags);
7904 return mask;
7905 }
7906
7907 /*
7908 * megasas_set_crash_dump_params_ioctl:
7909 * Send CRASH_DUMP_MODE DCMD to all controllers
7910 * @cmd: MFI command frame
7911 */
7912
7913 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7914 {
7915 struct megasas_instance *local_instance;
7916 int i, error = 0;
7917 int crash_support;
7918
7919 crash_support = cmd->frame->dcmd.mbox.w[0];
7920
7921 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7922 local_instance = megasas_mgmt_info.instance[i];
7923 if (local_instance && local_instance->crash_dump_drv_support) {
7924 if ((atomic_read(&local_instance->adprecovery) ==
7925 MEGASAS_HBA_OPERATIONAL) &&
7926 !megasas_set_crash_dump_params(local_instance,
7927 crash_support)) {
7928 local_instance->crash_dump_app_support =
7929 crash_support;
7930 dev_info(&local_instance->pdev->dev,
7931 "Application firmware crash "
7932 "dump mode set success\n");
7933 error = 0;
7934 } else {
7935 dev_info(&local_instance->pdev->dev,
7936 "Application firmware crash "
7937 "dump mode set failed\n");
7938 error = -1;
7939 }
7940 }
7941 }
7942 return error;
7943 }
7944
7945 /**
7946 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
7947 * @instance: Adapter soft state
7948 * @user_ioc: User's ioctl packet (user-space address, used to copy status back)
 * @ioc: Kernel-side copy of the user's ioctl packet
7949 */
7950
static int
7951 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7952 struct megasas_iocpacket __user * user_ioc,
7953 struct megasas_iocpacket *ioc)
7954 {
7955 struct megasas_sge64 *kern_sge64 = NULL;
7956 struct megasas_sge32 *kern_sge32 = NULL;
7957 struct megasas_cmd *cmd;
7958 void *kbuff_arr[MAX_IOCTL_SGE];
7959 dma_addr_t buf_handle = 0;
7960 int error = 0, i;
7961 void *sense = NULL;
7962 dma_addr_t sense_handle;
7963 unsigned long *sense_ptr;
7964 u32 opcode = 0;
7965
7966 memset(kbuff_arr, 0, sizeof(kbuff_arr));
7967
7968 if (ioc->sge_count > MAX_IOCTL_SGE) {
7969 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
7970 ioc->sge_count, MAX_IOCTL_SGE);
7971 return -EINVAL;
7972 }
7973
7974 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7975 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7976 !instance->support_nvme_passthru) ||
7977 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
7978 !instance->support_pci_lane_margining)) {
7979 dev_err(&instance->pdev->dev,
7980 "Received invalid ioctl command 0x%x\n",
7981 ioc->frame.hdr.cmd);
7982 return -ENOTSUPP;
7983 }
7984
7985 cmd = megasas_get_cmd(instance);
7986 if (!cmd) {
7987 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7988 return -ENOMEM;
7989 }
7990
7991 /*
7992 * User's IOCTL packet has 2 frames (maximum). Copy those two
7993 * frames into our cmd's frames. cmd->frame's context will get
7994 * overwritten when we copy from user's frames. So set that value
7995 * alone separately.
7996 */
7997 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7998 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7999 cmd->frame->hdr.pad_0 = 0;
8000
8001 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8002
8003 if (instance->consistent_mask_64bit)
8004 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8005 MFI_FRAME_SENSE64));
8006 else
8007 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8008 MFI_FRAME_SENSE64));
8009
8010 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8011 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8012
8013 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8014 mutex_lock(&instance->reset_mutex);
8015 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8016 megasas_return_cmd(instance, cmd);
8017 mutex_unlock(&instance->reset_mutex);
8018 return -1;
8019 }
8020 mutex_unlock(&instance->reset_mutex);
8021 }
8022
8023 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8024 error = megasas_set_crash_dump_params_ioctl(cmd);
8025 megasas_return_cmd(instance, cmd);
8026 return error;
8027 }
8028
8029 /*
8030 * The management interface between applications and the fw uses
8031 * MFI frames. E.g., RAID configuration changes, LD property changes,
8032 * etc. are accomplished through different kinds of MFI frames. The
8033 * driver needs to care only about substituting user buffers with
8034 * kernel buffers in SGLs. The location of SGL is embedded in the
8035 * struct iocpacket itself.
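 *
 * In outline (a condensed summary of the code that follows, not extra
 * behaviour; "kern_sge" stands for kern_sge32 or kern_sge64 depending on
 * instance->consistent_mask_64bit), for each SG entry:
 *
 *	kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
 *					  ioc->sgl[i].iov_len, &buf_handle,
 *					  GFP_KERNEL);
 *	kern_sge[i].phys_addr = buf_handle;	(DMA address handed to FW)
 *	kern_sge[i].length = ioc->sgl[i].iov_len;
 *	copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
 *		       ioc->sgl[i].iov_len);
 *
 * The frame is then issued with megasas_issue_blocked_cmd(), the data,
 * sense and cmd_status are copied back to user space, and every mirror
 * buffer is released with dma_free_coherent() in the "out" path.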
8036 */ 8037 if (instance->consistent_mask_64bit) 8038 kern_sge64 = (struct megasas_sge64 *) 8039 ((unsigned long)cmd->frame + ioc->sgl_off); 8040 else 8041 kern_sge32 = (struct megasas_sge32 *) 8042 ((unsigned long)cmd->frame + ioc->sgl_off); 8043 8044 /* 8045 * For each user buffer, create a mirror buffer and copy in 8046 */ 8047 for (i = 0; i < ioc->sge_count; i++) { 8048 if (!ioc->sgl[i].iov_len) 8049 continue; 8050 8051 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8052 ioc->sgl[i].iov_len, 8053 &buf_handle, GFP_KERNEL); 8054 if (!kbuff_arr[i]) { 8055 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8056 "kernel SGL buffer for IOCTL\n"); 8057 error = -ENOMEM; 8058 goto out; 8059 } 8060 8061 /* 8062 * We don't change the dma_coherent_mask, so 8063 * dma_alloc_coherent only returns 32bit addresses 8064 */ 8065 if (instance->consistent_mask_64bit) { 8066 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8067 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8068 } else { 8069 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8070 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8071 } 8072 8073 /* 8074 * We created a kernel buffer corresponding to the 8075 * user buffer. Now copy in from the user buffer 8076 */ 8077 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8078 (u32) (ioc->sgl[i].iov_len))) { 8079 error = -EFAULT; 8080 goto out; 8081 } 8082 } 8083 8084 if (ioc->sense_len) { 8085 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8086 &sense_handle, GFP_KERNEL); 8087 if (!sense) { 8088 error = -ENOMEM; 8089 goto out; 8090 } 8091 8092 sense_ptr = 8093 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 8094 if (instance->consistent_mask_64bit) 8095 *sense_ptr = cpu_to_le64(sense_handle); 8096 else 8097 *sense_ptr = cpu_to_le32(sense_handle); 8098 } 8099 8100 /* 8101 * Set the sync_cmd flag so that the ISR knows not to complete this 8102 * cmd to the SCSI mid-layer 8103 */ 8104 cmd->sync_cmd = 1; 8105 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 8106 cmd->sync_cmd = 0; 8107 dev_err(&instance->pdev->dev, 8108 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8109 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8110 cmd->cmd_status_drv); 8111 return -EBUSY; 8112 } 8113 8114 cmd->sync_cmd = 0; 8115 8116 if (instance->unload == 1) { 8117 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8118 "don't submit data to application\n"); 8119 goto out; 8120 } 8121 /* 8122 * copy out the kernel buffers to user buffers 8123 */ 8124 for (i = 0; i < ioc->sge_count; i++) { 8125 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8126 ioc->sgl[i].iov_len)) { 8127 error = -EFAULT; 8128 goto out; 8129 } 8130 } 8131 8132 /* 8133 * copy out the sense 8134 */ 8135 if (ioc->sense_len) { 8136 /* 8137 * sense_ptr points to the location that has the user 8138 * sense buffer address 8139 */ 8140 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 8141 ioc->sense_off); 8142 8143 if (copy_to_user((void __user *)((unsigned long) 8144 get_unaligned((unsigned long *)sense_ptr)), 8145 sense, ioc->sense_len)) { 8146 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8147 "sense data\n"); 8148 error = -EFAULT; 8149 goto out; 8150 } 8151 } 8152 8153 /* 8154 * copy the status codes returned by the fw 8155 */ 8156 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8157 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8158 dev_printk(KERN_DEBUG, &instance->pdev->dev, 
"Error copying out cmd_status\n"); 8159 error = -EFAULT; 8160 } 8161 8162 out: 8163 if (sense) { 8164 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8165 sense, sense_handle); 8166 } 8167 8168 for (i = 0; i < ioc->sge_count; i++) { 8169 if (kbuff_arr[i]) { 8170 if (instance->consistent_mask_64bit) 8171 dma_free_coherent(&instance->pdev->dev, 8172 le32_to_cpu(kern_sge64[i].length), 8173 kbuff_arr[i], 8174 le64_to_cpu(kern_sge64[i].phys_addr)); 8175 else 8176 dma_free_coherent(&instance->pdev->dev, 8177 le32_to_cpu(kern_sge32[i].length), 8178 kbuff_arr[i], 8179 le32_to_cpu(kern_sge32[i].phys_addr)); 8180 kbuff_arr[i] = NULL; 8181 } 8182 } 8183 8184 megasas_return_cmd(instance, cmd); 8185 return error; 8186 } 8187 8188 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8189 { 8190 struct megasas_iocpacket __user *user_ioc = 8191 (struct megasas_iocpacket __user *)arg; 8192 struct megasas_iocpacket *ioc; 8193 struct megasas_instance *instance; 8194 int error; 8195 8196 ioc = memdup_user(user_ioc, sizeof(*ioc)); 8197 if (IS_ERR(ioc)) 8198 return PTR_ERR(ioc); 8199 8200 instance = megasas_lookup_instance(ioc->host_no); 8201 if (!instance) { 8202 error = -ENODEV; 8203 goto out_kfree_ioc; 8204 } 8205 8206 /* Block ioctls in VF mode */ 8207 if (instance->requestorId && !allow_vf_ioctls) { 8208 error = -ENODEV; 8209 goto out_kfree_ioc; 8210 } 8211 8212 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8213 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8214 error = -ENODEV; 8215 goto out_kfree_ioc; 8216 } 8217 8218 if (instance->unload == 1) { 8219 error = -ENODEV; 8220 goto out_kfree_ioc; 8221 } 8222 8223 if (down_interruptible(&instance->ioctl_sem)) { 8224 error = -ERESTARTSYS; 8225 goto out_kfree_ioc; 8226 } 8227 8228 if (megasas_wait_for_adapter_operational(instance)) { 8229 error = -ENODEV; 8230 goto out_up; 8231 } 8232 8233 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8234 out_up: 8235 up(&instance->ioctl_sem); 8236 8237 out_kfree_ioc: 8238 kfree(ioc); 8239 return error; 8240 } 8241 8242 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8243 { 8244 struct megasas_instance *instance; 8245 struct megasas_aen aen; 8246 int error; 8247 8248 if (file->private_data != file) { 8249 printk(KERN_DEBUG "megasas: fasync_helper was not " 8250 "called first\n"); 8251 return -EINVAL; 8252 } 8253 8254 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8255 return -EFAULT; 8256 8257 instance = megasas_lookup_instance(aen.host_no); 8258 8259 if (!instance) 8260 return -ENODEV; 8261 8262 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8263 return -ENODEV; 8264 } 8265 8266 if (instance->unload == 1) { 8267 return -ENODEV; 8268 } 8269 8270 if (megasas_wait_for_adapter_operational(instance)) 8271 return -ENODEV; 8272 8273 mutex_lock(&instance->reset_mutex); 8274 error = megasas_register_aen(instance, aen.seq_num, 8275 aen.class_locale_word); 8276 mutex_unlock(&instance->reset_mutex); 8277 return error; 8278 } 8279 8280 /** 8281 * megasas_mgmt_ioctl - char node ioctl entry point 8282 */ 8283 static long 8284 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8285 { 8286 switch (cmd) { 8287 case MEGASAS_IOC_FIRMWARE: 8288 return megasas_mgmt_ioctl_fw(file, arg); 8289 8290 case MEGASAS_IOC_GET_AEN: 8291 return megasas_mgmt_ioctl_aen(file, arg); 8292 } 8293 8294 return -ENOTTY; 8295 } 8296 8297 #ifdef CONFIG_COMPAT 8298 static int megasas_mgmt_compat_ioctl_fw(struct 
file *file, unsigned long arg) 8299 { 8300 struct compat_megasas_iocpacket __user *cioc = 8301 (struct compat_megasas_iocpacket __user *)arg; 8302 struct megasas_iocpacket __user *ioc = 8303 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 8304 int i; 8305 int error = 0; 8306 compat_uptr_t ptr; 8307 u32 local_sense_off; 8308 u32 local_sense_len; 8309 u32 user_sense_off; 8310 8311 if (clear_user(ioc, sizeof(*ioc))) 8312 return -EFAULT; 8313 8314 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 8315 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 8316 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 8317 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 8318 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 8319 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 8320 return -EFAULT; 8321 8322 /* 8323 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 8324 * sense_len is not null, so prepare the 64bit value under 8325 * the same condition. 8326 */ 8327 if (get_user(local_sense_off, &ioc->sense_off) || 8328 get_user(local_sense_len, &ioc->sense_len) || 8329 get_user(user_sense_off, &cioc->sense_off)) 8330 return -EFAULT; 8331 8332 if (local_sense_off != user_sense_off) 8333 return -EINVAL; 8334 8335 if (local_sense_len) { 8336 void __user **sense_ioc_ptr = 8337 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 8338 compat_uptr_t *sense_cioc_ptr = 8339 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 8340 if (get_user(ptr, sense_cioc_ptr) || 8341 put_user(compat_ptr(ptr), sense_ioc_ptr)) 8342 return -EFAULT; 8343 } 8344 8345 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8346 if (get_user(ptr, &cioc->sgl[i].iov_base) || 8347 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 8348 copy_in_user(&ioc->sgl[i].iov_len, 8349 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 8350 return -EFAULT; 8351 } 8352 8353 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 8354 8355 if (copy_in_user(&cioc->frame.hdr.cmd_status, 8356 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 8357 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 8358 return -EFAULT; 8359 } 8360 return error; 8361 } 8362 8363 static long 8364 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8365 unsigned long arg) 8366 { 8367 switch (cmd) { 8368 case MEGASAS_IOC_FIRMWARE32: 8369 return megasas_mgmt_compat_ioctl_fw(file, arg); 8370 case MEGASAS_IOC_GET_AEN: 8371 return megasas_mgmt_ioctl_aen(file, arg); 8372 } 8373 8374 return -ENOTTY; 8375 } 8376 #endif 8377 8378 /* 8379 * File operations structure for management interface 8380 */ 8381 static const struct file_operations megasas_mgmt_fops = { 8382 .owner = THIS_MODULE, 8383 .open = megasas_mgmt_open, 8384 .fasync = megasas_mgmt_fasync, 8385 .unlocked_ioctl = megasas_mgmt_ioctl, 8386 .poll = megasas_mgmt_poll, 8387 #ifdef CONFIG_COMPAT 8388 .compat_ioctl = megasas_mgmt_compat_ioctl, 8389 #endif 8390 .llseek = noop_llseek, 8391 }; 8392 8393 /* 8394 * PCI hotplug support registration structure 8395 */ 8396 static struct pci_driver megasas_pci_driver = { 8397 8398 .name = "megaraid_sas", 8399 .id_table = megasas_pci_table, 8400 .probe = megasas_probe_one, 8401 .remove = megasas_detach_one, 8402 .suspend = megasas_suspend, 8403 .resume = megasas_resume, 8404 .shutdown = megasas_shutdown, 8405 }; 8406 8407 /* 8408 * Sysfs driver attributes 8409 */ 8410 static ssize_t version_show(struct device_driver *dd, char *buf) 8411 { 8412 return 
snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8413 MEGASAS_VERSION); 8414 } 8415 static DRIVER_ATTR_RO(version); 8416 8417 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8418 { 8419 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8420 MEGASAS_RELDATE); 8421 } 8422 static DRIVER_ATTR_RO(release_date); 8423 8424 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8425 { 8426 return sprintf(buf, "%u\n", support_poll_for_event); 8427 } 8428 static DRIVER_ATTR_RO(support_poll_for_event); 8429 8430 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8431 { 8432 return sprintf(buf, "%u\n", support_device_change); 8433 } 8434 static DRIVER_ATTR_RO(support_device_change); 8435 8436 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8437 { 8438 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8439 } 8440 8441 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8442 size_t count) 8443 { 8444 int retval = count; 8445 8446 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8447 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8448 retval = -EINVAL; 8449 } 8450 return retval; 8451 } 8452 static DRIVER_ATTR_RW(dbg_lvl); 8453 8454 static ssize_t 8455 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8456 { 8457 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8458 } 8459 8460 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8461 8462 static ssize_t 8463 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8464 { 8465 return sprintf(buf, "%u\n", support_pci_lane_margining); 8466 } 8467 8468 static DRIVER_ATTR_RO(support_pci_lane_margining); 8469 8470 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8471 { 8472 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8473 scsi_remove_device(sdev); 8474 scsi_device_put(sdev); 8475 } 8476 8477 /** 8478 * megasas_update_device_list - Update the PD and LD device list from FW 8479 * after an AEN event notification 8480 * @instance: Adapter soft state 8481 * @event_type: Indicates type of event (PD or LD event) 8482 * 8483 * @return: Success or failure 8484 * 8485 * Issue DCMDs to Firmware to update the internal device list in driver. 8486 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8487 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
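 *
 * Summarizing the branches below: when instance->enable_fw_dev_list is set,
 * a single megasas_host_device_list_query() reports every exposed PD and LD;
 * otherwise SCAN_PD_CHANNEL events trigger megasas_get_pd_list() and
 * SCAN_VD_CHANNEL events trigger
 * megasas_ld_list_query(MR_LD_QUERY_TYPE_EXPOSED_TO_HOST), with the LD query
 * skipped on SR-IOV VFs when megasas_get_ld_vf_affiliation() returns 0.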
8488 */ 8489 static 8490 int megasas_update_device_list(struct megasas_instance *instance, 8491 int event_type) 8492 { 8493 int dcmd_ret = DCMD_SUCCESS; 8494 8495 if (instance->enable_fw_dev_list) { 8496 dcmd_ret = megasas_host_device_list_query(instance, false); 8497 if (dcmd_ret != DCMD_SUCCESS) 8498 goto out; 8499 } else { 8500 if (event_type & SCAN_PD_CHANNEL) { 8501 dcmd_ret = megasas_get_pd_list(instance); 8502 8503 if (dcmd_ret != DCMD_SUCCESS) 8504 goto out; 8505 } 8506 8507 if (event_type & SCAN_VD_CHANNEL) { 8508 if (!instance->requestorId || 8509 (instance->requestorId && 8510 megasas_get_ld_vf_affiliation(instance, 0))) { 8511 dcmd_ret = megasas_ld_list_query(instance, 8512 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8513 if (dcmd_ret != DCMD_SUCCESS) 8514 goto out; 8515 } 8516 } 8517 } 8518 8519 out: 8520 return dcmd_ret; 8521 } 8522 8523 /** 8524 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8525 * after an AEN event notification 8526 * @instance: Adapter soft state 8527 * @scan_type: Indicates type of devices (PD/LD) to add 8528 * @return void 8529 */ 8530 static 8531 void megasas_add_remove_devices(struct megasas_instance *instance, 8532 int scan_type) 8533 { 8534 int i, j; 8535 u16 pd_index = 0; 8536 u16 ld_index = 0; 8537 u16 channel = 0, id = 0; 8538 struct Scsi_Host *host; 8539 struct scsi_device *sdev1; 8540 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8541 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8542 8543 host = instance->host; 8544 8545 if (instance->enable_fw_dev_list) { 8546 targetid_list = instance->host_device_list_buf; 8547 for (i = 0; i < targetid_list->count; i++) { 8548 targetid_entry = &targetid_list->host_device_list[i]; 8549 if (targetid_entry->flags.u.bits.is_sys_pd) { 8550 channel = le16_to_cpu(targetid_entry->target_id) / 8551 MEGASAS_MAX_DEV_PER_CHANNEL; 8552 id = le16_to_cpu(targetid_entry->target_id) % 8553 MEGASAS_MAX_DEV_PER_CHANNEL; 8554 } else { 8555 channel = MEGASAS_MAX_PD_CHANNELS + 8556 (le16_to_cpu(targetid_entry->target_id) / 8557 MEGASAS_MAX_DEV_PER_CHANNEL); 8558 id = le16_to_cpu(targetid_entry->target_id) % 8559 MEGASAS_MAX_DEV_PER_CHANNEL; 8560 } 8561 sdev1 = scsi_device_lookup(host, channel, id, 0); 8562 if (!sdev1) { 8563 scsi_add_device(host, channel, id, 0); 8564 } else { 8565 scsi_device_put(sdev1); 8566 } 8567 } 8568 } 8569 8570 if (scan_type & SCAN_PD_CHANNEL) { 8571 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8572 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8573 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8574 sdev1 = scsi_device_lookup(host, i, j, 0); 8575 if (instance->pd_list[pd_index].driveState == 8576 MR_PD_STATE_SYSTEM) { 8577 if (!sdev1) 8578 scsi_add_device(host, i, j, 0); 8579 else 8580 scsi_device_put(sdev1); 8581 } else { 8582 if (sdev1) 8583 megasas_remove_scsi_device(sdev1); 8584 } 8585 } 8586 } 8587 } 8588 8589 if (scan_type & SCAN_VD_CHANNEL) { 8590 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8591 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8592 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8593 sdev1 = scsi_device_lookup(host, 8594 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8595 if (instance->ld_ids[ld_index] != 0xff) { 8596 if (!sdev1) 8597 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8598 else 8599 scsi_device_put(sdev1); 8600 } else { 8601 if (sdev1) 8602 megasas_remove_scsi_device(sdev1); 8603 } 8604 } 8605 } 8606 } 8607 8608 } 8609 8610 static void 8611 megasas_aen_polling(struct work_struct *work) 8612 { 8613 struct megasas_aen_event 
*ev = 8614 container_of(work, struct megasas_aen_event, hotplug_work.work); 8615 struct megasas_instance *instance = ev->instance; 8616 union megasas_evt_class_locale class_locale; 8617 int event_type = 0; 8618 u32 seq_num; 8619 int error; 8620 u8 dcmd_ret = DCMD_SUCCESS; 8621 8622 if (!instance) { 8623 printk(KERN_ERR "invalid instance!\n"); 8624 kfree(ev); 8625 return; 8626 } 8627 8628 /* Don't run the event workqueue thread if OCR is running */ 8629 mutex_lock(&instance->reset_mutex); 8630 8631 instance->ev = NULL; 8632 if (instance->evt_detail) { 8633 megasas_decode_evt(instance); 8634 8635 switch (le32_to_cpu(instance->evt_detail->code)) { 8636 8637 case MR_EVT_PD_INSERTED: 8638 case MR_EVT_PD_REMOVED: 8639 event_type = SCAN_PD_CHANNEL; 8640 break; 8641 8642 case MR_EVT_LD_OFFLINE: 8643 case MR_EVT_CFG_CLEARED: 8644 case MR_EVT_LD_DELETED: 8645 case MR_EVT_LD_CREATED: 8646 event_type = SCAN_VD_CHANNEL; 8647 break; 8648 8649 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8650 case MR_EVT_FOREIGN_CFG_IMPORTED: 8651 case MR_EVT_LD_STATE_CHANGE: 8652 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8653 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8654 instance->host->host_no); 8655 break; 8656 8657 case MR_EVT_CTRL_PROP_CHANGED: 8658 dcmd_ret = megasas_get_ctrl_info(instance); 8659 if (dcmd_ret == DCMD_SUCCESS && 8660 instance->snapdump_wait_time) { 8661 megasas_get_snapdump_properties(instance); 8662 dev_info(&instance->pdev->dev, 8663 "Snap dump wait time\t: %d\n", 8664 instance->snapdump_wait_time); 8665 } 8666 break; 8667 default: 8668 event_type = 0; 8669 break; 8670 } 8671 } else { 8672 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8673 mutex_unlock(&instance->reset_mutex); 8674 kfree(ev); 8675 return; 8676 } 8677 8678 if (event_type) 8679 dcmd_ret = megasas_update_device_list(instance, event_type); 8680 8681 mutex_unlock(&instance->reset_mutex); 8682 8683 if (event_type && dcmd_ret == DCMD_SUCCESS) 8684 megasas_add_remove_devices(instance, event_type); 8685 8686 if (dcmd_ret == DCMD_SUCCESS) 8687 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8688 else 8689 seq_num = instance->last_seq_num; 8690 8691 /* Register AEN with FW for latest sequence number plus 1 */ 8692 class_locale.members.reserved = 0; 8693 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8694 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8695 8696 if (instance->aen_cmd != NULL) { 8697 kfree(ev); 8698 return; 8699 } 8700 8701 mutex_lock(&instance->reset_mutex); 8702 error = megasas_register_aen(instance, seq_num, 8703 class_locale.word); 8704 if (error) 8705 dev_err(&instance->pdev->dev, 8706 "register aen failed error %x\n", error); 8707 8708 mutex_unlock(&instance->reset_mutex); 8709 kfree(ev); 8710 } 8711 8712 /** 8713 * megasas_init - Driver load entry point 8714 */ 8715 static int __init megasas_init(void) 8716 { 8717 int rval; 8718 8719 /* 8720 * Booted in kdump kernel, minimize memory footprints by 8721 * disabling few features 8722 */ 8723 if (reset_devices) { 8724 msix_vectors = 1; 8725 rdpq_enable = 0; 8726 dual_qdepth_disable = 1; 8727 } 8728 8729 /* 8730 * Announce driver version and other information 8731 */ 8732 pr_info("megasas: %s\n", MEGASAS_VERSION); 8733 8734 spin_lock_init(&poll_aen_lock); 8735 8736 support_poll_for_event = 2; 8737 support_device_change = 1; 8738 support_nvme_encapsulation = true; 8739 support_pci_lane_margining = true; 8740 8741 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 8742 8743 /* 8744 * Register character device node 8745 */ 8746 
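	/*
	 * Passing 0 as the major number below asks register_chrdev() to pick
	 * a free major dynamically; the value it returns is kept in
	 * megasas_mgmt_majorno so the same major can be handed back to
	 * unregister_chrdev() on the error paths and in megasas_exit().
	 */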
rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 8747 8748 if (rval < 0) { 8749 printk(KERN_DEBUG "megasas: failed to open device node\n"); 8750 return rval; 8751 } 8752 8753 megasas_mgmt_majorno = rval; 8754 8755 megasas_init_debugfs(); 8756 8757 /* 8758 * Register ourselves as PCI hotplug module 8759 */ 8760 rval = pci_register_driver(&megasas_pci_driver); 8761 8762 if (rval) { 8763 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 8764 goto err_pcidrv; 8765 } 8766 8767 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 8768 (event_log_level > MFI_EVT_CLASS_DEAD)) { 8769 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 8770 event_log_level = MFI_EVT_CLASS_CRITICAL; 8771 } 8772 8773 rval = driver_create_file(&megasas_pci_driver.driver, 8774 &driver_attr_version); 8775 if (rval) 8776 goto err_dcf_attr_ver; 8777 8778 rval = driver_create_file(&megasas_pci_driver.driver, 8779 &driver_attr_release_date); 8780 if (rval) 8781 goto err_dcf_rel_date; 8782 8783 rval = driver_create_file(&megasas_pci_driver.driver, 8784 &driver_attr_support_poll_for_event); 8785 if (rval) 8786 goto err_dcf_support_poll_for_event; 8787 8788 rval = driver_create_file(&megasas_pci_driver.driver, 8789 &driver_attr_dbg_lvl); 8790 if (rval) 8791 goto err_dcf_dbg_lvl; 8792 rval = driver_create_file(&megasas_pci_driver.driver, 8793 &driver_attr_support_device_change); 8794 if (rval) 8795 goto err_dcf_support_device_change; 8796 8797 rval = driver_create_file(&megasas_pci_driver.driver, 8798 &driver_attr_support_nvme_encapsulation); 8799 if (rval) 8800 goto err_dcf_support_nvme_encapsulation; 8801 8802 rval = driver_create_file(&megasas_pci_driver.driver, 8803 &driver_attr_support_pci_lane_margining); 8804 if (rval) 8805 goto err_dcf_support_pci_lane_margining; 8806 8807 return rval; 8808 8809 err_dcf_support_pci_lane_margining: 8810 driver_remove_file(&megasas_pci_driver.driver, 8811 &driver_attr_support_nvme_encapsulation); 8812 8813 err_dcf_support_nvme_encapsulation: 8814 driver_remove_file(&megasas_pci_driver.driver, 8815 &driver_attr_support_device_change); 8816 8817 err_dcf_support_device_change: 8818 driver_remove_file(&megasas_pci_driver.driver, 8819 &driver_attr_dbg_lvl); 8820 err_dcf_dbg_lvl: 8821 driver_remove_file(&megasas_pci_driver.driver, 8822 &driver_attr_support_poll_for_event); 8823 err_dcf_support_poll_for_event: 8824 driver_remove_file(&megasas_pci_driver.driver, 8825 &driver_attr_release_date); 8826 err_dcf_rel_date: 8827 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 8828 err_dcf_attr_ver: 8829 pci_unregister_driver(&megasas_pci_driver); 8830 err_pcidrv: 8831 megasas_exit_debugfs(); 8832 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 8833 return rval; 8834 } 8835 8836 /** 8837 * megasas_exit - Driver unload entry point 8838 */ 8839 static void __exit megasas_exit(void) 8840 { 8841 driver_remove_file(&megasas_pci_driver.driver, 8842 &driver_attr_dbg_lvl); 8843 driver_remove_file(&megasas_pci_driver.driver, 8844 &driver_attr_support_poll_for_event); 8845 driver_remove_file(&megasas_pci_driver.driver, 8846 &driver_attr_support_device_change); 8847 driver_remove_file(&megasas_pci_driver.driver, 8848 &driver_attr_release_date); 8849 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 8850 driver_remove_file(&megasas_pci_driver.driver, 8851 &driver_attr_support_nvme_encapsulation); 8852 
driver_remove_file(&megasas_pci_driver.driver, 8853 &driver_attr_support_pci_lane_margining); 8854 8855 pci_unregister_driver(&megasas_pci_driver); 8856 megasas_exit_debugfs(); 8857 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 8858 } 8859 8860 module_init(megasas_init); 8861 module_exit(megasas_exit); 8862
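/*
 * Illustrative user-space sketch (an assumption-laden example, not part of
 * the driver and not compiled with it): how a management application might
 * arm asynchronous event delivery through the character node registered in
 * megasas_init(). The device node path is an assumption -- the major number
 * is allocated dynamically, so udev or the tool itself must create a node
 * for it -- and the application is assumed to carry its own copies of
 * struct megasas_aen and the MEGASAS_IOC_* definitions from megaraid_sas.h.
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *
 *	struct megasas_aen aen = {
 *		.host_no = 0,		// SCSI host of interest
 *		.seq_num = 0,		// first event sequence number wanted
 *		.class_locale_word = 0,	// filter, see union megasas_evt_class_locale
 *	};
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	// megasas_mgmt_fasync()
 *	ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);			// megasas_mgmt_ioctl_aen()
 *
 * After this, SIGIO (or poll() reporting EPOLLIN on the same fd) tells the
 * application that a new event has been logged by the firmware.
 */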