1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux MegaRAID driver for SAS based RAID controllers 4 * 5 * Copyright (c) 2003-2013 LSI Corporation 6 * Copyright (c) 2013-2016 Avago Technologies 7 * Copyright (c) 2016-2018 Broadcom Inc. 8 * 9 * Authors: Broadcom Inc. 10 * Sreenivas Bagalkote 11 * Sumant Patro 12 * Bo Yang 13 * Adam Radford 14 * Kashyap Desai <kashyap.desai@broadcom.com> 15 * Sumit Saxena <sumit.saxena@broadcom.com> 16 * 17 * Send feedback to: megaraidlinux.pdl@broadcom.com 18 */ 19 20 #include <linux/kernel.h> 21 #include <linux/types.h> 22 #include <linux/pci.h> 23 #include <linux/list.h> 24 #include <linux/moduleparam.h> 25 #include <linux/module.h> 26 #include <linux/spinlock.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 #include <linux/uio.h> 30 #include <linux/slab.h> 31 #include <linux/uaccess.h> 32 #include <asm/unaligned.h> 33 #include <linux/fs.h> 34 #include <linux/compat.h> 35 #include <linux/blkdev.h> 36 #include <linux/mutex.h> 37 #include <linux/poll.h> 38 #include <linux/vmalloc.h> 39 #include <linux/irq_poll.h> 40 #include <linux/blk-mq-pci.h> 41 42 #include <scsi/scsi.h> 43 #include <scsi/scsi_cmnd.h> 44 #include <scsi/scsi_device.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_tcq.h> 47 #include <scsi/scsi_dbg.h> 48 #include "megaraid_sas_fusion.h" 49 #include "megaraid_sas.h" 50 51 /* 52 * Number of sectors per IO command 53 * Will be set in megasas_init_mfi if user does not provide 54 */ 55 static unsigned int max_sectors; 56 module_param_named(max_sectors, max_sectors, int, 0444); 57 MODULE_PARM_DESC(max_sectors, 58 "Maximum number of sectors per IO command"); 59 60 static int msix_disable; 61 module_param(msix_disable, int, 0444); 62 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 63 64 static unsigned int msix_vectors; 65 module_param(msix_vectors, int, 0444); 66 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); 67 68 static int allow_vf_ioctls; 69 module_param(allow_vf_ioctls, int, 0444); 70 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); 71 72 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 73 module_param(throttlequeuedepth, int, 0444); 74 MODULE_PARM_DESC(throttlequeuedepth, 75 "Adapter queue depth when throttled due to I/O timeout. Default: 16"); 76 77 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; 78 module_param(resetwaittime, int, 0444); 79 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s"); 80 81 static int smp_affinity_enable = 1; 82 module_param(smp_affinity_enable, int, 0444); 83 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); 84 85 static int rdpq_enable = 1; 86 module_param(rdpq_enable, int, 0444); 87 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)"); 88 89 unsigned int dual_qdepth_disable; 90 module_param(dual_qdepth_disable, int, 0444); 91 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0"); 92 93 static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 94 module_param(scmd_timeout, int, 0444); 95 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. 
See megasas_reset_timer."); 96 97 int perf_mode = -1; 98 module_param(perf_mode, int, 0444); 99 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t" 100 "0 - balanced: High iops and low latency queues are allocated &\n\t\t" 101 "interrupt coalescing is enabled only on high iops queues\n\t\t" 102 "1 - iops: High iops queues are not allocated &\n\t\t" 103 "interrupt coalescing is enabled on all queues\n\t\t" 104 "2 - latency: High iops queues are not allocated &\n\t\t" 105 "interrupt coalescing is disabled on all queues\n\t\t" 106 "default mode is 'balanced'" 107 ); 108 109 int event_log_level = MFI_EVT_CLASS_CRITICAL; 110 module_param(event_log_level, int, 0644); 111 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); 112 113 unsigned int enable_sdev_max_qd; 114 module_param(enable_sdev_max_qd, int, 0444); 115 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); 116 117 int poll_queues; 118 module_param(poll_queues, int, 0444); 119 MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" 120 "This parameter is effective only if host_tagset_enable=1 &\n\t\t" 121 "It is not applicable for MFI_SERIES. &\n\t\t" 122 "Driver will work in latency mode. &\n\t\t" 123 "High iops queues are not allocated &\n\t\t" 124 ); 125 126 int host_tagset_enable = 1; 127 module_param(host_tagset_enable, int, 0444); 128 MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); 129 130 MODULE_LICENSE("GPL"); 131 MODULE_VERSION(MEGASAS_VERSION); 132 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); 133 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver"); 134 135 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 136 static int megasas_get_pd_list(struct megasas_instance *instance); 137 static int megasas_ld_list_query(struct megasas_instance *instance, 138 u8 query_type); 139 static int megasas_issue_init_mfi(struct megasas_instance *instance); 140 static int megasas_register_aen(struct megasas_instance *instance, 141 u32 seq_num, u32 class_locale_word); 142 static void megasas_get_pd_info(struct megasas_instance *instance, 143 struct scsi_device *sdev); 144 static void 145 megasas_set_ld_removed_by_fw(struct megasas_instance *instance); 146 147 /* 148 * PCI ID table for all supported controllers 149 */ 150 static struct pci_device_id megasas_pci_table[] = { 151 152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, 153 /* xscale IOP */ 154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, 155 /* ppc IOP */ 156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, 157 /* ppc IOP */ 158 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, 159 /* gen2*/ 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, 161 /* gen2*/ 162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, 163 /* skinny*/ 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, 165 /* skinny*/ 166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, 167 /* xscale IOP, vega */ 168 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, 169 /* xscale IOP */ 170 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 171 /* Fusion */ 172 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, 173 /* Plasma */ 174 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, 
PCI_DEVICE_ID_LSI_INVADER)}, 175 /* Invader */ 176 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 177 /* Fury */ 178 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 179 /* Intruder */ 180 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 181 /* Intruder 24 port*/ 182 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 183 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 184 /* VENTURA */ 185 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 186 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 187 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 188 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 189 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 190 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, 191 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)}, 192 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)}, 193 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)}, 194 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)}, 195 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)}, 196 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)}, 197 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)}, 198 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)}, 199 {} 200 }; 201 202 MODULE_DEVICE_TABLE(pci, megasas_pci_table); 203 204 static int megasas_mgmt_majorno; 205 struct megasas_mgmt_info megasas_mgmt_info; 206 static struct fasync_struct *megasas_async_queue; 207 static DEFINE_MUTEX(megasas_async_queue_mutex); 208 209 static int megasas_poll_wait_aen; 210 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 211 static u32 support_poll_for_event; 212 u32 megasas_dbg_lvl; 213 static u32 support_device_change; 214 static bool support_nvme_encapsulation; 215 static bool support_pci_lane_margining; 216 217 /* define lock for aen poll */ 218 static DEFINE_SPINLOCK(poll_aen_lock); 219 220 extern struct dentry *megasas_debugfs_root; 221 extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); 222 223 void 224 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 225 u8 alt_status); 226 static u32 227 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance); 228 static int 229 megasas_adp_reset_gen2(struct megasas_instance *instance, 230 struct megasas_register_set __iomem *reg_set); 231 static irqreturn_t megasas_isr(int irq, void *devp); 232 static u32 233 megasas_init_adapter_mfi(struct megasas_instance *instance); 234 u32 235 megasas_build_and_issue_cmd(struct megasas_instance *instance, 236 struct scsi_cmnd *scmd); 237 static void megasas_complete_cmd_dpc(unsigned long instance_addr); 238 int 239 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 240 int seconds); 241 void megasas_fusion_ocr_wq(struct work_struct *work); 242 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 243 int initial); 244 static int 245 megasas_set_dma_mask(struct megasas_instance *instance); 246 static int 247 megasas_alloc_ctrl_mem(struct megasas_instance *instance); 248 static inline void 249 megasas_free_ctrl_mem(struct megasas_instance *instance); 250 static inline int 251 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance); 252 static inline void 253 
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance); 254 static inline void 255 megasas_init_ctrl_params(struct megasas_instance *instance); 256 257 u32 megasas_readl(struct megasas_instance *instance, 258 const volatile void __iomem *addr) 259 { 260 u32 i = 0, ret_val; 261 /* 262 * Due to a HW errata in Aero controllers, reads to certain 263 * Fusion registers could intermittently return all zeroes. 264 * This behavior is transient in nature and subsequent reads will 265 * return valid value. As a workaround in driver, retry readl for 266 * upto three times until a non-zero value is read. 267 */ 268 if (instance->adapter_type == AERO_SERIES) { 269 do { 270 ret_val = readl(addr); 271 i++; 272 } while (ret_val == 0 && i < 3); 273 return ret_val; 274 } else { 275 return readl(addr); 276 } 277 } 278 279 /** 280 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs 281 * @instance: Adapter soft state 282 * @dcmd: DCMD frame inside MFI command 283 * @dma_addr: DMA address of buffer to be passed to FW 284 * @dma_len: Length of DMA buffer to be passed to FW 285 * @return: void 286 */ 287 void megasas_set_dma_settings(struct megasas_instance *instance, 288 struct megasas_dcmd_frame *dcmd, 289 dma_addr_t dma_addr, u32 dma_len) 290 { 291 if (instance->consistent_mask_64bit) { 292 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); 293 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); 294 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); 295 296 } else { 297 dcmd->sgl.sge32[0].phys_addr = 298 cpu_to_le32(lower_32_bits(dma_addr)); 299 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); 300 dcmd->flags = cpu_to_le16(dcmd->flags); 301 } 302 } 303 304 static void 305 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 306 { 307 instance->instancet->fire_cmd(instance, 308 cmd->frame_phys_addr, 0, instance->reg_set); 309 return; 310 } 311 312 /** 313 * megasas_get_cmd - Get a command from the free pool 314 * @instance: Adapter soft state 315 * 316 * Returns a free command from the pool 317 */ 318 struct megasas_cmd *megasas_get_cmd(struct megasas_instance 319 *instance) 320 { 321 unsigned long flags; 322 struct megasas_cmd *cmd = NULL; 323 324 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 325 326 if (!list_empty(&instance->cmd_pool)) { 327 cmd = list_entry((&instance->cmd_pool)->next, 328 struct megasas_cmd, list); 329 list_del_init(&cmd->list); 330 } else { 331 dev_err(&instance->pdev->dev, "Command pool empty!\n"); 332 } 333 334 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 335 return cmd; 336 } 337 338 /** 339 * megasas_return_cmd - Return a cmd to free command pool 340 * @instance: Adapter soft state 341 * @cmd: Command packet to be returned to free command pool 342 */ 343 void 344 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 345 { 346 unsigned long flags; 347 u32 blk_tags; 348 struct megasas_cmd_fusion *cmd_fusion; 349 struct fusion_context *fusion = instance->ctrl_context; 350 351 /* This flag is used only for fusion adapter. 
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			 le32_to_cpu(evt_detail->seq_num),
			 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			 (class_locale.members.locale),
			 format_class(class_locale.members.class),
			 evt_detail->description);

	if (megasas_dbg_lvl & LD_PD_DEBUG)
		dev_info(&instance->pdev->dev,
			 "evt_detail.args.ld.target_id/index %d/%d\n",
			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);

}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}
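/*
 * A note on the "dummy readl" pattern used throughout the register helpers
 * in this file: MMIO writes over PCI are posted, so each enable/disable/clear
 * routine reads back the register it just wrote in order to push the write
 * out to the controller before continuing.  The recurring shape is roughly:
 *
 *	writel(0, &regs->outbound_intr_mask);	// unmask interrupts
 *	readl(&regs->outbound_intr_mask);	// flush the posted write
 *
 * The value returned by the flush read is intentionally ignored.
 */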
/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_intr_xscale - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}
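/*
 * Worked example (illustrative values only): megasas_fire_cmd_xscale() above
 * posts a command by packing the frame address and frame count into a single
 * 32-bit doorbell write.  MFI frames are 64-byte units handed out from a
 * suitably aligned DMA pool, so shifting the address right by 3 still leaves
 * the low bits free to carry the count.  For a hypothetical frame_phys_addr
 * of 0x1f400040 and frame_count of 2:
 *
 *	(0x1f400040 >> 3) | 2 == 0x03e80008 | 2 == 0x03e8000a
 *
 * The ppc/gen2/skinny variants further below use a different encoding,
 * frame_phys_addr | (frame_count << 1) | 1.
 */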
/**
 * megasas_check_reset_xscale - For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
	     MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc - Disable interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_ppc - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}
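/*
 * Mask arithmetic for the ppc path above, spelled out: enabling interrupts
 * writes ~0x80000000 == 0x7fffffff to outbound_intr_mask, leaving only
 * bit 31 unmasked, which is presumably the same bit tested as
 * MFI_REPLY_1078_MESSAGE_INTERRUPT in megasas_clear_intr_ppc() (the define
 * lives in megaraid_sas.h and is not shown in this file).  Disabling simply
 * writes 0xFFFFFFFF to mask everything again.
 */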
/**
 * megasas_fire_cmd_ppc - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		     dma_addr_t frame_phys_addr,
		     u32 frame_count,
		     struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc - For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_skinny - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny - For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/*
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_gen2 - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
		      dma_addr_t frame_phys_addr,
		      u32 frame_count,
		      struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 - For controller reset
 * @instance:	Adapter soft state
 * @reg_set:	MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}
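/*
 * Timing sketch for megasas_adp_reset_gen2() above, derived purely from the
 * constants in the code: both polling loops sleep 100 ms per iteration and
 * share the same retry counter, so the wait for DIAG_WRITE_ENABLE is bounded
 * by roughly 100 * 100 ms = 10 s, and the wait for DIAG_RESET_ADAPTER to
 * clear by the remaining budget up to retry 1000, i.e. up to about 100 s,
 * on top of the fixed msleep(1000) and ssleep(10) delays.
 */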
/**
 * megasas_check_reset_gen2 - For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled - Issues a polling command
 * @instance:	Adapter soft state
 * @cmd:	Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * @instance:	Adapter soft state
 * @cmd:	Command to be issued
 * @timeout:	Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev,
				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
				cmd->frame->dcmd.opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT);

	return cmd->cmd_status_drv;
}
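/*
 * Usage sketch for the blocked-command helper above (a generic illustration,
 * not copied from any single caller in this file; the opcode and the local
 * buffer names are placeholders):
 *
 *	cmd = megasas_get_cmd(instance);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
 *	dcmd->sge_count = 1;
 *	dcmd->flags = MFI_FRAME_DIR_READ;
 *	dcmd->data_xfer_len = cpu_to_le32(buf_len);
 *	dcmd->opcode = cpu_to_le32(<some MR_DCMD_* opcode>);
 *	megasas_set_dma_settings(instance, dcmd, buf_dma_handle, buf_len);
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	...
 *	megasas_return_cmd(instance, cmd);
 *
 * The caller owns error handling for DCMD_TIMEOUT and for a NULL return
 * from megasas_get_cmd().
 */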
/**
 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
 * @instance:		Adapter soft state
 * @cmd_to_abort:	Previously issued cmd to be aborted
 * @timeout:		Timeout in seconds
 *
 * MFI firmware can abort a previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;
	u32 opcode;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			opcode = cmd_to_abort->frame->dcmd.opcode;
			dev_err(&instance->pdev->dev,
				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
				opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return cmd->cmd_status_drv;
}

/**
 * megasas_make_sgl32 - Prepares 32-bit SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}
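/*
 * Sizing note for the three SGL builders (the element sizes come from the
 * layouts in megaraid_sas.h, which is not reproduced here, so treat the
 * exact byte counts as an assumption): a 32-bit SGE carries a 4-byte address
 * plus a 4-byte length, a 64-bit SGE an 8-byte address plus a 4-byte length,
 * and a skinny/IEEE SGE additionally carries a 4-byte flag word.  These
 * per-element sizes are what megasas_get_frame_count() further below turns
 * into a frame count.  For example, with 64-bit SGEs (12 bytes each) an LD
 * read/write with 10 SG elements leaves 8 elements outside the main frame,
 * 8 * 12 = 96 bytes, which rounds up to 2 extra 64-byte frames and a total
 * frame_count of 3.
 */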
/**
 * megasas_make_sgl64 - Prepares 64-bit SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
			struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @instance:		Adapter soft state
 * @sge_count:		number of sg elements
 * @frame_type:		type of frame - io or pthru frame
 *
 * Returns the number of frames required for the given number of SGEs
 * (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
				   u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
 * @instance:	Adapter soft state
 * @scp:	SCSI command
 * @cmd:	Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
1408 */ 1409 static int 1410 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1411 struct megasas_cmd *cmd) 1412 { 1413 u32 is_logical; 1414 u32 device_id; 1415 u16 flags = 0; 1416 struct megasas_pthru_frame *pthru; 1417 1418 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1419 device_id = MEGASAS_DEV_INDEX(scp); 1420 pthru = (struct megasas_pthru_frame *)cmd->frame; 1421 1422 if (scp->sc_data_direction == DMA_TO_DEVICE) 1423 flags = MFI_FRAME_DIR_WRITE; 1424 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1425 flags = MFI_FRAME_DIR_READ; 1426 else if (scp->sc_data_direction == DMA_NONE) 1427 flags = MFI_FRAME_DIR_NONE; 1428 1429 if (instance->flag_ieee == 1) { 1430 flags |= MFI_FRAME_IEEE; 1431 } 1432 1433 /* 1434 * Prepare the DCDB frame 1435 */ 1436 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1437 pthru->cmd_status = 0x0; 1438 pthru->scsi_status = 0x0; 1439 pthru->target_id = device_id; 1440 pthru->lun = scp->device->lun; 1441 pthru->cdb_len = scp->cmd_len; 1442 pthru->timeout = 0; 1443 pthru->pad_0 = 0; 1444 pthru->flags = cpu_to_le16(flags); 1445 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1446 1447 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1448 1449 /* 1450 * If the command is for the tape device, set the 1451 * pthru timeout to the os layer timeout value. 1452 */ 1453 if (scp->device->type == TYPE_TAPE) { 1454 if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF) 1455 pthru->timeout = cpu_to_le16(0xFFFF); 1456 else 1457 pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ); 1458 } 1459 1460 /* 1461 * Construct SGL 1462 */ 1463 if (instance->flag_ieee == 1) { 1464 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1465 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1466 &pthru->sgl); 1467 } else if (IS_DMA64) { 1468 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1469 pthru->sge_count = megasas_make_sgl64(instance, scp, 1470 &pthru->sgl); 1471 } else 1472 pthru->sge_count = megasas_make_sgl32(instance, scp, 1473 &pthru->sgl); 1474 1475 if (pthru->sge_count > instance->max_num_sge) { 1476 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1477 pthru->sge_count); 1478 return 0; 1479 } 1480 1481 /* 1482 * Sense info specific 1483 */ 1484 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1485 pthru->sense_buf_phys_addr_hi = 1486 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1487 pthru->sense_buf_phys_addr_lo = 1488 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1489 1490 /* 1491 * Compute the total number of frames this command consumes. FW uses 1492 * this number to pull sufficient number of frames from host memory. 1493 */ 1494 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1495 PTHRU_FRAME); 1496 1497 return cmd->frame_count; 1498 } 1499 1500 /** 1501 * megasas_build_ldio - Prepares IOs to logical devices 1502 * @instance: Adapter soft state 1503 * @scp: SCSI command 1504 * @cmd: Command to be prepared 1505 * 1506 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
1507 */ 1508 static int 1509 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1510 struct megasas_cmd *cmd) 1511 { 1512 u32 device_id; 1513 u8 sc = scp->cmnd[0]; 1514 u16 flags = 0; 1515 struct megasas_io_frame *ldio; 1516 1517 device_id = MEGASAS_DEV_INDEX(scp); 1518 ldio = (struct megasas_io_frame *)cmd->frame; 1519 1520 if (scp->sc_data_direction == DMA_TO_DEVICE) 1521 flags = MFI_FRAME_DIR_WRITE; 1522 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1523 flags = MFI_FRAME_DIR_READ; 1524 1525 if (instance->flag_ieee == 1) { 1526 flags |= MFI_FRAME_IEEE; 1527 } 1528 1529 /* 1530 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1531 */ 1532 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1533 ldio->cmd_status = 0x0; 1534 ldio->scsi_status = 0x0; 1535 ldio->target_id = device_id; 1536 ldio->timeout = 0; 1537 ldio->reserved_0 = 0; 1538 ldio->pad_0 = 0; 1539 ldio->flags = cpu_to_le16(flags); 1540 ldio->start_lba_hi = 0; 1541 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1542 1543 /* 1544 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1545 */ 1546 if (scp->cmd_len == 6) { 1547 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1548 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1549 ((u32) scp->cmnd[2] << 8) | 1550 (u32) scp->cmnd[3]); 1551 1552 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1553 } 1554 1555 /* 1556 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1557 */ 1558 else if (scp->cmd_len == 10) { 1559 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1560 ((u32) scp->cmnd[7] << 8)); 1561 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1562 ((u32) scp->cmnd[3] << 16) | 1563 ((u32) scp->cmnd[4] << 8) | 1564 (u32) scp->cmnd[5]); 1565 } 1566 1567 /* 1568 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1569 */ 1570 else if (scp->cmd_len == 12) { 1571 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1572 ((u32) scp->cmnd[7] << 16) | 1573 ((u32) scp->cmnd[8] << 8) | 1574 (u32) scp->cmnd[9]); 1575 1576 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1577 ((u32) scp->cmnd[3] << 16) | 1578 ((u32) scp->cmnd[4] << 8) | 1579 (u32) scp->cmnd[5]); 1580 } 1581 1582 /* 1583 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1584 */ 1585 else if (scp->cmd_len == 16) { 1586 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1587 ((u32) scp->cmnd[11] << 16) | 1588 ((u32) scp->cmnd[12] << 8) | 1589 (u32) scp->cmnd[13]); 1590 1591 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1592 ((u32) scp->cmnd[7] << 16) | 1593 ((u32) scp->cmnd[8] << 8) | 1594 (u32) scp->cmnd[9]); 1595 1596 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1597 ((u32) scp->cmnd[3] << 16) | 1598 ((u32) scp->cmnd[4] << 8) | 1599 (u32) scp->cmnd[5]); 1600 1601 } 1602 1603 /* 1604 * Construct SGL 1605 */ 1606 if (instance->flag_ieee) { 1607 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1608 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1609 &ldio->sgl); 1610 } else if (IS_DMA64) { 1611 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1612 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1613 } else 1614 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1615 1616 if (ldio->sge_count > instance->max_num_sge) { 1617 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1618 ldio->sge_count); 1619 return 0; 1620 } 1621 1622 /* 1623 * Sense info specific 1624 */ 1625 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1626 ldio->sense_buf_phys_addr_hi = 0; 1627 
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1628 1629 /* 1630 * Compute the total number of frames this command consumes. FW uses 1631 * this number to pull sufficient number of frames from host memory. 1632 */ 1633 cmd->frame_count = megasas_get_frame_count(instance, 1634 ldio->sge_count, IO_FRAME); 1635 1636 return cmd->frame_count; 1637 } 1638 1639 /** 1640 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1641 * and whether it's RW or non RW 1642 * @cmd: SCSI command 1643 * 1644 */ 1645 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1646 { 1647 int ret; 1648 1649 switch (cmd->cmnd[0]) { 1650 case READ_10: 1651 case WRITE_10: 1652 case READ_12: 1653 case WRITE_12: 1654 case READ_6: 1655 case WRITE_6: 1656 case READ_16: 1657 case WRITE_16: 1658 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1659 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1660 break; 1661 default: 1662 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1663 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1664 } 1665 return ret; 1666 } 1667 1668 /** 1669 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1670 * in FW 1671 * @instance: Adapter soft state 1672 */ 1673 static inline void 1674 megasas_dump_pending_frames(struct megasas_instance *instance) 1675 { 1676 struct megasas_cmd *cmd; 1677 int i,n; 1678 union megasas_sgl *mfi_sgl; 1679 struct megasas_io_frame *ldio; 1680 struct megasas_pthru_frame *pthru; 1681 u32 sgcount; 1682 u16 max_cmd = instance->max_fw_cmds; 1683 1684 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1685 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1686 if (IS_DMA64) 1687 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1688 else 1689 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1690 1691 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1692 for (i = 0; i < max_cmd; i++) { 1693 cmd = instance->cmd_list[i]; 1694 if (!cmd->scmd) 1695 continue; 1696 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1697 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1698 ldio = (struct megasas_io_frame *)cmd->frame; 1699 mfi_sgl = &ldio->sgl; 1700 sgcount = ldio->sge_count; 1701 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1702 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1703 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1704 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1705 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1706 } else { 1707 pthru = (struct megasas_pthru_frame *) cmd->frame; 1708 mfi_sgl = &pthru->sgl; 1709 sgcount = pthru->sge_count; 1710 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1711 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1712 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1713 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1714 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1715 } 1716 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1717 for (n = 0; n < sgcount; n++) { 1718 if (IS_DMA64) 1719 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1720 le32_to_cpu(mfi_sgl->sge64[n].length), 1721 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1722 else 1723 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1724 le32_to_cpu(mfi_sgl->sge32[n].length), 1725 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1726 } 1727 } 1728 } /*for max_cmd*/ 1729 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1730 for (i = 0; i < max_cmd; i++) { 1731 1732 cmd = instance->cmd_list[i]; 1733 1734 if (cmd->sync_cmd == 1) 1735 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1736 } 1737 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1738 } 1739 1740 u32 1741 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1742 struct scsi_cmnd *scmd) 1743 { 1744 struct megasas_cmd *cmd; 1745 u32 frame_count; 1746 1747 cmd = megasas_get_cmd(instance); 1748 if (!cmd) 1749 return SCSI_MLQUEUE_HOST_BUSY; 1750 1751 /* 1752 * Logical drive command 1753 */ 1754 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1755 frame_count = megasas_build_ldio(instance, scmd, cmd); 1756 else 1757 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1758 1759 if (!frame_count) 1760 goto out_return_cmd; 1761 1762 cmd->scmd = scmd; 1763 megasas_priv(scmd)->cmd_priv = cmd; 1764 1765 /* 1766 * Issue the command to the FW 1767 */ 1768 atomic_inc(&instance->fw_outstanding); 1769 1770 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1771 cmd->frame_count-1, instance->reg_set); 1772 1773 return 0; 1774 out_return_cmd: 1775 megasas_return_cmd(instance, cmd); 1776 return SCSI_MLQUEUE_HOST_BUSY; 1777 } 1778 1779 1780 /** 1781 * megasas_queue_command - Queue entry point 1782 * @shost: adapter SCSI host 1783 * @scmd: SCSI command to be queued 1784 */ 1785 static int 1786 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1787 { 1788 struct megasas_instance *instance; 1789 struct MR_PRIV_DEVICE *mr_device_priv_data; 1790 u32 ld_tgt_id; 1791 1792 instance = (struct megasas_instance *) 1793 scmd->device->host->hostdata; 1794 1795 if (instance->unload == 1) { 1796 scmd->result = DID_NO_CONNECT << 16; 1797 scsi_done(scmd); 1798 return 0; 1799 } 1800 1801 if (instance->issuepend_done == 0) 1802 return SCSI_MLQUEUE_HOST_BUSY; 1803 1804 1805 /* Check for an mpio path and adjust behavior */ 1806 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1807 if (megasas_check_mpio_paths(instance, scmd) == 1808 (DID_REQUEUE << 16)) { 1809 return SCSI_MLQUEUE_HOST_BUSY; 1810 } else { 1811 scmd->result = DID_NO_CONNECT << 16; 1812 scsi_done(scmd); 1813 return 0; 1814 } 1815 } 1816 1817 mr_device_priv_data = scmd->device->hostdata; 1818 if (!mr_device_priv_data || 1819 (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) { 1820 scmd->result = DID_NO_CONNECT << 16; 1821 scsi_done(scmd); 1822 return 0; 1823 } 1824 1825 if (MEGASAS_IS_LOGICAL(scmd->device)) { 1826 ld_tgt_id = MEGASAS_TARGET_ID(scmd->device); 1827 if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) { 1828 scmd->result = DID_NO_CONNECT << 16; 1829 scsi_done(scmd); 1830 return 0; 1831 } 1832 } 1833 1834 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1835 return SCSI_MLQUEUE_HOST_BUSY; 1836 1837 if (mr_device_priv_data->tm_busy) 1838 return SCSI_MLQUEUE_DEVICE_BUSY; 1839 1840 1841 scmd->result = 0; 1842 1843 if (MEGASAS_IS_LOGICAL(scmd->device) && 1844 (scmd->device->id >= 
instance->fw_supported_vd_count || 1845 scmd->device->lun)) { 1846 scmd->result = DID_BAD_TARGET << 16; 1847 goto out_done; 1848 } 1849 1850 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1851 MEGASAS_IS_LOGICAL(scmd->device) && 1852 (!instance->fw_sync_cache_support)) { 1853 scmd->result = DID_OK << 16; 1854 goto out_done; 1855 } 1856 1857 return instance->instancet->build_and_issue_cmd(instance, scmd); 1858 1859 out_done: 1860 scsi_done(scmd); 1861 return 0; 1862 } 1863 1864 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1865 { 1866 int i; 1867 1868 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1869 1870 if ((megasas_mgmt_info.instance[i]) && 1871 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1872 return megasas_mgmt_info.instance[i]; 1873 } 1874 1875 return NULL; 1876 } 1877 1878 /* 1879 * megasas_set_dynamic_target_properties - 1880 * Device property set by driver may not be static and it is required to be 1881 * updated after OCR 1882 * 1883 * set tm_capable. 1884 * set dma alignment (only for eedp protection enable vd). 1885 * 1886 * @sdev: OS provided scsi device 1887 * 1888 * Returns void 1889 */ 1890 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1891 bool is_target_prop) 1892 { 1893 u16 pd_index = 0, ld; 1894 u32 device_id; 1895 struct megasas_instance *instance; 1896 struct fusion_context *fusion; 1897 struct MR_PRIV_DEVICE *mr_device_priv_data; 1898 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1899 struct MR_LD_RAID *raid; 1900 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1901 1902 instance = megasas_lookup_instance(sdev->host->host_no); 1903 fusion = instance->ctrl_context; 1904 mr_device_priv_data = sdev->hostdata; 1905 1906 if (!fusion || !mr_device_priv_data) 1907 return; 1908 1909 if (MEGASAS_IS_LOGICAL(sdev)) { 1910 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1911 + sdev->id; 1912 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1913 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1914 if (ld >= instance->fw_supported_vd_count) 1915 return; 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1919 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1920 1921 mr_device_priv_data->is_tm_capable = 1922 raid->capability.tmCapable; 1923 1924 if (!raid->flags.isEPD) 1925 sdev->no_write_same = 1; 1926 1927 } else if (instance->use_seqnum_jbod_fp) { 1928 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1929 sdev->id; 1930 pd_sync = (void *)fusion->pd_seq_sync 1931 [(instance->pd_seq_map_id - 1) & 1]; 1932 mr_device_priv_data->is_tm_capable = 1933 pd_sync->seq[pd_index].capability.tmCapable; 1934 } 1935 1936 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1937 /* 1938 * If FW provides a target reset timeout value, driver will use 1939 * it. If not set, fallback to default values. 1940 */ 1941 mr_device_priv_data->target_reset_tmo = 1942 min_t(u8, instance->max_reset_tmo, 1943 instance->tgt_prop->reset_tmo); 1944 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1945 } else { 1946 mr_device_priv_data->target_reset_tmo = 1947 MEGASAS_DEFAULT_TM_TIMEOUT; 1948 mr_device_priv_data->task_abort_tmo = 1949 MEGASAS_DEFAULT_TM_TIMEOUT; 1950 } 1951 } 1952 1953 /* 1954 * megasas_set_nvme_device_properties - 1955 * set nomerges=2 1956 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1957 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 
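* (Illustrative note: the cap below is applied through blk_queue_max_hw_sectors() assuming 512-byte sectors, so a 128 KB limit becomes 256 sectors.)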
1958 * 1959 * MR firmware provides value in KB. Caller of this function converts 1960 * kb into bytes. 1961 * 1962 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1963 * MR firmware provides value 128 as (32 * 4K) = 128K. 1964 * 1965 * @sdev: scsi device 1966 * @max_io_size: maximum io transfer size 1967 * 1968 */ 1969 static inline void 1970 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1971 { 1972 struct megasas_instance *instance; 1973 u32 mr_nvme_pg_size; 1974 1975 instance = (struct megasas_instance *)sdev->host->hostdata; 1976 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1977 MR_DEFAULT_NVME_PAGE_SIZE); 1978 1979 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1980 1981 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1982 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1983 } 1984 1985 /* 1986 * megasas_set_fw_assisted_qd - 1987 * set device queue depth to can_queue 1988 * set device queue depth to fw assisted qd 1989 * 1990 * @sdev: scsi device 1991 * @is_target_prop true, if fw provided target properties. 1992 */ 1993 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, 1994 bool is_target_prop) 1995 { 1996 u8 interface_type; 1997 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1998 u32 tgt_device_qd; 1999 struct megasas_instance *instance; 2000 struct MR_PRIV_DEVICE *mr_device_priv_data; 2001 2002 instance = megasas_lookup_instance(sdev->host->host_no); 2003 mr_device_priv_data = sdev->hostdata; 2004 interface_type = mr_device_priv_data->interface_type; 2005 2006 switch (interface_type) { 2007 case SAS_PD: 2008 device_qd = MEGASAS_SAS_QD; 2009 break; 2010 case SATA_PD: 2011 device_qd = MEGASAS_SATA_QD; 2012 break; 2013 case NVME_PD: 2014 device_qd = MEGASAS_NVME_QD; 2015 break; 2016 } 2017 2018 if (is_target_prop) { 2019 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 2020 if (tgt_device_qd) 2021 device_qd = min(instance->host->can_queue, 2022 (int)tgt_device_qd); 2023 } 2024 2025 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) 2026 device_qd = instance->host->can_queue; 2027 2028 scsi_change_queue_depth(sdev, device_qd); 2029 } 2030 2031 /* 2032 * megasas_set_static_target_properties - 2033 * Device property set by driver are static and it is not required to be 2034 * updated after OCR. 2035 * 2036 * set io timeout 2037 * set device queue depth 2038 * set nvme device properties. see - megasas_set_nvme_device_properties 2039 * 2040 * @sdev: scsi device 2041 * @is_target_prop true, if fw provided target properties. 2042 */ 2043 static void megasas_set_static_target_properties(struct scsi_device *sdev, 2044 bool is_target_prop) 2045 { 2046 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 2047 struct megasas_instance *instance; 2048 2049 instance = megasas_lookup_instance(sdev->host->host_no); 2050 2051 /* 2052 * The RAID firmware may require extended timeouts. 2053 */ 2054 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 2055 2056 /* max_io_size_kb will be set to non zero for 2057 * nvme based vd and syspd. 
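* For example (illustrative): max_io_size_kb = 128 reported by FW is passed below as 128 << 10 = 131072 bytes.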
2058 */ 2059 if (is_target_prop) 2060 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2061 2062 if (instance->nvme_page_size && max_io_size_kb) 2063 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 2064 2065 megasas_set_fw_assisted_qd(sdev, is_target_prop); 2066 } 2067 2068 2069 static int megasas_slave_configure(struct scsi_device *sdev) 2070 { 2071 u16 pd_index = 0; 2072 struct megasas_instance *instance; 2073 int ret_target_prop = DCMD_FAILED; 2074 bool is_target_prop = false; 2075 2076 instance = megasas_lookup_instance(sdev->host->host_no); 2077 if (instance->pd_list_not_supported) { 2078 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2079 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2080 sdev->id; 2081 if (instance->pd_list[pd_index].driveState != 2082 MR_PD_STATE_SYSTEM) 2083 return -ENXIO; 2084 } 2085 } 2086 2087 mutex_lock(&instance->reset_mutex); 2088 /* Send DCMD to Firmware and cache the information */ 2089 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2090 megasas_get_pd_info(instance, sdev); 2091 2092 /* Some ventura firmware may not have instance->nvme_page_size set. 2093 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2094 */ 2095 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2096 ret_target_prop = megasas_get_target_prop(instance, sdev); 2097 2098 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2099 megasas_set_static_target_properties(sdev, is_target_prop); 2100 2101 /* This sdev property may change post OCR */ 2102 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2103 2104 mutex_unlock(&instance->reset_mutex); 2105 2106 return 0; 2107 } 2108 2109 static int megasas_slave_alloc(struct scsi_device *sdev) 2110 { 2111 u16 pd_index = 0, ld_tgt_id; 2112 struct megasas_instance *instance ; 2113 struct MR_PRIV_DEVICE *mr_device_priv_data; 2114 2115 instance = megasas_lookup_instance(sdev->host->host_no); 2116 if (!MEGASAS_IS_LOGICAL(sdev)) { 2117 /* 2118 * Open the OS scan to the SYSTEM PD 2119 */ 2120 pd_index = 2121 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2122 sdev->id; 2123 if ((instance->pd_list_not_supported || 2124 instance->pd_list[pd_index].driveState == 2125 MR_PD_STATE_SYSTEM)) { 2126 goto scan_target; 2127 } 2128 return -ENXIO; 2129 } else if (!MEGASAS_IS_LUN_VALID(sdev)) { 2130 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2131 return -ENXIO; 2132 } 2133 2134 scan_target: 2135 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2136 GFP_KERNEL); 2137 if (!mr_device_priv_data) 2138 return -ENOMEM; 2139 2140 if (MEGASAS_IS_LOGICAL(sdev)) { 2141 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2142 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; 2143 if (megasas_dbg_lvl & LD_PD_DEBUG) 2144 sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); 2145 } 2146 2147 sdev->hostdata = mr_device_priv_data; 2148 2149 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2150 instance->r1_ldio_hint_default); 2151 return 0; 2152 } 2153 2154 static void megasas_slave_destroy(struct scsi_device *sdev) 2155 { 2156 u16 ld_tgt_id; 2157 struct megasas_instance *instance; 2158 2159 instance = megasas_lookup_instance(sdev->host->host_no); 2160 2161 if (MEGASAS_IS_LOGICAL(sdev)) { 2162 if (!MEGASAS_IS_LUN_VALID(sdev)) { 2163 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2164 return; 2165 } 2166 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2167 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; 2168 if (megasas_dbg_lvl & 
LD_PD_DEBUG) 2169 sdev_printk(KERN_INFO, sdev, 2170 "LD target ID %d removed from OS stack\n", ld_tgt_id); 2171 } 2172 2173 kfree(sdev->hostdata); 2174 sdev->hostdata = NULL; 2175 } 2176 2177 /* 2178 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2179 * kill adapter 2180 * @instance: Adapter soft state 2181 * 2182 */ 2183 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2184 { 2185 int i; 2186 struct megasas_cmd *cmd_mfi; 2187 struct megasas_cmd_fusion *cmd_fusion; 2188 struct fusion_context *fusion = instance->ctrl_context; 2189 2190 /* Find all outstanding ioctls */ 2191 if (fusion) { 2192 for (i = 0; i < instance->max_fw_cmds; i++) { 2193 cmd_fusion = fusion->cmd_list[i]; 2194 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2195 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2196 if (cmd_mfi->sync_cmd && 2197 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2198 cmd_mfi->frame->hdr.cmd_status = 2199 MFI_STAT_WRONG_STATE; 2200 megasas_complete_cmd(instance, 2201 cmd_mfi, DID_OK); 2202 } 2203 } 2204 } 2205 } else { 2206 for (i = 0; i < instance->max_fw_cmds; i++) { 2207 cmd_mfi = instance->cmd_list[i]; 2208 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2209 MFI_CMD_ABORT) 2210 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2211 } 2212 } 2213 } 2214 2215 2216 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2217 { 2218 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2219 dev_warn(&instance->pdev->dev, 2220 "Adapter already dead, skipping kill HBA\n"); 2221 return; 2222 } 2223 2224 /* Set critical error to block I/O & ioctls in case caller didn't */ 2225 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2226 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2227 msleep(1000); 2228 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2229 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2230 (instance->adapter_type != MFI_SERIES)) { 2231 if (!instance->requestorId) { 2232 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2233 /* Flush */ 2234 readl(&instance->reg_set->doorbell); 2235 } 2236 if (instance->requestorId && instance->peerIsPresent) 2237 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2238 } else { 2239 writel(MFI_STOP_ADP, 2240 &instance->reg_set->inbound_doorbell); 2241 } 2242 /* Complete outstanding ioctls when adapter is killed */ 2243 megasas_complete_outstanding_ioctls(instance); 2244 } 2245 2246 /** 2247 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2248 * restored to max value 2249 * @instance: Adapter soft state 2250 * 2251 */ 2252 void 2253 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2254 { 2255 unsigned long flags; 2256 2257 if (instance->flag & MEGASAS_FW_BUSY 2258 && time_after(jiffies, instance->last_time + 5 * HZ) 2259 && atomic_read(&instance->fw_outstanding) < 2260 instance->throttlequeuedepth + 1) { 2261 2262 spin_lock_irqsave(instance->host->host_lock, flags); 2263 instance->flag &= ~MEGASAS_FW_BUSY; 2264 2265 instance->host->can_queue = instance->cur_can_queue; 2266 spin_unlock_irqrestore(instance->host->host_lock, flags); 2267 } 2268 } 2269 2270 /** 2271 * megasas_complete_cmd_dpc - Returns FW's controller structure 2272 * @instance_addr: Address of adapter soft state 2273 * 2274 * Tasklet to complete cmds 2275 */ 2276 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2277 { 2278 u32 producer; 2279 u32 consumer; 
2280 u32 context; 2281 struct megasas_cmd *cmd; 2282 struct megasas_instance *instance = 2283 (struct megasas_instance *)instance_addr; 2284 unsigned long flags; 2285 2286 /* If we have already declared adapter dead, donot complete cmds */ 2287 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2288 return; 2289 2290 spin_lock_irqsave(&instance->completion_lock, flags); 2291 2292 producer = le32_to_cpu(*instance->producer); 2293 consumer = le32_to_cpu(*instance->consumer); 2294 2295 while (consumer != producer) { 2296 context = le32_to_cpu(instance->reply_queue[consumer]); 2297 if (context >= instance->max_fw_cmds) { 2298 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2299 context); 2300 BUG(); 2301 } 2302 2303 cmd = instance->cmd_list[context]; 2304 2305 megasas_complete_cmd(instance, cmd, DID_OK); 2306 2307 consumer++; 2308 if (consumer == (instance->max_fw_cmds + 1)) { 2309 consumer = 0; 2310 } 2311 } 2312 2313 *instance->consumer = cpu_to_le32(producer); 2314 2315 spin_unlock_irqrestore(&instance->completion_lock, flags); 2316 2317 /* 2318 * Check if we can restore can_queue 2319 */ 2320 megasas_check_and_restore_queue_depth(instance); 2321 } 2322 2323 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2324 2325 /** 2326 * megasas_start_timer - Initializes sriov heartbeat timer object 2327 * @instance: Adapter soft state 2328 * 2329 */ 2330 void megasas_start_timer(struct megasas_instance *instance) 2331 { 2332 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2333 2334 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2335 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2336 add_timer(timer); 2337 } 2338 2339 static void 2340 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2341 2342 static void 2343 process_fw_state_change_wq(struct work_struct *work); 2344 2345 static void megasas_do_ocr(struct megasas_instance *instance) 2346 { 2347 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2348 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2349 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2350 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2351 } 2352 instance->instancet->disable_intr(instance); 2353 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2354 instance->issuepend_done = 0; 2355 2356 atomic_set(&instance->fw_outstanding, 0); 2357 megasas_internal_reset_defer_cmds(instance); 2358 process_fw_state_change_wq(&instance->work_init); 2359 } 2360 2361 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2362 int initial) 2363 { 2364 struct megasas_cmd *cmd; 2365 struct megasas_dcmd_frame *dcmd; 2366 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2367 dma_addr_t new_affiliation_111_h; 2368 int ld, retval = 0; 2369 u8 thisVf; 2370 2371 cmd = megasas_get_cmd(instance); 2372 2373 if (!cmd) { 2374 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2375 "Failed to get cmd for scsi%d\n", 2376 instance->host->host_no); 2377 return -ENOMEM; 2378 } 2379 2380 dcmd = &cmd->frame->dcmd; 2381 2382 if (!instance->vf_affiliation_111) { 2383 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2384 "affiliation for scsi%d\n", instance->host->host_no); 2385 megasas_return_cmd(instance, cmd); 2386 return -ENOMEM; 2387 } 2388 2389 if (initial) 2390 memset(instance->vf_affiliation_111, 0, 2391 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2392 else { 2393 
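/* Not the initial query: fetch the current map into a scratch DMA buffer so it can be compared with the cached copy before overwriting it. */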
new_affiliation_111 = 2394 dma_alloc_coherent(&instance->pdev->dev, 2395 sizeof(struct MR_LD_VF_AFFILIATION_111), 2396 &new_affiliation_111_h, GFP_KERNEL); 2397 if (!new_affiliation_111) { 2398 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2399 "memory for new affiliation for scsi%d\n", 2400 instance->host->host_no); 2401 megasas_return_cmd(instance, cmd); 2402 return -ENOMEM; 2403 } 2404 } 2405 2406 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2407 2408 dcmd->cmd = MFI_CMD_DCMD; 2409 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2410 dcmd->sge_count = 1; 2411 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2412 dcmd->timeout = 0; 2413 dcmd->pad_0 = 0; 2414 dcmd->data_xfer_len = 2415 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2416 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2417 2418 if (initial) 2419 dcmd->sgl.sge32[0].phys_addr = 2420 cpu_to_le32(instance->vf_affiliation_111_h); 2421 else 2422 dcmd->sgl.sge32[0].phys_addr = 2423 cpu_to_le32(new_affiliation_111_h); 2424 2425 dcmd->sgl.sge32[0].length = cpu_to_le32( 2426 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2427 2428 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2429 "scsi%d\n", instance->host->host_no); 2430 2431 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2432 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2433 " failed with status 0x%x for scsi%d\n", 2434 dcmd->cmd_status, instance->host->host_no); 2435 retval = 1; /* Do a scan if we couldn't get affiliation */ 2436 goto out; 2437 } 2438 2439 if (!initial) { 2440 thisVf = new_affiliation_111->thisVf; 2441 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2442 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2443 new_affiliation_111->map[ld].policy[thisVf]) { 2444 dev_warn(&instance->pdev->dev, "SR-IOV: " 2445 "Got new LD/VF affiliation for scsi%d\n", 2446 instance->host->host_no); 2447 memcpy(instance->vf_affiliation_111, 2448 new_affiliation_111, 2449 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2450 retval = 1; 2451 goto out; 2452 } 2453 } 2454 out: 2455 if (new_affiliation_111) { 2456 dma_free_coherent(&instance->pdev->dev, 2457 sizeof(struct MR_LD_VF_AFFILIATION_111), 2458 new_affiliation_111, 2459 new_affiliation_111_h); 2460 } 2461 2462 megasas_return_cmd(instance, cmd); 2463 2464 return retval; 2465 } 2466 2467 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2468 int initial) 2469 { 2470 struct megasas_cmd *cmd; 2471 struct megasas_dcmd_frame *dcmd; 2472 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2473 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2474 dma_addr_t new_affiliation_h; 2475 int i, j, retval = 0, found = 0, doscan = 0; 2476 u8 thisVf; 2477 2478 cmd = megasas_get_cmd(instance); 2479 2480 if (!cmd) { 2481 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2482 "Failed to get cmd for scsi%d\n", 2483 instance->host->host_no); 2484 return -ENOMEM; 2485 } 2486 2487 dcmd = &cmd->frame->dcmd; 2488 2489 if (!instance->vf_affiliation) { 2490 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2491 "affiliation for scsi%d\n", instance->host->host_no); 2492 megasas_return_cmd(instance, cmd); 2493 return -ENOMEM; 2494 } 2495 2496 if (initial) 2497 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2498 sizeof(struct MR_LD_VF_AFFILIATION)); 2499 else { 2500 new_affiliation = 2501 dma_alloc_coherent(&instance->pdev->dev, 2502 (MAX_LOGICAL_DRIVES + 1) * 
sizeof(struct MR_LD_VF_AFFILIATION), 2503 &new_affiliation_h, GFP_KERNEL); 2504 if (!new_affiliation) { 2505 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2506 "memory for new affiliation for scsi%d\n", 2507 instance->host->host_no); 2508 megasas_return_cmd(instance, cmd); 2509 return -ENOMEM; 2510 } 2511 } 2512 2513 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2514 2515 dcmd->cmd = MFI_CMD_DCMD; 2516 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2517 dcmd->sge_count = 1; 2518 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2519 dcmd->timeout = 0; 2520 dcmd->pad_0 = 0; 2521 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2522 sizeof(struct MR_LD_VF_AFFILIATION)); 2523 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2524 2525 if (initial) 2526 dcmd->sgl.sge32[0].phys_addr = 2527 cpu_to_le32(instance->vf_affiliation_h); 2528 else 2529 dcmd->sgl.sge32[0].phys_addr = 2530 cpu_to_le32(new_affiliation_h); 2531 2532 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2533 sizeof(struct MR_LD_VF_AFFILIATION)); 2534 2535 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2536 "scsi%d\n", instance->host->host_no); 2537 2538 2539 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2540 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2541 " failed with status 0x%x for scsi%d\n", 2542 dcmd->cmd_status, instance->host->host_no); 2543 retval = 1; /* Do a scan if we couldn't get affiliation */ 2544 goto out; 2545 } 2546 2547 if (!initial) { 2548 if (!new_affiliation->ldCount) { 2549 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2550 "affiliation for passive path for scsi%d\n", 2551 instance->host->host_no); 2552 retval = 1; 2553 goto out; 2554 } 2555 newmap = new_affiliation->map; 2556 savedmap = instance->vf_affiliation->map; 2557 thisVf = new_affiliation->thisVf; 2558 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2559 found = 0; 2560 for (j = 0; j < instance->vf_affiliation->ldCount; 2561 j++) { 2562 if (newmap->ref.targetId == 2563 savedmap->ref.targetId) { 2564 found = 1; 2565 if (newmap->policy[thisVf] != 2566 savedmap->policy[thisVf]) { 2567 doscan = 1; 2568 goto out; 2569 } 2570 } 2571 savedmap = (struct MR_LD_VF_MAP *) 2572 ((unsigned char *)savedmap + 2573 savedmap->size); 2574 } 2575 if (!found && newmap->policy[thisVf] != 2576 MR_LD_ACCESS_HIDDEN) { 2577 doscan = 1; 2578 goto out; 2579 } 2580 newmap = (struct MR_LD_VF_MAP *) 2581 ((unsigned char *)newmap + newmap->size); 2582 } 2583 2584 newmap = new_affiliation->map; 2585 savedmap = instance->vf_affiliation->map; 2586 2587 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2588 found = 0; 2589 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2590 if (savedmap->ref.targetId == 2591 newmap->ref.targetId) { 2592 found = 1; 2593 if (savedmap->policy[thisVf] != 2594 newmap->policy[thisVf]) { 2595 doscan = 1; 2596 goto out; 2597 } 2598 } 2599 newmap = (struct MR_LD_VF_MAP *) 2600 ((unsigned char *)newmap + 2601 newmap->size); 2602 } 2603 if (!found && savedmap->policy[thisVf] != 2604 MR_LD_ACCESS_HIDDEN) { 2605 doscan = 1; 2606 goto out; 2607 } 2608 savedmap = (struct MR_LD_VF_MAP *) 2609 ((unsigned char *)savedmap + 2610 savedmap->size); 2611 } 2612 } 2613 out: 2614 if (doscan) { 2615 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2616 "affiliation for scsi%d\n", instance->host->host_no); 2617 memcpy(instance->vf_affiliation, new_affiliation, 2618 new_affiliation->size); 2619 retval = 1; 2620 } 2621 2622 if 
(new_affiliation) 2623 dma_free_coherent(&instance->pdev->dev, 2624 (MAX_LOGICAL_DRIVES + 1) * 2625 sizeof(struct MR_LD_VF_AFFILIATION), 2626 new_affiliation, new_affiliation_h); 2627 megasas_return_cmd(instance, cmd); 2628 2629 return retval; 2630 } 2631 2632 /* This function will get the current SR-IOV LD/VF affiliation */ 2633 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2634 int initial) 2635 { 2636 int retval; 2637 2638 if (instance->PlasmaFW111) 2639 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2640 else 2641 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2642 return retval; 2643 } 2644 2645 /* This function will tell FW to start the SR-IOV heartbeat */ 2646 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2647 int initial) 2648 { 2649 struct megasas_cmd *cmd; 2650 struct megasas_dcmd_frame *dcmd; 2651 int retval = 0; 2652 2653 cmd = megasas_get_cmd(instance); 2654 2655 if (!cmd) { 2656 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2657 "Failed to get cmd for scsi%d\n", 2658 instance->host->host_no); 2659 return -ENOMEM; 2660 } 2661 2662 dcmd = &cmd->frame->dcmd; 2663 2664 if (initial) { 2665 instance->hb_host_mem = 2666 dma_alloc_coherent(&instance->pdev->dev, 2667 sizeof(struct MR_CTRL_HB_HOST_MEM), 2668 &instance->hb_host_mem_h, 2669 GFP_KERNEL); 2670 if (!instance->hb_host_mem) { 2671 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2672 " memory for heartbeat host memory for scsi%d\n", 2673 instance->host->host_no); 2674 retval = -ENOMEM; 2675 goto out; 2676 } 2677 } 2678 2679 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2680 2681 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2682 dcmd->cmd = MFI_CMD_DCMD; 2683 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2684 dcmd->sge_count = 1; 2685 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2686 dcmd->timeout = 0; 2687 dcmd->pad_0 = 0; 2688 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2689 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2690 2691 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2692 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2693 2694 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2695 instance->host->host_no); 2696 2697 if ((instance->adapter_type != MFI_SERIES) && 2698 !instance->mask_interrupts) 2699 retval = megasas_issue_blocked_cmd(instance, cmd, 2700 MEGASAS_ROUTINE_WAIT_TIME_VF); 2701 else 2702 retval = megasas_issue_polled(instance, cmd); 2703 2704 if (retval) { 2705 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2706 "_MEM_ALLOC DCMD %s for scsi%d\n", 2707 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 
2708 "timed out" : "failed", instance->host->host_no); 2709 retval = 1; 2710 } 2711 2712 out: 2713 megasas_return_cmd(instance, cmd); 2714 2715 return retval; 2716 } 2717 2718 /* Handler for SR-IOV heartbeat */ 2719 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2720 { 2721 struct megasas_instance *instance = 2722 from_timer(instance, t, sriov_heartbeat_timer); 2723 2724 if (instance->hb_host_mem->HB.fwCounter != 2725 instance->hb_host_mem->HB.driverCounter) { 2726 instance->hb_host_mem->HB.driverCounter = 2727 instance->hb_host_mem->HB.fwCounter; 2728 mod_timer(&instance->sriov_heartbeat_timer, 2729 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2730 } else { 2731 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2732 "completed for scsi%d\n", instance->host->host_no); 2733 schedule_work(&instance->work_init); 2734 } 2735 } 2736 2737 /** 2738 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2739 * @instance: Adapter soft state 2740 * 2741 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2742 * complete all its outstanding commands. Returns error if one or more IOs 2743 * are pending after this time period. It also marks the controller dead. 2744 */ 2745 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2746 { 2747 int i, sl, outstanding; 2748 u32 reset_index; 2749 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2750 unsigned long flags; 2751 struct list_head clist_local; 2752 struct megasas_cmd *reset_cmd; 2753 u32 fw_state; 2754 2755 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2756 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2757 __func__, __LINE__); 2758 return FAILED; 2759 } 2760 2761 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2762 2763 INIT_LIST_HEAD(&clist_local); 2764 spin_lock_irqsave(&instance->hba_lock, flags); 2765 list_splice_init(&instance->internal_reset_pending_q, 2766 &clist_local); 2767 spin_unlock_irqrestore(&instance->hba_lock, flags); 2768 2769 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2770 for (i = 0; i < wait_time; i++) { 2771 msleep(1000); 2772 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2773 break; 2774 } 2775 2776 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2777 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2778 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2779 return FAILED; 2780 } 2781 2782 reset_index = 0; 2783 while (!list_empty(&clist_local)) { 2784 reset_cmd = list_entry((&clist_local)->next, 2785 struct megasas_cmd, list); 2786 list_del_init(&reset_cmd->list); 2787 if (reset_cmd->scmd) { 2788 reset_cmd->scmd->result = DID_REQUEUE << 16; 2789 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2790 reset_index, reset_cmd, 2791 reset_cmd->scmd->cmnd[0]); 2792 2793 scsi_done(reset_cmd->scmd); 2794 megasas_return_cmd(instance, reset_cmd); 2795 } else if (reset_cmd->sync_cmd) { 2796 dev_notice(&instance->pdev->dev, "%p synch cmds" 2797 "reset queue\n", 2798 reset_cmd); 2799 2800 reset_cmd->cmd_status_drv = DCMD_INIT; 2801 instance->instancet->fire_cmd(instance, 2802 reset_cmd->frame_phys_addr, 2803 0, instance->reg_set); 2804 } else { 2805 dev_notice(&instance->pdev->dev, "%p unexpected" 2806 "cmds lst\n", 2807 reset_cmd); 2808 } 2809 reset_index++; 2810 } 2811 2812 return SUCCESS; 2813 } 2814 2815 for (i = 0; i < resetwaittime; i++) { 2816 outstanding = atomic_read(&instance->fw_outstanding); 2817 2818 if 
(!outstanding) 2819 break; 2820 2821 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2822 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2823 "commands to complete\n",i,outstanding); 2824 /* 2825 * Call cmd completion routine. Cmd to be 2826 * be completed directly without depending on isr. 2827 */ 2828 megasas_complete_cmd_dpc((unsigned long)instance); 2829 } 2830 2831 msleep(1000); 2832 } 2833 2834 i = 0; 2835 outstanding = atomic_read(&instance->fw_outstanding); 2836 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2837 2838 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2839 goto no_outstanding; 2840 2841 if (instance->disableOnlineCtrlReset) 2842 goto kill_hba_and_failed; 2843 do { 2844 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2845 dev_info(&instance->pdev->dev, 2846 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2847 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2848 if (i == 3) 2849 goto kill_hba_and_failed; 2850 megasas_do_ocr(instance); 2851 2852 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2853 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2854 __func__, __LINE__); 2855 return FAILED; 2856 } 2857 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2858 __func__, __LINE__); 2859 2860 for (sl = 0; sl < 10; sl++) 2861 msleep(500); 2862 2863 outstanding = atomic_read(&instance->fw_outstanding); 2864 2865 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2866 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2867 goto no_outstanding; 2868 } 2869 i++; 2870 } while (i <= 3); 2871 2872 no_outstanding: 2873 2874 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2875 __func__, __LINE__); 2876 return SUCCESS; 2877 2878 kill_hba_and_failed: 2879 2880 /* Reset not supported, kill adapter */ 2881 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2882 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2883 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2884 atomic_read(&instance->fw_outstanding)); 2885 megasas_dump_pending_frames(instance); 2886 megaraid_sas_kill_hba(instance); 2887 2888 return FAILED; 2889 } 2890 2891 /** 2892 * megasas_generic_reset - Generic reset routine 2893 * @scmd: Mid-layer SCSI command 2894 * 2895 * This routine implements a generic reset handler for device, bus and host 2896 * reset requests. Device, bus and host specific reset handlers can use this 2897 * function after they do their specific tasks. 
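* (For MFI-series adapters this is invoked from megasas_reset_bus_host() and relies on megasas_wait_for_outstanding() above to drain or fail pending I/O.)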
2898 */ 2899 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2900 { 2901 int ret_val; 2902 struct megasas_instance *instance; 2903 2904 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2905 2906 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2907 scmd->cmnd[0], scmd->retries); 2908 2909 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2910 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2911 return FAILED; 2912 } 2913 2914 ret_val = megasas_wait_for_outstanding(instance); 2915 if (ret_val == SUCCESS) 2916 dev_notice(&instance->pdev->dev, "reset successful\n"); 2917 else 2918 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2919 2920 return ret_val; 2921 } 2922 2923 /** 2924 * megasas_reset_timer - quiesce the adapter if required 2925 * @scmd: scsi cmnd 2926 * 2927 * Sets the FW busy flag and reduces the host->can_queue if the 2928 * cmd has not been completed within the timeout period. 2929 */ 2930 static enum 2931 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2932 { 2933 struct megasas_instance *instance; 2934 unsigned long flags; 2935 2936 if (time_after(jiffies, scmd->jiffies_at_alloc + 2937 (scmd_timeout * 2) * HZ)) { 2938 return BLK_EH_DONE; 2939 } 2940 2941 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2942 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2943 /* FW is busy, throttle IO */ 2944 spin_lock_irqsave(instance->host->host_lock, flags); 2945 2946 instance->host->can_queue = instance->throttlequeuedepth; 2947 instance->last_time = jiffies; 2948 instance->flag |= MEGASAS_FW_BUSY; 2949 2950 spin_unlock_irqrestore(instance->host->host_lock, flags); 2951 } 2952 return BLK_EH_RESET_TIMER; 2953 } 2954 2955 /** 2956 * megasas_dump - This function will print hexdump of provided buffer. 2957 * @buf: Buffer to be dumped 2958 * @sz: Size in bytes 2959 * @format: Different formats of dumping e.g. format=n will 2960 * cause only 'n' 32 bit words to be dumped in a single 2961 * line. 
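* Example (illustrative): megasas_dump(buf, 64, 8) prints two lines of eight 32-bit words each, every line prefixed with its starting byte offset.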
2962 */ 2963 inline void 2964 megasas_dump(void *buf, int sz, int format) 2965 { 2966 int i; 2967 __le32 *buf_loc = (__le32 *)buf; 2968 2969 for (i = 0; i < (sz / sizeof(__le32)); i++) { 2970 if ((i % format) == 0) { 2971 if (i != 0) 2972 printk(KERN_CONT "\n"); 2973 printk(KERN_CONT "%08x: ", (i * 4)); 2974 } 2975 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i])); 2976 } 2977 printk(KERN_CONT "\n"); 2978 } 2979 2980 /** 2981 * megasas_dump_reg_set - This function will print hexdump of register set 2982 * @reg_set: Register set to be dumped 2983 */ 2984 inline void 2985 megasas_dump_reg_set(void __iomem *reg_set) 2986 { 2987 unsigned int i, sz = 256; 2988 u32 __iomem *reg = (u32 __iomem *)reg_set; 2989 2990 for (i = 0; i < (sz / sizeof(u32)); i++) 2991 printk("%08x: %08x\n", (i * 4), readl(&reg[i])); 2992 } 2993 2994 /** 2995 * megasas_dump_fusion_io - This function will print key details 2996 * of SCSI IO 2997 * @scmd: SCSI command pointer of SCSI IO 2998 */ 2999 void 3000 megasas_dump_fusion_io(struct scsi_cmnd *scmd) 3001 { 3002 struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv; 3003 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3004 struct megasas_instance *instance; 3005 3006 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3007 3008 scmd_printk(KERN_INFO, scmd, 3009 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n", 3010 scmd, scmd->retries, scmd->allowed); 3011 scsi_print_command(scmd); 3012 3013 if (cmd) { 3014 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; 3015 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n"); 3016 scmd_printk(KERN_INFO, scmd, 3017 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n", 3018 req_desc->SCSIIO.RequestFlags, 3019 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID, 3020 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle); 3021 3022 printk(KERN_INFO "IO request frame:\n"); 3023 megasas_dump(cmd->io_request, 3024 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8); 3025 printk(KERN_INFO "Chain frame:\n"); 3026 megasas_dump(cmd->sg_frame, 3027 instance->max_chain_frame_sz, 8); 3028 } 3029 3030 } 3031 3032 /* 3033 * megasas_dump_sys_regs - This function will dump system registers through 3034 * sysfs. 3035 * @reg_set: Pointer to System register set. 3036 * @buf: Buffer to which output is to be written. 3037 * @return: Number of bytes written to buffer.
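* Usage sketch (mirroring dump_system_regs_show() below): return megasas_dump_sys_regs(instance->reg_set, buf);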
3038 */ 3039 static inline ssize_t 3040 megasas_dump_sys_regs(void __iomem *reg_set, char *buf) 3041 { 3042 unsigned int i, sz = 256; 3043 int bytes_wrote = 0; 3044 char *loc = (char *)buf; 3045 u32 __iomem *reg = (u32 __iomem *)reg_set; 3046 3047 for (i = 0; i < sz / sizeof(u32); i++) { 3048 bytes_wrote += scnprintf(loc + bytes_wrote, 3049 PAGE_SIZE - bytes_wrote, 3050 "%08x: %08x\n", (i * 4), 3051 readl(&reg[i])); 3052 } 3053 return bytes_wrote; 3054 } 3055 3056 /** 3057 * megasas_reset_bus_host - Bus & host reset handler entry point 3058 * @scmd: Mid-layer SCSI command 3059 */ 3060 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 3061 { 3062 int ret; 3063 struct megasas_instance *instance; 3064 3065 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3066 3067 scmd_printk(KERN_INFO, scmd, 3068 "OCR is requested due to IO timeout!!\n"); 3069 3070 scmd_printk(KERN_INFO, scmd, 3071 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n", 3072 scmd->device->host->shost_state, 3073 scsi_host_busy(scmd->device->host), 3074 atomic_read(&instance->fw_outstanding)); 3075 /* 3076 * First wait for all commands to complete 3077 */ 3078 if (instance->adapter_type == MFI_SERIES) { 3079 ret = megasas_generic_reset(scmd); 3080 } else { 3081 megasas_dump_fusion_io(scmd); 3082 ret = megasas_reset_fusion(scmd->device->host, 3083 SCSIIO_TIMEOUT_OCR); 3084 } 3085 3086 return ret; 3087 } 3088 3089 /** 3090 * megasas_task_abort - Issues task abort request to firmware 3091 * (supported only for fusion adapters) 3092 * @scmd: SCSI command pointer 3093 */ 3094 static int megasas_task_abort(struct scsi_cmnd *scmd) 3095 { 3096 int ret; 3097 struct megasas_instance *instance; 3098 3099 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3100 3101 if (instance->adapter_type != MFI_SERIES) 3102 ret = megasas_task_abort_fusion(scmd); 3103 else { 3104 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 3105 ret = FAILED; 3106 } 3107 3108 return ret; 3109 } 3110 3111 /** 3112 * megasas_reset_target: Issues target reset request to firmware 3113 * (supported only for fusion adapters) 3114 * @scmd: SCSI command pointer 3115 */ 3116 static int megasas_reset_target(struct scsi_cmnd *scmd) 3117 { 3118 int ret; 3119 struct megasas_instance *instance; 3120 3121 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3122 3123 if (instance->adapter_type != MFI_SERIES) 3124 ret = megasas_reset_target_fusion(scmd); 3125 else { 3126 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 3127 ret = FAILED; 3128 } 3129 3130 return ret; 3131 } 3132 3133 /** 3134 * megasas_bios_param - Returns disk geometry for a disk 3135 * @sdev: device handle 3136 * @bdev: block device 3137 * @capacity: drive capacity 3138 * @geom: geometry parameters 3139 */ 3140 static int 3141 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 3142 sector_t capacity, int geom[]) 3143 { 3144 int heads; 3145 int sectors; 3146 sector_t cylinders; 3147 unsigned long tmp; 3148 3149 /* Default heads (64) & sectors (32) */ 3150 heads = 64; 3151 sectors = 32; 3152 3153 tmp = heads * sectors; 3154 cylinders = capacity; 3155 3156 sector_div(cylinders, tmp); 3157 3158 /* 3159 * Handle extended translation size for logical drives > 1Gb 3160 */ 3161 3162 if (capacity >= 0x200000) { 3163 heads = 255; 3164 sectors = 63; 3165 tmp = heads*sectors; 3166 cylinders = capacity; 3167 sector_div(cylinders, tmp); 3168 } 3169 3170 geom[0] = heads; 3171 geom[1] = sectors; 3172
geom[2] = cylinders; 3173 3174 return 0; 3175 } 3176 3177 static int megasas_map_queues(struct Scsi_Host *shost) 3178 { 3179 struct megasas_instance *instance; 3180 int qoff = 0, offset; 3181 struct blk_mq_queue_map *map; 3182 3183 instance = (struct megasas_instance *)shost->hostdata; 3184 3185 if (shost->nr_hw_queues == 1) 3186 return 0; 3187 3188 offset = instance->low_latency_index_start; 3189 3190 /* Setup Default hctx */ 3191 map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3192 map->nr_queues = instance->msix_vectors - offset; 3193 map->queue_offset = 0; 3194 blk_mq_pci_map_queues(map, instance->pdev, offset); 3195 qoff += map->nr_queues; 3196 offset += map->nr_queues; 3197 3198 /* we never use READ queue, so can't cheat blk-mq */ 3199 shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; 3200 3201 /* Setup Poll hctx */ 3202 map = &shost->tag_set.map[HCTX_TYPE_POLL]; 3203 map->nr_queues = instance->iopoll_q_count; 3204 if (map->nr_queues) { 3205 /* 3206 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3207 * affinity), so use the regular blk-mq cpu mapping 3208 */ 3209 map->queue_offset = qoff; 3210 blk_mq_map_queues(map); 3211 } 3212 3213 return 0; 3214 } 3215 3216 static void megasas_aen_polling(struct work_struct *work); 3217 3218 /** 3219 * megasas_service_aen - Processes an event notification 3220 * @instance: Adapter soft state 3221 * @cmd: AEN command completed by the ISR 3222 * 3223 * For AEN, driver sends a command down to FW that is held by the FW till an 3224 * event occurs. When an event of interest occurs, FW completes the command 3225 * that it was previously holding. 3226 * 3227 * This routines sends SIGIO signal to processes that have registered with the 3228 * driver for AEN. 3229 */ 3230 static void 3231 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3232 { 3233 unsigned long flags; 3234 3235 /* 3236 * Don't signal app if it is just an aborted previously registered aen 3237 */ 3238 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3239 spin_lock_irqsave(&poll_aen_lock, flags); 3240 megasas_poll_wait_aen = 1; 3241 spin_unlock_irqrestore(&poll_aen_lock, flags); 3242 wake_up(&megasas_poll_wait); 3243 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3244 } 3245 else 3246 cmd->abort_aen = 0; 3247 3248 instance->aen_cmd = NULL; 3249 3250 megasas_return_cmd(instance, cmd); 3251 3252 if ((instance->unload == 0) && 3253 ((instance->issuepend_done == 1))) { 3254 struct megasas_aen_event *ev; 3255 3256 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3257 if (!ev) { 3258 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3259 } else { 3260 ev->instance = instance; 3261 instance->ev = ev; 3262 INIT_DELAYED_WORK(&ev->hotplug_work, 3263 megasas_aen_polling); 3264 schedule_delayed_work(&ev->hotplug_work, 0); 3265 } 3266 } 3267 } 3268 3269 static ssize_t 3270 fw_crash_buffer_store(struct device *cdev, 3271 struct device_attribute *attr, const char *buf, size_t count) 3272 { 3273 struct Scsi_Host *shost = class_to_shost(cdev); 3274 struct megasas_instance *instance = 3275 (struct megasas_instance *) shost->hostdata; 3276 int val = 0; 3277 unsigned long flags; 3278 3279 if (kstrtoint(buf, 0, &val) != 0) 3280 return -EINVAL; 3281 3282 spin_lock_irqsave(&instance->crashdump_lock, flags); 3283 instance->fw_crash_buffer_offset = val; 3284 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3285 return strlen(buf); 3286 } 3287 3288 static ssize_t 3289 fw_crash_buffer_show(struct device *cdev, 3290 struct device_attribute *attr, char 
*buf) 3291 { 3292 struct Scsi_Host *shost = class_to_shost(cdev); 3293 struct megasas_instance *instance = 3294 (struct megasas_instance *) shost->hostdata; 3295 u32 size; 3296 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3297 unsigned long chunk_left_bytes; 3298 unsigned long src_addr; 3299 unsigned long flags; 3300 u32 buff_offset; 3301 3302 spin_lock_irqsave(&instance->crashdump_lock, flags); 3303 buff_offset = instance->fw_crash_buffer_offset; 3304 if (!instance->crash_dump_buf && 3305 !((instance->fw_crash_state == AVAILABLE) || 3306 (instance->fw_crash_state == COPYING))) { 3307 dev_err(&instance->pdev->dev, 3308 "Firmware crash dump is not available\n"); 3309 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3310 return -EINVAL; 3311 } 3312 3313 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3314 dev_err(&instance->pdev->dev, 3315 "Firmware crash dump offset is out of range\n"); 3316 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3317 return 0; 3318 } 3319 3320 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3321 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3322 size = (size > chunk_left_bytes) ? chunk_left_bytes : size; 3323 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3324 3325 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3326 (buff_offset % dmachunk); 3327 memcpy(buf, (void *)src_addr, size); 3328 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3329 3330 return size; 3331 } 3332 3333 static ssize_t 3334 fw_crash_buffer_size_show(struct device *cdev, 3335 struct device_attribute *attr, char *buf) 3336 { 3337 struct Scsi_Host *shost = class_to_shost(cdev); 3338 struct megasas_instance *instance = 3339 (struct megasas_instance *) shost->hostdata; 3340 3341 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3342 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3343 } 3344 3345 static ssize_t 3346 fw_crash_state_store(struct device *cdev, 3347 struct device_attribute *attr, const char *buf, size_t count) 3348 { 3349 struct Scsi_Host *shost = class_to_shost(cdev); 3350 struct megasas_instance *instance = 3351 (struct megasas_instance *) shost->hostdata; 3352 int val = 0; 3353 unsigned long flags; 3354 3355 if (kstrtoint(buf, 0, &val) != 0) 3356 return -EINVAL; 3357 3358 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3359 dev_err(&instance->pdev->dev, "application updates invalid " 3360 "firmware crash state\n"); 3361 return -EINVAL; 3362 } 3363 3364 instance->fw_crash_state = val; 3365 3366 if ((val == COPIED) || (val == COPY_ERROR)) { 3367 spin_lock_irqsave(&instance->crashdump_lock, flags); 3368 megasas_free_host_crash_buffer(instance); 3369 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3370 if (val == COPY_ERROR) 3371 dev_info(&instance->pdev->dev, "application failed to " 3372 "copy Firmware crash dump\n"); 3373 else 3374 dev_info(&instance->pdev->dev, "Firmware crash dump " 3375 "copied successfully\n"); 3376 } 3377 return strlen(buf); 3378 } 3379 3380 static ssize_t 3381 fw_crash_state_show(struct device *cdev, 3382 struct device_attribute *attr, char *buf) 3383 { 3384 struct Scsi_Host *shost = class_to_shost(cdev); 3385 struct megasas_instance *instance = 3386 (struct megasas_instance *) shost->hostdata; 3387 3388 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3389 } 3390 3391 static ssize_t 3392 page_size_show(struct device *cdev, 3393 struct device_attribute *attr, char *buf) 3394 { 3395 return snprintf(buf, PAGE_SIZE, 
"%ld\n", (unsigned long)PAGE_SIZE - 1); 3396 } 3397 3398 static ssize_t 3399 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3400 char *buf) 3401 { 3402 struct Scsi_Host *shost = class_to_shost(cdev); 3403 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3404 3405 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3406 } 3407 3408 static ssize_t 3409 fw_cmds_outstanding_show(struct device *cdev, 3410 struct device_attribute *attr, char *buf) 3411 { 3412 struct Scsi_Host *shost = class_to_shost(cdev); 3413 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3414 3415 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3416 } 3417 3418 static ssize_t 3419 enable_sdev_max_qd_show(struct device *cdev, 3420 struct device_attribute *attr, char *buf) 3421 { 3422 struct Scsi_Host *shost = class_to_shost(cdev); 3423 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3424 3425 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); 3426 } 3427 3428 static ssize_t 3429 enable_sdev_max_qd_store(struct device *cdev, 3430 struct device_attribute *attr, const char *buf, size_t count) 3431 { 3432 struct Scsi_Host *shost = class_to_shost(cdev); 3433 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3434 u32 val = 0; 3435 bool is_target_prop; 3436 int ret_target_prop = DCMD_FAILED; 3437 struct scsi_device *sdev; 3438 3439 if (kstrtou32(buf, 0, &val) != 0) { 3440 pr_err("megasas: could not set enable_sdev_max_qd\n"); 3441 return -EINVAL; 3442 } 3443 3444 mutex_lock(&instance->reset_mutex); 3445 if (val) 3446 instance->enable_sdev_max_qd = true; 3447 else 3448 instance->enable_sdev_max_qd = false; 3449 3450 shost_for_each_device(sdev, shost) { 3451 ret_target_prop = megasas_get_target_prop(instance, sdev); 3452 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; 3453 megasas_set_fw_assisted_qd(sdev, is_target_prop); 3454 } 3455 mutex_unlock(&instance->reset_mutex); 3456 3457 return strlen(buf); 3458 } 3459 3460 static ssize_t 3461 dump_system_regs_show(struct device *cdev, 3462 struct device_attribute *attr, char *buf) 3463 { 3464 struct Scsi_Host *shost = class_to_shost(cdev); 3465 struct megasas_instance *instance = 3466 (struct megasas_instance *)shost->hostdata; 3467 3468 return megasas_dump_sys_regs(instance->reg_set, buf); 3469 } 3470 3471 static ssize_t 3472 raid_map_id_show(struct device *cdev, struct device_attribute *attr, 3473 char *buf) 3474 { 3475 struct Scsi_Host *shost = class_to_shost(cdev); 3476 struct megasas_instance *instance = 3477 (struct megasas_instance *)shost->hostdata; 3478 3479 return snprintf(buf, PAGE_SIZE, "%ld\n", 3480 (unsigned long)instance->map_id); 3481 } 3482 3483 static DEVICE_ATTR_RW(fw_crash_buffer); 3484 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3485 static DEVICE_ATTR_RW(fw_crash_state); 3486 static DEVICE_ATTR_RO(page_size); 3487 static DEVICE_ATTR_RO(ldio_outstanding); 3488 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3489 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3490 static DEVICE_ATTR_RO(dump_system_regs); 3491 static DEVICE_ATTR_RO(raid_map_id); 3492 3493 static struct attribute *megaraid_host_attrs[] = { 3494 &dev_attr_fw_crash_buffer_size.attr, 3495 &dev_attr_fw_crash_buffer.attr, 3496 &dev_attr_fw_crash_state.attr, 3497 &dev_attr_page_size.attr, 3498 &dev_attr_ldio_outstanding.attr, 3499 &dev_attr_fw_cmds_outstanding.attr, 3500 &dev_attr_enable_sdev_max_qd.attr, 3501 &dev_attr_dump_system_regs.attr, 3502 &dev_attr_raid_map_id.attr, 3503 NULL, 3504 }; 3505 3506 ATTRIBUTE_GROUPS(megaraid_host); 3507 3508 /* 3509 * Scsi host template for megaraid_sas driver 3510 */ 3511 static struct scsi_host_template megasas_template = { 3512 3513 .module = THIS_MODULE, 3514 .name = "Avago SAS based MegaRAID driver", 3515 .proc_name = "megaraid_sas", 3516 .slave_configure = megasas_slave_configure, 3517 .slave_alloc = megasas_slave_alloc, 3518 .slave_destroy = megasas_slave_destroy, 3519 .queuecommand = megasas_queue_command, 3520 .eh_target_reset_handler = megasas_reset_target, 3521 .eh_abort_handler = megasas_task_abort, 3522 .eh_host_reset_handler = megasas_reset_bus_host, 3523 .eh_timed_out = megasas_reset_timer, 3524 .shost_groups = megaraid_host_groups, 3525 .bios_param = megasas_bios_param, 3526 .map_queues = megasas_map_queues, 3527 .mq_poll = megasas_blk_mq_poll, 3528 .change_queue_depth = scsi_change_queue_depth, 3529 .max_segment_size = 0xffffffff, 3530 .cmd_size = sizeof(struct megasas_cmd_priv), 3531 }; 3532 3533 /** 3534 * megasas_complete_int_cmd - Completes an internal command 3535 * @instance: Adapter soft state 3536 * @cmd: Command to be completed 3537 * 3538 * The megasas_issue_blocked_cmd() function waits for a command to complete 3539 * after it issues a command. This function wakes up that waiting routine by 3540 * calling wake_up() on the wait queue. 3541 */ 3542 static void 3543 megasas_complete_int_cmd(struct megasas_instance *instance, 3544 struct megasas_cmd *cmd) 3545 { 3546 if (cmd->cmd_status_drv == DCMD_INIT) 3547 cmd->cmd_status_drv = 3548 (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 
3549 DCMD_SUCCESS : DCMD_FAILED; 3550 3551 wake_up(&instance->int_cmd_wait_q); 3552 } 3553 3554 /** 3555 * megasas_complete_abort - Completes aborting a command 3556 * @instance: Adapter soft state 3557 * @cmd: Cmd that was issued to abort another cmd 3558 * 3559 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3560 * after it issues an abort on a previously issued command. This function 3561 * wakes up all functions waiting on the same wait queue. 3562 */ 3563 static void 3564 megasas_complete_abort(struct megasas_instance *instance, 3565 struct megasas_cmd *cmd) 3566 { 3567 if (cmd->sync_cmd) { 3568 cmd->sync_cmd = 0; 3569 cmd->cmd_status_drv = DCMD_SUCCESS; 3570 wake_up(&instance->abort_cmd_wait_q); 3571 } 3572 } 3573 3574 static void 3575 megasas_set_ld_removed_by_fw(struct megasas_instance *instance) 3576 { 3577 uint i; 3578 3579 for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { 3580 if (instance->ld_ids_prev[i] != 0xff && 3581 instance->ld_ids_from_raidmap[i] == 0xff) { 3582 if (megasas_dbg_lvl & LD_PD_DEBUG) 3583 dev_info(&instance->pdev->dev, 3584 "LD target ID %d removed from RAID map\n", i); 3585 instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; 3586 } 3587 } 3588 } 3589 3590 /** 3591 * megasas_complete_cmd - Completes a command 3592 * @instance: Adapter soft state 3593 * @cmd: Command to be completed 3594 * @alt_status: If non-zero, use this value as status to 3595 * SCSI mid-layer instead of the value returned 3596 * by the FW. This should be used if caller wants 3597 * an alternate status (as in the case of aborted 3598 * commands) 3599 */ 3600 void 3601 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3602 u8 alt_status) 3603 { 3604 int exception = 0; 3605 struct megasas_header *hdr = &cmd->frame->hdr; 3606 unsigned long flags; 3607 struct fusion_context *fusion = instance->ctrl_context; 3608 u32 opcode, status; 3609 3610 /* flag for the retry reset */ 3611 cmd->retry_for_fw_reset = 0; 3612 3613 if (cmd->scmd) 3614 megasas_priv(cmd->scmd)->cmd_priv = NULL; 3615 3616 switch (hdr->cmd) { 3617 case MFI_CMD_INVALID: 3618 /* Some older 1068 controller FW may keep a pended 3619 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3620 when booting the kdump kernel. Ignore this command to 3621 prevent a kernel panic on shutdown of the kdump kernel. */ 3622 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3623 "completed\n"); 3624 dev_warn(&instance->pdev->dev, "If you have a controller " 3625 "other than PERC5, please upgrade your firmware\n"); 3626 break; 3627 case MFI_CMD_PD_SCSI_IO: 3628 case MFI_CMD_LD_SCSI_IO: 3629 3630 /* 3631 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3632 * issued either through an IO path or an IOCTL path. If it 3633 * was via IOCTL, we will send it to internal completion. 
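* (The IOCTL path marks such frames with cmd->sync_cmd before issuing them, which is what the check below tests.)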
3634 */ 3635 if (cmd->sync_cmd) { 3636 cmd->sync_cmd = 0; 3637 megasas_complete_int_cmd(instance, cmd); 3638 break; 3639 } 3640 fallthrough; 3641 3642 case MFI_CMD_LD_READ: 3643 case MFI_CMD_LD_WRITE: 3644 3645 if (alt_status) { 3646 cmd->scmd->result = alt_status << 16; 3647 exception = 1; 3648 } 3649 3650 if (exception) { 3651 3652 atomic_dec(&instance->fw_outstanding); 3653 3654 scsi_dma_unmap(cmd->scmd); 3655 scsi_done(cmd->scmd); 3656 megasas_return_cmd(instance, cmd); 3657 3658 break; 3659 } 3660 3661 switch (hdr->cmd_status) { 3662 3663 case MFI_STAT_OK: 3664 cmd->scmd->result = DID_OK << 16; 3665 break; 3666 3667 case MFI_STAT_SCSI_IO_FAILED: 3668 case MFI_STAT_LD_INIT_IN_PROGRESS: 3669 cmd->scmd->result = 3670 (DID_ERROR << 16) | hdr->scsi_status; 3671 break; 3672 3673 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3674 3675 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3676 3677 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3678 memset(cmd->scmd->sense_buffer, 0, 3679 SCSI_SENSE_BUFFERSIZE); 3680 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3681 hdr->sense_len); 3682 } 3683 3684 break; 3685 3686 case MFI_STAT_LD_OFFLINE: 3687 case MFI_STAT_DEVICE_NOT_FOUND: 3688 cmd->scmd->result = DID_BAD_TARGET << 16; 3689 break; 3690 3691 default: 3692 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3693 hdr->cmd_status); 3694 cmd->scmd->result = DID_ERROR << 16; 3695 break; 3696 } 3697 3698 atomic_dec(&instance->fw_outstanding); 3699 3700 scsi_dma_unmap(cmd->scmd); 3701 scsi_done(cmd->scmd); 3702 megasas_return_cmd(instance, cmd); 3703 3704 break; 3705 3706 case MFI_CMD_SMP: 3707 case MFI_CMD_STP: 3708 case MFI_CMD_NVME: 3709 case MFI_CMD_TOOLBOX: 3710 megasas_complete_int_cmd(instance, cmd); 3711 break; 3712 3713 case MFI_CMD_DCMD: 3714 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3715 /* Check for LD map update */ 3716 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3717 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3718 fusion->fast_path_io = 0; 3719 spin_lock_irqsave(instance->host->host_lock, flags); 3720 status = cmd->frame->hdr.cmd_status; 3721 instance->map_update_cmd = NULL; 3722 if (status != MFI_STAT_OK) { 3723 if (status != MFI_STAT_NOT_FOUND) 3724 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3725 cmd->frame->hdr.cmd_status); 3726 else { 3727 megasas_return_cmd(instance, cmd); 3728 spin_unlock_irqrestore( 3729 instance->host->host_lock, 3730 flags); 3731 break; 3732 } 3733 } 3734 3735 megasas_return_cmd(instance, cmd); 3736 3737 /* 3738 * Set fast path IO to ZERO. 3739 * Validate Map will set proper value. 3740 * Meanwhile all IOs will go as LD IO. 
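* (Fast path is re-enabled below only when MR_ValidateMapInfo() accepts the new map; until then IOs continue on the LD IO path.)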
3741 */ 3742 if (status == MFI_STAT_OK && 3743 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3744 instance->map_id++; 3745 fusion->fast_path_io = 1; 3746 } else { 3747 fusion->fast_path_io = 0; 3748 } 3749 3750 if (instance->adapter_type >= INVADER_SERIES) 3751 megasas_set_ld_removed_by_fw(instance); 3752 3753 megasas_sync_map_info(instance); 3754 spin_unlock_irqrestore(instance->host->host_lock, 3755 flags); 3756 3757 break; 3758 } 3759 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3760 opcode == MR_DCMD_CTRL_EVENT_GET) { 3761 spin_lock_irqsave(&poll_aen_lock, flags); 3762 megasas_poll_wait_aen = 0; 3763 spin_unlock_irqrestore(&poll_aen_lock, flags); 3764 } 3765 3766 /* FW has an updated PD sequence */ 3767 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3768 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3769 3770 spin_lock_irqsave(instance->host->host_lock, flags); 3771 status = cmd->frame->hdr.cmd_status; 3772 instance->jbod_seq_cmd = NULL; 3773 megasas_return_cmd(instance, cmd); 3774 3775 if (status == MFI_STAT_OK) { 3776 instance->pd_seq_map_id++; 3777 /* Re-register a pd sync seq num cmd */ 3778 if (megasas_sync_pd_seq_num(instance, true)) 3779 instance->use_seqnum_jbod_fp = false; 3780 } else 3781 instance->use_seqnum_jbod_fp = false; 3782 3783 spin_unlock_irqrestore(instance->host->host_lock, flags); 3784 break; 3785 } 3786 3787 /* 3788 * See if got an event notification 3789 */ 3790 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3791 megasas_service_aen(instance, cmd); 3792 else 3793 megasas_complete_int_cmd(instance, cmd); 3794 3795 break; 3796 3797 case MFI_CMD_ABORT: 3798 /* 3799 * Cmd issued to abort another cmd returned 3800 */ 3801 megasas_complete_abort(instance, cmd); 3802 break; 3803 3804 default: 3805 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3806 hdr->cmd); 3807 megasas_complete_int_cmd(instance, cmd); 3808 break; 3809 } 3810 } 3811 3812 /** 3813 * megasas_issue_pending_cmds_again - issue all pending cmds 3814 * in FW again because of the fw reset 3815 * @instance: Adapter soft state 3816 */ 3817 static inline void 3818 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3819 { 3820 struct megasas_cmd *cmd; 3821 struct list_head clist_local; 3822 union megasas_evt_class_locale class_locale; 3823 unsigned long flags; 3824 u32 seq_num; 3825 3826 INIT_LIST_HEAD(&clist_local); 3827 spin_lock_irqsave(&instance->hba_lock, flags); 3828 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3829 spin_unlock_irqrestore(&instance->hba_lock, flags); 3830 3831 while (!list_empty(&clist_local)) { 3832 cmd = list_entry((&clist_local)->next, 3833 struct megasas_cmd, list); 3834 list_del_init(&cmd->list); 3835 3836 if (cmd->sync_cmd || cmd->scmd) { 3837 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3838 "detected to be pending while HBA reset\n", 3839 cmd, cmd->scmd, cmd->sync_cmd); 3840 3841 cmd->retry_for_fw_reset++; 3842 3843 if (cmd->retry_for_fw_reset == 3) { 3844 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3845 "was tried multiple times during reset." 
3846 "Shutting down the HBA\n", 3847 cmd, cmd->scmd, cmd->sync_cmd); 3848 instance->instancet->disable_intr(instance); 3849 atomic_set(&instance->fw_reset_no_pci_access, 1); 3850 megaraid_sas_kill_hba(instance); 3851 return; 3852 } 3853 } 3854 3855 if (cmd->sync_cmd == 1) { 3856 if (cmd->scmd) { 3857 dev_notice(&instance->pdev->dev, "unexpected" 3858 "cmd attached to internal command!\n"); 3859 } 3860 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3861 "on the internal reset queue," 3862 "issue it again.\n", cmd); 3863 cmd->cmd_status_drv = DCMD_INIT; 3864 instance->instancet->fire_cmd(instance, 3865 cmd->frame_phys_addr, 3866 0, instance->reg_set); 3867 } else if (cmd->scmd) { 3868 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3869 "detected on the internal queue, issue again.\n", 3870 cmd, cmd->scmd->cmnd[0]); 3871 3872 atomic_inc(&instance->fw_outstanding); 3873 instance->instancet->fire_cmd(instance, 3874 cmd->frame_phys_addr, 3875 cmd->frame_count-1, instance->reg_set); 3876 } else { 3877 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3878 "internal reset defer list while re-issue!!\n", 3879 cmd); 3880 } 3881 } 3882 3883 if (instance->aen_cmd) { 3884 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3885 megasas_return_cmd(instance, instance->aen_cmd); 3886 3887 instance->aen_cmd = NULL; 3888 } 3889 3890 /* 3891 * Initiate AEN (Asynchronous Event Notification) 3892 */ 3893 seq_num = instance->last_seq_num; 3894 class_locale.members.reserved = 0; 3895 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3896 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3897 3898 megasas_register_aen(instance, seq_num, class_locale.word); 3899 } 3900 3901 /* 3902 * Move the internal reset pending commands to a deferred queue. 3903 * 3904 * We move the commands pending at internal reset time to a 3905 * pending queue. This queue would be flushed after successful 3906 * completion of the internal reset sequence. if the internal reset 3907 * did not complete in time, the kernel reset handler would flush 3908 * these commands. 
3909 */ 3910 static void 3911 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3912 { 3913 struct megasas_cmd *cmd; 3914 int i; 3915 u16 max_cmd = instance->max_fw_cmds; 3916 u32 defer_index; 3917 unsigned long flags; 3918 3919 defer_index = 0; 3920 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3921 for (i = 0; i < max_cmd; i++) { 3922 cmd = instance->cmd_list[i]; 3923 if (cmd->sync_cmd == 1 || cmd->scmd) { 3924 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3925 "on the defer queue as internal\n", 3926 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3927 3928 if (!list_empty(&cmd->list)) { 3929 dev_notice(&instance->pdev->dev, "ERROR while" 3930 " moving this cmd:%p, %d %p, it was" 3931 "discovered on some list?\n", 3932 cmd, cmd->sync_cmd, cmd->scmd); 3933 3934 list_del_init(&cmd->list); 3935 } 3936 defer_index++; 3937 list_add_tail(&cmd->list, 3938 &instance->internal_reset_pending_q); 3939 } 3940 } 3941 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3942 } 3943 3944 3945 static void 3946 process_fw_state_change_wq(struct work_struct *work) 3947 { 3948 struct megasas_instance *instance = 3949 container_of(work, struct megasas_instance, work_init); 3950 u32 wait; 3951 unsigned long flags; 3952 3953 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3954 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3955 atomic_read(&instance->adprecovery)); 3956 return ; 3957 } 3958 3959 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3960 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3961 "state, restarting it...\n"); 3962 3963 instance->instancet->disable_intr(instance); 3964 atomic_set(&instance->fw_outstanding, 0); 3965 3966 atomic_set(&instance->fw_reset_no_pci_access, 1); 3967 instance->instancet->adp_reset(instance, instance->reg_set); 3968 atomic_set(&instance->fw_reset_no_pci_access, 0); 3969 3970 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3971 "initiating next stage...\n"); 3972 3973 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3974 "state 2 starting...\n"); 3975 3976 /* waiting for about 20 second before start the second init */ 3977 for (wait = 0; wait < 30; wait++) { 3978 msleep(1000); 3979 } 3980 3981 if (megasas_transition_to_ready(instance, 1)) { 3982 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3983 3984 atomic_set(&instance->fw_reset_no_pci_access, 1); 3985 megaraid_sas_kill_hba(instance); 3986 return ; 3987 } 3988 3989 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3990 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3991 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3992 ) { 3993 *instance->consumer = *instance->producer; 3994 } else { 3995 *instance->consumer = 0; 3996 *instance->producer = 0; 3997 } 3998 3999 megasas_issue_init_mfi(instance); 4000 4001 spin_lock_irqsave(&instance->hba_lock, flags); 4002 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 4003 spin_unlock_irqrestore(&instance->hba_lock, flags); 4004 instance->instancet->enable_intr(instance); 4005 4006 megasas_issue_pending_cmds_again(instance); 4007 instance->issuepend_done = 1; 4008 } 4009 } 4010 4011 /** 4012 * megasas_deplete_reply_queue - Processes all completed commands 4013 * @instance: Adapter soft state 4014 * @alt_status: Alternate status to be returned to 4015 * SCSI mid-layer instead of the status 4016 * returned by the FW 4017 * Note: this must be called with hba lock held 4018 */ 4019 static 
int 4020 megasas_deplete_reply_queue(struct megasas_instance *instance, 4021 u8 alt_status) 4022 { 4023 u32 mfiStatus; 4024 u32 fw_state; 4025 4026 if ((mfiStatus = instance->instancet->check_reset(instance, 4027 instance->reg_set)) == 1) { 4028 return IRQ_HANDLED; 4029 } 4030 4031 mfiStatus = instance->instancet->clear_intr(instance); 4032 if (mfiStatus == 0) { 4033 /* Hardware may not set outbound_intr_status in MSI-X mode */ 4034 if (!instance->msix_vectors) 4035 return IRQ_NONE; 4036 } 4037 4038 instance->mfiStatus = mfiStatus; 4039 4040 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 4041 fw_state = instance->instancet->read_fw_status_reg( 4042 instance) & MFI_STATE_MASK; 4043 4044 if (fw_state != MFI_STATE_FAULT) { 4045 dev_notice(&instance->pdev->dev, "fw state:%x\n", 4046 fw_state); 4047 } 4048 4049 if ((fw_state == MFI_STATE_FAULT) && 4050 (instance->disableOnlineCtrlReset == 0)) { 4051 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 4052 4053 if ((instance->pdev->device == 4054 PCI_DEVICE_ID_LSI_SAS1064R) || 4055 (instance->pdev->device == 4056 PCI_DEVICE_ID_DELL_PERC5) || 4057 (instance->pdev->device == 4058 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 4059 4060 *instance->consumer = 4061 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 4062 } 4063 4064 4065 instance->instancet->disable_intr(instance); 4066 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4067 instance->issuepend_done = 0; 4068 4069 atomic_set(&instance->fw_outstanding, 0); 4070 megasas_internal_reset_defer_cmds(instance); 4071 4072 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 4073 fw_state, atomic_read(&instance->adprecovery)); 4074 4075 schedule_work(&instance->work_init); 4076 return IRQ_HANDLED; 4077 4078 } else { 4079 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 4080 fw_state, instance->disableOnlineCtrlReset); 4081 } 4082 } 4083 4084 tasklet_schedule(&instance->isr_tasklet); 4085 return IRQ_HANDLED; 4086 } 4087 4088 /** 4089 * megasas_isr - isr entry point 4090 * @irq: IRQ number 4091 * @devp: IRQ context address 4092 */ 4093 static irqreturn_t megasas_isr(int irq, void *devp) 4094 { 4095 struct megasas_irq_context *irq_context = devp; 4096 struct megasas_instance *instance = irq_context->instance; 4097 unsigned long flags; 4098 irqreturn_t rc; 4099 4100 if (atomic_read(&instance->fw_reset_no_pci_access)) 4101 return IRQ_HANDLED; 4102 4103 spin_lock_irqsave(&instance->hba_lock, flags); 4104 rc = megasas_deplete_reply_queue(instance, DID_OK); 4105 spin_unlock_irqrestore(&instance->hba_lock, flags); 4106 4107 return rc; 4108 } 4109 4110 /** 4111 * megasas_transition_to_ready - Move the FW to READY state 4112 * @instance: Adapter soft state 4113 * @ocr: Adapter reset state 4114 * 4115 * During the initialization, FW passes can potentially be in any one of 4116 * several possible states. If the FW in operational, waiting-for-handshake 4117 * states, driver must take steps to bring it to ready state. Otherwise, it 4118 * has to wait for the ready state. 
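 *
 * The wait loop below reduces to (sketch, using the same helpers and
 * constants as the body of this function):
 *
 *	abs_state = instance->instancet->read_fw_status_reg(instance);
 *	fw_state  = abs_state & MFI_STATE_MASK;
 *	while (fw_state != MFI_STATE_READY) {
 *		/* per-state doorbell write or FAULT handling ... */
 *		for (i = 0; i < max_wait * 50; i++) {
 *			curr_abs_state =
 *				instance->instancet->read_fw_status_reg(instance);
 *			if (curr_abs_state != abs_state)
 *				break;
 *			msleep(20);
 *		}
 *		if (curr_abs_state == abs_state)
 *			return -ENODEV;	/* stuck in one state too long */
 *		abs_state = curr_abs_state;
 *		fw_state  = curr_abs_state & MFI_STATE_MASK;
 *	}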
4119 */ 4120 int 4121 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 4122 { 4123 int i; 4124 u8 max_wait; 4125 u32 fw_state; 4126 u32 abs_state, curr_abs_state; 4127 4128 abs_state = instance->instancet->read_fw_status_reg(instance); 4129 fw_state = abs_state & MFI_STATE_MASK; 4130 4131 if (fw_state != MFI_STATE_READY) 4132 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 4133 " state\n"); 4134 4135 while (fw_state != MFI_STATE_READY) { 4136 4137 switch (fw_state) { 4138 4139 case MFI_STATE_FAULT: 4140 dev_printk(KERN_ERR, &instance->pdev->dev, 4141 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 4142 abs_state & MFI_STATE_FAULT_CODE, 4143 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4144 if (ocr) { 4145 max_wait = MEGASAS_RESET_WAIT_TIME; 4146 break; 4147 } else { 4148 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4149 megasas_dump_reg_set(instance->reg_set); 4150 return -ENODEV; 4151 } 4152 4153 case MFI_STATE_WAIT_HANDSHAKE: 4154 /* 4155 * Set the CLR bit in inbound doorbell 4156 */ 4157 if ((instance->pdev->device == 4158 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4159 (instance->pdev->device == 4160 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4161 (instance->adapter_type != MFI_SERIES)) 4162 writel( 4163 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4164 &instance->reg_set->doorbell); 4165 else 4166 writel( 4167 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4168 &instance->reg_set->inbound_doorbell); 4169 4170 max_wait = MEGASAS_RESET_WAIT_TIME; 4171 break; 4172 4173 case MFI_STATE_BOOT_MESSAGE_PENDING: 4174 if ((instance->pdev->device == 4175 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4176 (instance->pdev->device == 4177 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4178 (instance->adapter_type != MFI_SERIES)) 4179 writel(MFI_INIT_HOTPLUG, 4180 &instance->reg_set->doorbell); 4181 else 4182 writel(MFI_INIT_HOTPLUG, 4183 &instance->reg_set->inbound_doorbell); 4184 4185 max_wait = MEGASAS_RESET_WAIT_TIME; 4186 break; 4187 4188 case MFI_STATE_OPERATIONAL: 4189 /* 4190 * Bring it to READY state; assuming max wait 10 secs 4191 */ 4192 instance->instancet->disable_intr(instance); 4193 if ((instance->pdev->device == 4194 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4195 (instance->pdev->device == 4196 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4197 (instance->adapter_type != MFI_SERIES)) { 4198 writel(MFI_RESET_FLAGS, 4199 &instance->reg_set->doorbell); 4200 4201 if (instance->adapter_type != MFI_SERIES) { 4202 for (i = 0; i < (10 * 1000); i += 20) { 4203 if (megasas_readl( 4204 instance, 4205 &instance-> 4206 reg_set-> 4207 doorbell) & 1) 4208 msleep(20); 4209 else 4210 break; 4211 } 4212 } 4213 } else 4214 writel(MFI_RESET_FLAGS, 4215 &instance->reg_set->inbound_doorbell); 4216 4217 max_wait = MEGASAS_RESET_WAIT_TIME; 4218 break; 4219 4220 case MFI_STATE_UNDEFINED: 4221 /* 4222 * This state should not last for more than 2 seconds 4223 */ 4224 max_wait = MEGASAS_RESET_WAIT_TIME; 4225 break; 4226 4227 case MFI_STATE_BB_INIT: 4228 max_wait = MEGASAS_RESET_WAIT_TIME; 4229 break; 4230 4231 case MFI_STATE_FW_INIT: 4232 max_wait = MEGASAS_RESET_WAIT_TIME; 4233 break; 4234 4235 case MFI_STATE_FW_INIT_2: 4236 max_wait = MEGASAS_RESET_WAIT_TIME; 4237 break; 4238 4239 case MFI_STATE_DEVICE_SCAN: 4240 max_wait = MEGASAS_RESET_WAIT_TIME; 4241 break; 4242 4243 case MFI_STATE_FLUSH_CACHE: 4244 max_wait = MEGASAS_RESET_WAIT_TIME; 4245 break; 4246 4247 default: 4248 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4249 fw_state); 4250 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4251 megasas_dump_reg_set(instance->reg_set); 4252 return -ENODEV; 4253 } 4254 4255 /* 4256 * The cur_state should not last for more than max_wait secs 4257 */ 4258 for (i = 0; i < max_wait * 50; i++) { 4259 curr_abs_state = instance->instancet-> 4260 read_fw_status_reg(instance); 4261 4262 if (abs_state == curr_abs_state) { 4263 msleep(20); 4264 } else 4265 break; 4266 } 4267 4268 /* 4269 * Return error if fw_state hasn't changed after max_wait 4270 */ 4271 if (curr_abs_state == abs_state) { 4272 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4273 "in %d secs\n", fw_state, max_wait); 4274 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4275 megasas_dump_reg_set(instance->reg_set); 4276 return -ENODEV; 4277 } 4278 4279 abs_state = curr_abs_state; 4280 fw_state = curr_abs_state & MFI_STATE_MASK; 4281 } 4282 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4283 4284 return 0; 4285 } 4286 4287 /** 4288 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4289 * @instance: Adapter soft state 4290 */ 4291 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4292 { 4293 int i; 4294 u16 max_cmd = instance->max_mfi_cmds; 4295 struct megasas_cmd *cmd; 4296 4297 if (!instance->frame_dma_pool) 4298 return; 4299 4300 /* 4301 * Return all frames to pool 4302 */ 4303 for (i = 0; i < max_cmd; i++) { 4304 4305 cmd = instance->cmd_list[i]; 4306 4307 if (cmd->frame) 4308 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4309 cmd->frame_phys_addr); 4310 4311 if (cmd->sense) 4312 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4313 cmd->sense_phys_addr); 4314 } 4315 4316 /* 4317 * Now destroy the pool itself 4318 */ 4319 dma_pool_destroy(instance->frame_dma_pool); 4320 dma_pool_destroy(instance->sense_dma_pool); 4321 4322 instance->frame_dma_pool = NULL; 4323 instance->sense_dma_pool = NULL; 4324 } 4325 4326 /** 4327 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4328 * @instance: Adapter soft state 4329 * 4330 * Each command packet has an embedded DMA memory buffer that is used for 4331 * filling MFI frame and the SG list that immediately follows the frame. This 4332 * function creates those DMA memory buffers for each command packet by using 4333 * PCI pool facility. 4334 */ 4335 static int megasas_create_frame_pool(struct megasas_instance *instance) 4336 { 4337 int i; 4338 u16 max_cmd; 4339 u32 frame_count; 4340 struct megasas_cmd *cmd; 4341 4342 max_cmd = instance->max_mfi_cmds; 4343 4344 /* 4345 * For MFI controllers. 4346 * max_num_sge = 60 4347 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4348 * Total 960 byte (15 MFI frame of 64 byte) 4349 * 4350 * Fusion adapter require only 3 extra frame. 4351 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4352 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4353 * Total 192 byte (3 MFI frame of 64 byte) 4354 */ 4355 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4356 (15 + 1) : (3 + 1); 4357 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4358 /* 4359 * Use DMA pool facility provided by PCI layer 4360 */ 4361 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4362 &instance->pdev->dev, 4363 instance->mfi_frame_size, 256, 0); 4364 4365 if (!instance->frame_dma_pool) { 4366 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4367 return -ENOMEM; 4368 } 4369 4370 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4371 &instance->pdev->dev, 128, 4372 4, 0); 4373 4374 if (!instance->sense_dma_pool) { 4375 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4376 4377 dma_pool_destroy(instance->frame_dma_pool); 4378 instance->frame_dma_pool = NULL; 4379 4380 return -ENOMEM; 4381 } 4382 4383 /* 4384 * Allocate and attach a frame to each of the commands in cmd_list. 4385 * By making cmd->index as the context instead of the &cmd, we can 4386 * always use 32bit context regardless of the architecture 4387 */ 4388 for (i = 0; i < max_cmd; i++) { 4389 4390 cmd = instance->cmd_list[i]; 4391 4392 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4393 GFP_KERNEL, &cmd->frame_phys_addr); 4394 4395 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4396 GFP_KERNEL, &cmd->sense_phys_addr); 4397 4398 /* 4399 * megasas_teardown_frame_pool() takes care of freeing 4400 * whatever has been allocated 4401 */ 4402 if (!cmd->frame || !cmd->sense) { 4403 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4404 megasas_teardown_frame_pool(instance); 4405 return -ENOMEM; 4406 } 4407 4408 cmd->frame->io.context = cpu_to_le32(cmd->index); 4409 cmd->frame->io.pad_0 = 0; 4410 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4411 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4412 } 4413 4414 return 0; 4415 } 4416 4417 /** 4418 * megasas_free_cmds - Free all the cmds in the free cmd pool 4419 * @instance: Adapter soft state 4420 */ 4421 void megasas_free_cmds(struct megasas_instance *instance) 4422 { 4423 int i; 4424 4425 /* First free the MFI frame pool */ 4426 megasas_teardown_frame_pool(instance); 4427 4428 /* Free all the commands in the cmd_list */ 4429 for (i = 0; i < instance->max_mfi_cmds; i++) 4430 4431 kfree(instance->cmd_list[i]); 4432 4433 /* Free the cmd_list buffer itself */ 4434 kfree(instance->cmd_list); 4435 instance->cmd_list = NULL; 4436 4437 INIT_LIST_HEAD(&instance->cmd_pool); 4438 } 4439 4440 /** 4441 * megasas_alloc_cmds - Allocates the command packets 4442 * @instance: Adapter soft state 4443 * 4444 * Each command that is issued to the FW, whether IO commands from the OS or 4445 * internal commands like IOCTLs, are wrapped in local data structure called 4446 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4447 * the FW. 4448 * 4449 * Each frame has a 32-bit field called context (tag). This context is used 4450 * to get back the megasas_cmd from the frame when a frame gets completed in 4451 * the ISR. Typically the address of the megasas_cmd itself would be used as 4452 * the context. But we wanted to keep the differences between 32 and 64 bit 4453 * systems to the mininum. We always use 32 bit integers for the context. In 4454 * this driver, the 32 bit values are the indices into an array cmd_list. 4455 * This array is used only to look up the megasas_cmd given the context. The 4456 * free commands themselves are maintained in a linked list called cmd_pool. 
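 *
 * For example (sketch), a completion handler can recover the command
 * from the 32-bit context with a plain array lookup:
 *
 *	u32 context = le32_to_cpu(frame->io.context);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 *
 * which behaves identically on 32-bit and 64-bit kernels; the context
 * is written as cpu_to_le32(cmd->index) when the frame pool is built.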
4457 */ 4458 int megasas_alloc_cmds(struct megasas_instance *instance) 4459 { 4460 int i; 4461 int j; 4462 u16 max_cmd; 4463 struct megasas_cmd *cmd; 4464 4465 max_cmd = instance->max_mfi_cmds; 4466 4467 /* 4468 * instance->cmd_list is an array of struct megasas_cmd pointers. 4469 * Allocate the dynamic array first and then allocate individual 4470 * commands. 4471 */ 4472 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4473 4474 if (!instance->cmd_list) { 4475 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4476 return -ENOMEM; 4477 } 4478 4479 for (i = 0; i < max_cmd; i++) { 4480 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4481 GFP_KERNEL); 4482 4483 if (!instance->cmd_list[i]) { 4484 4485 for (j = 0; j < i; j++) 4486 kfree(instance->cmd_list[j]); 4487 4488 kfree(instance->cmd_list); 4489 instance->cmd_list = NULL; 4490 4491 return -ENOMEM; 4492 } 4493 } 4494 4495 for (i = 0; i < max_cmd; i++) { 4496 cmd = instance->cmd_list[i]; 4497 memset(cmd, 0, sizeof(struct megasas_cmd)); 4498 cmd->index = i; 4499 cmd->scmd = NULL; 4500 cmd->instance = instance; 4501 4502 list_add_tail(&cmd->list, &instance->cmd_pool); 4503 } 4504 4505 /* 4506 * Create a frame pool and assign one frame to each cmd 4507 */ 4508 if (megasas_create_frame_pool(instance)) { 4509 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4510 megasas_free_cmds(instance); 4511 return -ENOMEM; 4512 } 4513 4514 return 0; 4515 } 4516 4517 /* 4518 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4519 * @instance: Adapter soft state 4520 * 4521 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4522 * or FW is not under OCR. 4523 */ 4524 inline int 4525 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4526 4527 if (instance->adapter_type == MFI_SERIES) 4528 return KILL_ADAPTER; 4529 else if (instance->unload || 4530 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, 4531 &instance->reset_flags)) 4532 return IGNORE_TIMEOUT; 4533 else 4534 return INITIATE_OCR; 4535 } 4536 4537 static void 4538 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4539 { 4540 int ret; 4541 struct megasas_cmd *cmd; 4542 struct megasas_dcmd_frame *dcmd; 4543 4544 struct MR_PRIV_DEVICE *mr_device_priv_data; 4545 u16 device_id = 0; 4546 4547 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4548 cmd = megasas_get_cmd(instance); 4549 4550 if (!cmd) { 4551 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4552 return; 4553 } 4554 4555 dcmd = &cmd->frame->dcmd; 4556 4557 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4558 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4559 4560 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4561 dcmd->cmd = MFI_CMD_DCMD; 4562 dcmd->cmd_status = 0xFF; 4563 dcmd->sge_count = 1; 4564 dcmd->flags = MFI_FRAME_DIR_READ; 4565 dcmd->timeout = 0; 4566 dcmd->pad_0 = 0; 4567 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4568 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4569 4570 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4571 sizeof(struct MR_PD_INFO)); 4572 4573 if ((instance->adapter_type != MFI_SERIES) && 4574 !instance->mask_interrupts) 4575 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4576 else 4577 ret = megasas_issue_polled(instance, cmd); 4578 4579 switch (ret) { 4580 case DCMD_SUCCESS: 4581 mr_device_priv_data = sdev->hostdata; 4582 le16_to_cpus((u16 
*)&instance->pd_info->state.ddf.pdType); 4583 mr_device_priv_data->interface_type = 4584 instance->pd_info->state.ddf.pdType.intf; 4585 break; 4586 4587 case DCMD_TIMEOUT: 4588 4589 switch (dcmd_timeout_ocr_possible(instance)) { 4590 case INITIATE_OCR: 4591 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4592 mutex_unlock(&instance->reset_mutex); 4593 megasas_reset_fusion(instance->host, 4594 MFI_IO_TIMEOUT_OCR); 4595 mutex_lock(&instance->reset_mutex); 4596 break; 4597 case KILL_ADAPTER: 4598 megaraid_sas_kill_hba(instance); 4599 break; 4600 case IGNORE_TIMEOUT: 4601 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4602 __func__, __LINE__); 4603 break; 4604 } 4605 4606 break; 4607 } 4608 4609 if (ret != DCMD_TIMEOUT) 4610 megasas_return_cmd(instance, cmd); 4611 4612 return; 4613 } 4614 /* 4615 * megasas_get_pd_list_info - Returns FW's pd_list structure 4616 * @instance: Adapter soft state 4617 * @pd_list: pd_list structure 4618 * 4619 * Issues an internal command (DCMD) to get the FW's controller PD 4620 * list structure. This information is mainly used to find out SYSTEM 4621 * supported by the FW. 4622 */ 4623 static int 4624 megasas_get_pd_list(struct megasas_instance *instance) 4625 { 4626 int ret = 0, pd_index = 0; 4627 struct megasas_cmd *cmd; 4628 struct megasas_dcmd_frame *dcmd; 4629 struct MR_PD_LIST *ci; 4630 struct MR_PD_ADDRESS *pd_addr; 4631 4632 if (instance->pd_list_not_supported) { 4633 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4634 "not supported by firmware\n"); 4635 return ret; 4636 } 4637 4638 ci = instance->pd_list_buf; 4639 4640 cmd = megasas_get_cmd(instance); 4641 4642 if (!cmd) { 4643 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4644 return -ENOMEM; 4645 } 4646 4647 dcmd = &cmd->frame->dcmd; 4648 4649 memset(ci, 0, sizeof(*ci)); 4650 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4651 4652 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4653 dcmd->mbox.b[1] = 0; 4654 dcmd->cmd = MFI_CMD_DCMD; 4655 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4656 dcmd->sge_count = 1; 4657 dcmd->flags = MFI_FRAME_DIR_READ; 4658 dcmd->timeout = 0; 4659 dcmd->pad_0 = 0; 4660 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4661 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4662 4663 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4664 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4665 4666 if ((instance->adapter_type != MFI_SERIES) && 4667 !instance->mask_interrupts) 4668 ret = megasas_issue_blocked_cmd(instance, cmd, 4669 MFI_IO_TIMEOUT_SECS); 4670 else 4671 ret = megasas_issue_polled(instance, cmd); 4672 4673 switch (ret) { 4674 case DCMD_FAILED: 4675 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4676 "failed/not supported by firmware\n"); 4677 4678 if (instance->adapter_type != MFI_SERIES) 4679 megaraid_sas_kill_hba(instance); 4680 else 4681 instance->pd_list_not_supported = 1; 4682 break; 4683 case DCMD_TIMEOUT: 4684 4685 switch (dcmd_timeout_ocr_possible(instance)) { 4686 case INITIATE_OCR: 4687 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4688 /* 4689 * DCMD failed from AEN path. 4690 * AEN path already hold reset_mutex to avoid PCI access 4691 * while OCR is in progress. 
4692 */ 4693 mutex_unlock(&instance->reset_mutex); 4694 megasas_reset_fusion(instance->host, 4695 MFI_IO_TIMEOUT_OCR); 4696 mutex_lock(&instance->reset_mutex); 4697 break; 4698 case KILL_ADAPTER: 4699 megaraid_sas_kill_hba(instance); 4700 break; 4701 case IGNORE_TIMEOUT: 4702 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4703 __func__, __LINE__); 4704 break; 4705 } 4706 4707 break; 4708 4709 case DCMD_SUCCESS: 4710 pd_addr = ci->addr; 4711 if (megasas_dbg_lvl & LD_PD_DEBUG) 4712 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4713 __func__, le32_to_cpu(ci->count)); 4714 4715 if ((le32_to_cpu(ci->count) > 4716 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4717 break; 4718 4719 memset(instance->local_pd_list, 0, 4720 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4721 4722 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4723 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4724 le16_to_cpu(pd_addr->deviceId); 4725 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4726 pd_addr->scsiDevType; 4727 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4728 MR_PD_STATE_SYSTEM; 4729 if (megasas_dbg_lvl & LD_PD_DEBUG) 4730 dev_info(&instance->pdev->dev, 4731 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4732 pd_index, le16_to_cpu(pd_addr->deviceId), 4733 pd_addr->scsiDevType); 4734 pd_addr++; 4735 } 4736 4737 memcpy(instance->pd_list, instance->local_pd_list, 4738 sizeof(instance->pd_list)); 4739 break; 4740 4741 } 4742 4743 if (ret != DCMD_TIMEOUT) 4744 megasas_return_cmd(instance, cmd); 4745 4746 return ret; 4747 } 4748 4749 /* 4750 * megasas_get_ld_list_info - Returns FW's ld_list structure 4751 * @instance: Adapter soft state 4752 * @ld_list: ld_list structure 4753 * 4754 * Issues an internal command (DCMD) to get the FW's controller PD 4755 * list structure. This information is mainly used to find out SYSTEM 4756 * supported by the FW. 
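 *
 * On success the LD target IDs are cached roughly as follows (sketch
 * of the DCMD_SUCCESS branch further below):
 *
 *	memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
 *	for (ld_index = 0; ld_index < ld_count; ld_index++)
 *		if (ci->ldList[ld_index].state != 0)
 *			instance->ld_ids[ci->ldList[ld_index].ref.targetId] =
 *				ci->ldList[ld_index].ref.targetId;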
4757 */ 4758 static int 4759 megasas_get_ld_list(struct megasas_instance *instance) 4760 { 4761 int ret = 0, ld_index = 0, ids = 0; 4762 struct megasas_cmd *cmd; 4763 struct megasas_dcmd_frame *dcmd; 4764 struct MR_LD_LIST *ci; 4765 dma_addr_t ci_h = 0; 4766 u32 ld_count; 4767 4768 ci = instance->ld_list_buf; 4769 ci_h = instance->ld_list_buf_h; 4770 4771 cmd = megasas_get_cmd(instance); 4772 4773 if (!cmd) { 4774 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4775 return -ENOMEM; 4776 } 4777 4778 dcmd = &cmd->frame->dcmd; 4779 4780 memset(ci, 0, sizeof(*ci)); 4781 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4782 4783 if (instance->supportmax256vd) 4784 dcmd->mbox.b[0] = 1; 4785 dcmd->cmd = MFI_CMD_DCMD; 4786 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4787 dcmd->sge_count = 1; 4788 dcmd->flags = MFI_FRAME_DIR_READ; 4789 dcmd->timeout = 0; 4790 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4791 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4792 dcmd->pad_0 = 0; 4793 4794 megasas_set_dma_settings(instance, dcmd, ci_h, 4795 sizeof(struct MR_LD_LIST)); 4796 4797 if ((instance->adapter_type != MFI_SERIES) && 4798 !instance->mask_interrupts) 4799 ret = megasas_issue_blocked_cmd(instance, cmd, 4800 MFI_IO_TIMEOUT_SECS); 4801 else 4802 ret = megasas_issue_polled(instance, cmd); 4803 4804 ld_count = le32_to_cpu(ci->ldCount); 4805 4806 switch (ret) { 4807 case DCMD_FAILED: 4808 megaraid_sas_kill_hba(instance); 4809 break; 4810 case DCMD_TIMEOUT: 4811 4812 switch (dcmd_timeout_ocr_possible(instance)) { 4813 case INITIATE_OCR: 4814 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4815 /* 4816 * DCMD failed from AEN path. 4817 * AEN path already hold reset_mutex to avoid PCI access 4818 * while OCR is in progress. 4819 */ 4820 mutex_unlock(&instance->reset_mutex); 4821 megasas_reset_fusion(instance->host, 4822 MFI_IO_TIMEOUT_OCR); 4823 mutex_lock(&instance->reset_mutex); 4824 break; 4825 case KILL_ADAPTER: 4826 megaraid_sas_kill_hba(instance); 4827 break; 4828 case IGNORE_TIMEOUT: 4829 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4830 __func__, __LINE__); 4831 break; 4832 } 4833 4834 break; 4835 4836 case DCMD_SUCCESS: 4837 if (megasas_dbg_lvl & LD_PD_DEBUG) 4838 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4839 __func__, ld_count); 4840 4841 if (ld_count > instance->fw_supported_vd_count) 4842 break; 4843 4844 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4845 4846 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4847 if (ci->ldList[ld_index].state != 0) { 4848 ids = ci->ldList[ld_index].ref.targetId; 4849 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4850 if (megasas_dbg_lvl & LD_PD_DEBUG) 4851 dev_info(&instance->pdev->dev, 4852 "LD%d: targetID: 0x%03x\n", 4853 ld_index, ids); 4854 } 4855 } 4856 4857 break; 4858 } 4859 4860 if (ret != DCMD_TIMEOUT) 4861 megasas_return_cmd(instance, cmd); 4862 4863 return ret; 4864 } 4865 4866 /** 4867 * megasas_ld_list_query - Returns FW's ld_list structure 4868 * @instance: Adapter soft state 4869 * @query_type: ld_list structure type 4870 * 4871 * Issues an internal command (DCMD) to get the FW's controller PD 4872 * list structure. This information is mainly used to find out SYSTEM 4873 * supported by the FW. 
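 *
 * The query DCMD is populated along these lines (sketch of the code
 * below; all fields and constants as used there):
 *
 *	dcmd->mbox.b[0]     = query_type;
 *	dcmd->opcode        = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
 *	dcmd->data_xfer_len =
 *		cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *
 * and a DCMD_FAILED result makes the driver fall back to the older
 * megasas_get_ld_list() above.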
4874 */ 4875 static int 4876 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4877 { 4878 int ret = 0, ld_index = 0, ids = 0; 4879 struct megasas_cmd *cmd; 4880 struct megasas_dcmd_frame *dcmd; 4881 struct MR_LD_TARGETID_LIST *ci; 4882 dma_addr_t ci_h = 0; 4883 u32 tgtid_count; 4884 4885 ci = instance->ld_targetid_list_buf; 4886 ci_h = instance->ld_targetid_list_buf_h; 4887 4888 cmd = megasas_get_cmd(instance); 4889 4890 if (!cmd) { 4891 dev_warn(&instance->pdev->dev, 4892 "megasas_ld_list_query: Failed to get cmd\n"); 4893 return -ENOMEM; 4894 } 4895 4896 dcmd = &cmd->frame->dcmd; 4897 4898 memset(ci, 0, sizeof(*ci)); 4899 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4900 4901 dcmd->mbox.b[0] = query_type; 4902 if (instance->supportmax256vd) 4903 dcmd->mbox.b[2] = 1; 4904 4905 dcmd->cmd = MFI_CMD_DCMD; 4906 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4907 dcmd->sge_count = 1; 4908 dcmd->flags = MFI_FRAME_DIR_READ; 4909 dcmd->timeout = 0; 4910 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4911 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4912 dcmd->pad_0 = 0; 4913 4914 megasas_set_dma_settings(instance, dcmd, ci_h, 4915 sizeof(struct MR_LD_TARGETID_LIST)); 4916 4917 if ((instance->adapter_type != MFI_SERIES) && 4918 !instance->mask_interrupts) 4919 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4920 else 4921 ret = megasas_issue_polled(instance, cmd); 4922 4923 switch (ret) { 4924 case DCMD_FAILED: 4925 dev_info(&instance->pdev->dev, 4926 "DCMD not supported by firmware - %s %d\n", 4927 __func__, __LINE__); 4928 ret = megasas_get_ld_list(instance); 4929 break; 4930 case DCMD_TIMEOUT: 4931 switch (dcmd_timeout_ocr_possible(instance)) { 4932 case INITIATE_OCR: 4933 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4934 /* 4935 * DCMD failed from AEN path. 4936 * AEN path already hold reset_mutex to avoid PCI access 4937 * while OCR is in progress. 
4938 */ 4939 mutex_unlock(&instance->reset_mutex); 4940 megasas_reset_fusion(instance->host, 4941 MFI_IO_TIMEOUT_OCR); 4942 mutex_lock(&instance->reset_mutex); 4943 break; 4944 case KILL_ADAPTER: 4945 megaraid_sas_kill_hba(instance); 4946 break; 4947 case IGNORE_TIMEOUT: 4948 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4949 __func__, __LINE__); 4950 break; 4951 } 4952 4953 break; 4954 case DCMD_SUCCESS: 4955 tgtid_count = le32_to_cpu(ci->count); 4956 4957 if (megasas_dbg_lvl & LD_PD_DEBUG) 4958 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4959 __func__, tgtid_count); 4960 4961 if ((tgtid_count > (instance->fw_supported_vd_count))) 4962 break; 4963 4964 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4965 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4966 ids = ci->targetId[ld_index]; 4967 instance->ld_ids[ids] = ci->targetId[ld_index]; 4968 if (megasas_dbg_lvl & LD_PD_DEBUG) 4969 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4970 ld_index, ci->targetId[ld_index]); 4971 } 4972 4973 break; 4974 } 4975 4976 if (ret != DCMD_TIMEOUT) 4977 megasas_return_cmd(instance, cmd); 4978 4979 return ret; 4980 } 4981 4982 /** 4983 * megasas_host_device_list_query 4984 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4985 * dcmd.mbox - reserved 4986 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4987 * Desc: This DCMD will return the combined device list 4988 * Status: MFI_STAT_OK - List returned successfully 4989 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4990 * disabled 4991 * @instance: Adapter soft state 4992 * @is_probe: Driver probe check 4993 * Return: 0 if DCMD succeeded 4994 * non-zero if failed 4995 */ 4996 static int 4997 megasas_host_device_list_query(struct megasas_instance *instance, 4998 bool is_probe) 4999 { 5000 int ret, i, target_id; 5001 struct megasas_cmd *cmd; 5002 struct megasas_dcmd_frame *dcmd; 5003 struct MR_HOST_DEVICE_LIST *ci; 5004 u32 count; 5005 dma_addr_t ci_h; 5006 5007 ci = instance->host_device_list_buf; 5008 ci_h = instance->host_device_list_buf_h; 5009 5010 cmd = megasas_get_cmd(instance); 5011 5012 if (!cmd) { 5013 dev_warn(&instance->pdev->dev, 5014 "%s: failed to get cmd\n", 5015 __func__); 5016 return -ENOMEM; 5017 } 5018 5019 dcmd = &cmd->frame->dcmd; 5020 5021 memset(ci, 0, sizeof(*ci)); 5022 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5023 5024 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 5025 dcmd->cmd = MFI_CMD_DCMD; 5026 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5027 dcmd->sge_count = 1; 5028 dcmd->flags = MFI_FRAME_DIR_READ; 5029 dcmd->timeout = 0; 5030 dcmd->pad_0 = 0; 5031 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 5032 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 5033 5034 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 5035 5036 if (!instance->mask_interrupts) { 5037 ret = megasas_issue_blocked_cmd(instance, cmd, 5038 MFI_IO_TIMEOUT_SECS); 5039 } else { 5040 ret = megasas_issue_polled(instance, cmd); 5041 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5042 } 5043 5044 switch (ret) { 5045 case DCMD_SUCCESS: 5046 /* Fill the internal pd_list and ld_ids array based on 5047 * targetIds returned by FW 5048 */ 5049 count = le32_to_cpu(ci->count); 5050 5051 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 5052 break; 5053 5054 if (megasas_dbg_lvl & LD_PD_DEBUG) 5055 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 5056 __func__, count); 5057 5058 memset(instance->local_pd_list, 0, 5059 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 5060 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 5061 for (i = 0; i < count; i++) { 5062 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 5063 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 5064 instance->local_pd_list[target_id].tid = target_id; 5065 instance->local_pd_list[target_id].driveType = 5066 ci->host_device_list[i].scsi_type; 5067 instance->local_pd_list[target_id].driveState = 5068 MR_PD_STATE_SYSTEM; 5069 if (megasas_dbg_lvl & LD_PD_DEBUG) 5070 dev_info(&instance->pdev->dev, 5071 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 5072 i, target_id, ci->host_device_list[i].scsi_type); 5073 } else { 5074 instance->ld_ids[target_id] = target_id; 5075 if (megasas_dbg_lvl & LD_PD_DEBUG) 5076 dev_info(&instance->pdev->dev, 5077 "Device %d: LD targetID: 0x%03x\n", 5078 i, target_id); 5079 } 5080 } 5081 5082 memcpy(instance->pd_list, instance->local_pd_list, 5083 sizeof(instance->pd_list)); 5084 break; 5085 5086 case DCMD_TIMEOUT: 5087 switch (dcmd_timeout_ocr_possible(instance)) { 5088 case INITIATE_OCR: 5089 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5090 mutex_unlock(&instance->reset_mutex); 5091 megasas_reset_fusion(instance->host, 5092 MFI_IO_TIMEOUT_OCR); 5093 mutex_lock(&instance->reset_mutex); 5094 break; 5095 case KILL_ADAPTER: 5096 megaraid_sas_kill_hba(instance); 5097 break; 5098 case IGNORE_TIMEOUT: 5099 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5100 __func__, __LINE__); 5101 break; 5102 } 5103 break; 5104 case DCMD_FAILED: 5105 dev_err(&instance->pdev->dev, 5106 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 5107 __func__); 5108 break; 5109 } 5110 5111 if (ret != DCMD_TIMEOUT) 5112 megasas_return_cmd(instance, cmd); 5113 5114 return ret; 5115 } 5116 5117 /* 5118 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 5119 * instance : Controller's instance 5120 */ 5121 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 5122 { 5123 struct fusion_context *fusion; 5124 u32 ventura_map_sz = 0; 5125 5126 fusion = instance->ctrl_context; 5127 /* For MFI based controllers return dummy success */ 5128 if (!fusion) 5129 return; 5130 5131 instance->supportmax256vd = 5132 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 5133 /* Below is additional check to address future FW enhancement */ 5134 if (instance->ctrl_info_buf->max_lds > 64) 5135 instance->supportmax256vd = 1; 5136 5137 
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 5138 * MEGASAS_MAX_DEV_PER_CHANNEL; 5139 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 5140 * MEGASAS_MAX_DEV_PER_CHANNEL; 5141 if (instance->supportmax256vd) { 5142 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 5143 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5144 } else { 5145 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5146 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5147 } 5148 5149 dev_info(&instance->pdev->dev, 5150 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 5151 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 5152 instance->ctrl_info_buf->max_lds); 5153 5154 if (instance->max_raid_mapsize) { 5155 ventura_map_sz = instance->max_raid_mapsize * 5156 MR_MIN_MAP_SIZE; /* 64k */ 5157 fusion->current_map_sz = ventura_map_sz; 5158 fusion->max_map_sz = ventura_map_sz; 5159 } else { 5160 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 5161 (sizeof(struct MR_LD_SPAN_MAP) * 5162 (instance->fw_supported_vd_count - 1)); 5163 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 5164 5165 fusion->max_map_sz = 5166 max(fusion->old_map_sz, fusion->new_map_sz); 5167 5168 if (instance->supportmax256vd) 5169 fusion->current_map_sz = fusion->new_map_sz; 5170 else 5171 fusion->current_map_sz = fusion->old_map_sz; 5172 } 5173 /* irrespective of FW raid maps, driver raid map is constant */ 5174 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 5175 } 5176 5177 /* 5178 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 5179 * dcmd.hdr.length - number of bytes to read 5180 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 5181 * Desc: Fill in snapdump properties 5182 * Status: MFI_STAT_OK- Command successful 5183 */ 5184 void megasas_get_snapdump_properties(struct megasas_instance *instance) 5185 { 5186 int ret = 0; 5187 struct megasas_cmd *cmd; 5188 struct megasas_dcmd_frame *dcmd; 5189 struct MR_SNAPDUMP_PROPERTIES *ci; 5190 dma_addr_t ci_h = 0; 5191 5192 ci = instance->snapdump_prop; 5193 ci_h = instance->snapdump_prop_h; 5194 5195 if (!ci) 5196 return; 5197 5198 cmd = megasas_get_cmd(instance); 5199 5200 if (!cmd) { 5201 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 5202 return; 5203 } 5204 5205 dcmd = &cmd->frame->dcmd; 5206 5207 memset(ci, 0, sizeof(*ci)); 5208 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5209 5210 dcmd->cmd = MFI_CMD_DCMD; 5211 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5212 dcmd->sge_count = 1; 5213 dcmd->flags = MFI_FRAME_DIR_READ; 5214 dcmd->timeout = 0; 5215 dcmd->pad_0 = 0; 5216 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5217 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5218 5219 megasas_set_dma_settings(instance, dcmd, ci_h, 5220 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5221 5222 if (!instance->mask_interrupts) { 5223 ret = megasas_issue_blocked_cmd(instance, cmd, 5224 MFI_IO_TIMEOUT_SECS); 5225 } else { 5226 ret = megasas_issue_polled(instance, cmd); 5227 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5228 } 5229 5230 switch (ret) { 5231 case DCMD_SUCCESS: 5232 instance->snapdump_wait_time = 5233 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5234 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5235 break; 5236 5237 case DCMD_TIMEOUT: 5238 switch (dcmd_timeout_ocr_possible(instance)) { 5239 case INITIATE_OCR: 5240 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5241 mutex_unlock(&instance->reset_mutex); 5242 megasas_reset_fusion(instance->host, 5243 MFI_IO_TIMEOUT_OCR); 5244 
mutex_lock(&instance->reset_mutex); 5245 break; 5246 case KILL_ADAPTER: 5247 megaraid_sas_kill_hba(instance); 5248 break; 5249 case IGNORE_TIMEOUT: 5250 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5251 __func__, __LINE__); 5252 break; 5253 } 5254 } 5255 5256 if (ret != DCMD_TIMEOUT) 5257 megasas_return_cmd(instance, cmd); 5258 } 5259 5260 /** 5261 * megasas_get_ctrl_info - Returns FW's controller structure 5262 * @instance: Adapter soft state 5263 * 5264 * Issues an internal command (DCMD) to get the FW's controller structure. 5265 * This information is mainly used to find out the maximum IO transfer per 5266 * command supported by the FW. 5267 */ 5268 int 5269 megasas_get_ctrl_info(struct megasas_instance *instance) 5270 { 5271 int ret = 0; 5272 struct megasas_cmd *cmd; 5273 struct megasas_dcmd_frame *dcmd; 5274 struct megasas_ctrl_info *ci; 5275 dma_addr_t ci_h = 0; 5276 5277 ci = instance->ctrl_info_buf; 5278 ci_h = instance->ctrl_info_buf_h; 5279 5280 cmd = megasas_get_cmd(instance); 5281 5282 if (!cmd) { 5283 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5284 return -ENOMEM; 5285 } 5286 5287 dcmd = &cmd->frame->dcmd; 5288 5289 memset(ci, 0, sizeof(*ci)); 5290 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5291 5292 dcmd->cmd = MFI_CMD_DCMD; 5293 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5294 dcmd->sge_count = 1; 5295 dcmd->flags = MFI_FRAME_DIR_READ; 5296 dcmd->timeout = 0; 5297 dcmd->pad_0 = 0; 5298 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5299 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5300 dcmd->mbox.b[0] = 1; 5301 5302 megasas_set_dma_settings(instance, dcmd, ci_h, 5303 sizeof(struct megasas_ctrl_info)); 5304 5305 if ((instance->adapter_type != MFI_SERIES) && 5306 !instance->mask_interrupts) { 5307 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5308 } else { 5309 ret = megasas_issue_polled(instance, cmd); 5310 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5311 } 5312 5313 switch (ret) { 5314 case DCMD_SUCCESS: 5315 /* Save required controller information in 5316 * CPU endianness format. 5317 */ 5318 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5319 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5320 le32_to_cpus((u32 *)&ci->adapterOperations2); 5321 le32_to_cpus((u32 *)&ci->adapterOperations3); 5322 le16_to_cpus((u16 *)&ci->adapter_operations4); 5323 le32_to_cpus((u32 *)&ci->adapter_operations5); 5324 5325 /* Update the latest Ext VD info. 5326 * From Init path, store current firmware details. 5327 * From OCR path, detect any firmware properties changes. 5328 * in case of Firmware upgrade without system reboot. 5329 */ 5330 megasas_update_ext_vd_details(instance); 5331 instance->support_seqnum_jbod_fp = 5332 ci->adapterOperations3.useSeqNumJbodFP; 5333 instance->support_morethan256jbod = 5334 ci->adapter_operations4.support_pd_map_target_id; 5335 instance->support_nvme_passthru = 5336 ci->adapter_operations4.support_nvme_passthru; 5337 instance->support_pci_lane_margining = 5338 ci->adapter_operations5.support_pci_lane_margining; 5339 instance->task_abort_tmo = ci->TaskAbortTO; 5340 instance->max_reset_tmo = ci->MaxResetTO; 5341 5342 /*Check whether controller is iMR or MR */ 5343 instance->is_imr = (ci->memory_size ? 0 : 1); 5344 5345 instance->snapdump_wait_time = 5346 (ci->properties.on_off_properties2.enable_snap_dump ? 
5347 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5348 5349 instance->enable_fw_dev_list = 5350 ci->properties.on_off_properties2.enable_fw_dev_list; 5351 5352 dev_info(&instance->pdev->dev, 5353 "controller type\t: %s(%dMB)\n", 5354 instance->is_imr ? "iMR" : "MR", 5355 le16_to_cpu(ci->memory_size)); 5356 5357 instance->disableOnlineCtrlReset = 5358 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5359 instance->secure_jbod_support = 5360 ci->adapterOperations3.supportSecurityonJBOD; 5361 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5362 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5363 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5364 instance->secure_jbod_support ? "Yes" : "No"); 5365 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5366 instance->support_nvme_passthru ? "Yes" : "No"); 5367 dev_info(&instance->pdev->dev, 5368 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5369 instance->task_abort_tmo, instance->max_reset_tmo); 5370 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5371 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5372 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5373 instance->support_pci_lane_margining ? "Yes" : "No"); 5374 5375 break; 5376 5377 case DCMD_TIMEOUT: 5378 switch (dcmd_timeout_ocr_possible(instance)) { 5379 case INITIATE_OCR: 5380 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5381 mutex_unlock(&instance->reset_mutex); 5382 megasas_reset_fusion(instance->host, 5383 MFI_IO_TIMEOUT_OCR); 5384 mutex_lock(&instance->reset_mutex); 5385 break; 5386 case KILL_ADAPTER: 5387 megaraid_sas_kill_hba(instance); 5388 break; 5389 case IGNORE_TIMEOUT: 5390 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5391 __func__, __LINE__); 5392 break; 5393 } 5394 break; 5395 case DCMD_FAILED: 5396 megaraid_sas_kill_hba(instance); 5397 break; 5398 5399 } 5400 5401 if (ret != DCMD_TIMEOUT) 5402 megasas_return_cmd(instance, cmd); 5403 5404 return ret; 5405 } 5406 5407 /* 5408 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5409 * to firmware 5410 * 5411 * @instance: Adapter soft state 5412 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5413 MR_CRASH_BUF_TURN_OFF = 0 5414 MR_CRASH_BUF_TURN_ON = 1 5415 * @return 0 on success non-zero on failure. 5416 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5417 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5418 * that driver supports crash dump feature. This DCMD will be sent only if 5419 * crash dump feature is supported by the FW. 
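 *
 * A typical use (sketch; both constants are listed above) is simply:
 *
 *	megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_ON);
 *	...
 *	megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_OFF);
 *
 * with the crash dump DMA buffer address taken from
 * instance->crash_dump_h inside the function.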
5420 * 5421 */ 5422 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5423 u8 crash_buf_state) 5424 { 5425 int ret = 0; 5426 struct megasas_cmd *cmd; 5427 struct megasas_dcmd_frame *dcmd; 5428 5429 cmd = megasas_get_cmd(instance); 5430 5431 if (!cmd) { 5432 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5433 return -ENOMEM; 5434 } 5435 5436 5437 dcmd = &cmd->frame->dcmd; 5438 5439 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5440 dcmd->mbox.b[0] = crash_buf_state; 5441 dcmd->cmd = MFI_CMD_DCMD; 5442 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5443 dcmd->sge_count = 1; 5444 dcmd->flags = MFI_FRAME_DIR_NONE; 5445 dcmd->timeout = 0; 5446 dcmd->pad_0 = 0; 5447 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5448 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5449 5450 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5451 CRASH_DMA_BUF_SIZE); 5452 5453 if ((instance->adapter_type != MFI_SERIES) && 5454 !instance->mask_interrupts) 5455 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5456 else 5457 ret = megasas_issue_polled(instance, cmd); 5458 5459 if (ret == DCMD_TIMEOUT) { 5460 switch (dcmd_timeout_ocr_possible(instance)) { 5461 case INITIATE_OCR: 5462 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5463 megasas_reset_fusion(instance->host, 5464 MFI_IO_TIMEOUT_OCR); 5465 break; 5466 case KILL_ADAPTER: 5467 megaraid_sas_kill_hba(instance); 5468 break; 5469 case IGNORE_TIMEOUT: 5470 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5471 __func__, __LINE__); 5472 break; 5473 } 5474 } else 5475 megasas_return_cmd(instance, cmd); 5476 5477 return ret; 5478 } 5479 5480 /** 5481 * megasas_issue_init_mfi - Initializes the FW 5482 * @instance: Adapter soft state 5483 * 5484 * Issues the INIT MFI cmd 5485 */ 5486 static int 5487 megasas_issue_init_mfi(struct megasas_instance *instance) 5488 { 5489 __le32 context; 5490 struct megasas_cmd *cmd; 5491 struct megasas_init_frame *init_frame; 5492 struct megasas_init_queue_info *initq_info; 5493 dma_addr_t init_frame_h; 5494 dma_addr_t initq_info_h; 5495 5496 /* 5497 * Prepare a init frame. Note the init frame points to queue info 5498 * structure. Each frame has SGL allocated after first 64 bytes. For 5499 * this frame - since we don't need any SGL - we use SGL's space as 5500 * queue info structure 5501 * 5502 * We will not get a NULL command below. We just created the pool. 
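 *
 * Resulting frame layout (sketch of the pointer math just below):
 *
 *	cmd->frame_phys_addr + 0  : struct megasas_init_frame (64 bytes)
 *	cmd->frame_phys_addr + 64 : struct megasas_init_queue_info
 *				    (placed in the otherwise unused SGL space)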
5503 */ 5504 cmd = megasas_get_cmd(instance); 5505 5506 init_frame = (struct megasas_init_frame *)cmd->frame; 5507 initq_info = (struct megasas_init_queue_info *) 5508 ((unsigned long)init_frame + 64); 5509 5510 init_frame_h = cmd->frame_phys_addr; 5511 initq_info_h = init_frame_h + 64; 5512 5513 context = init_frame->context; 5514 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5515 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5516 init_frame->context = context; 5517 5518 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5519 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5520 5521 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5522 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5523 5524 init_frame->cmd = MFI_CMD_INIT; 5525 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5526 init_frame->queue_info_new_phys_addr_lo = 5527 cpu_to_le32(lower_32_bits(initq_info_h)); 5528 init_frame->queue_info_new_phys_addr_hi = 5529 cpu_to_le32(upper_32_bits(initq_info_h)); 5530 5531 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5532 5533 /* 5534 * disable the intr before firing the init frame to FW 5535 */ 5536 instance->instancet->disable_intr(instance); 5537 5538 /* 5539 * Issue the init frame in polled mode 5540 */ 5541 5542 if (megasas_issue_polled(instance, cmd)) { 5543 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5544 megasas_return_cmd(instance, cmd); 5545 goto fail_fw_init; 5546 } 5547 5548 megasas_return_cmd(instance, cmd); 5549 5550 return 0; 5551 5552 fail_fw_init: 5553 return -EINVAL; 5554 } 5555 5556 static u32 5557 megasas_init_adapter_mfi(struct megasas_instance *instance) 5558 { 5559 u32 context_sz; 5560 u32 reply_q_sz; 5561 5562 /* 5563 * Get various operational parameters from status register 5564 */ 5565 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5566 /* 5567 * Reduce the max supported cmds by 1. This is to ensure that the 5568 * reply_q_sz (1 more than the max cmd that driver may send) 5569 * does not exceed max cmds that the FW can support 5570 */ 5571 instance->max_fw_cmds = instance->max_fw_cmds-1; 5572 instance->max_mfi_cmds = instance->max_fw_cmds; 5573 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5574 0x10; 5575 /* 5576 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5577 * are reserved for IOCTL + driver's internal DCMDs. 5578 */ 5579 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5580 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5581 instance->max_scsi_cmds = (instance->max_fw_cmds - 5582 MEGASAS_SKINNY_INT_CMDS); 5583 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5584 } else { 5585 instance->max_scsi_cmds = (instance->max_fw_cmds - 5586 MEGASAS_INT_CMDS); 5587 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5588 } 5589 5590 instance->cur_can_queue = instance->max_scsi_cmds; 5591 /* 5592 * Create a pool of commands 5593 */ 5594 if (megasas_alloc_cmds(instance)) 5595 goto fail_alloc_cmds; 5596 5597 /* 5598 * Allocate memory for reply queue. Length of reply queue should 5599 * be _one_ more than the maximum commands handled by the firmware. 5600 * 5601 * Note: When FW completes commands, it places corresponding contex 5602 * values in this circular reply queue. This circular queue is a fairly 5603 * typical producer-consumer queue. 
FW is the producer (of completed 5604 * commands) and the driver is the consumer. 5605 */ 5606 context_sz = sizeof(u32); 5607 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5608 5609 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5610 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5611 5612 if (!instance->reply_queue) { 5613 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5614 goto fail_reply_queue; 5615 } 5616 5617 if (megasas_issue_init_mfi(instance)) 5618 goto fail_fw_init; 5619 5620 if (megasas_get_ctrl_info(instance)) { 5621 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5622 "Fail from %s %d\n", instance->unique_id, 5623 __func__, __LINE__); 5624 goto fail_fw_init; 5625 } 5626 5627 instance->fw_support_ieee = 0; 5628 instance->fw_support_ieee = 5629 (instance->instancet->read_fw_status_reg(instance) & 5630 0x04000000); 5631 5632 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5633 instance->fw_support_ieee); 5634 5635 if (instance->fw_support_ieee) 5636 instance->flag_ieee = 1; 5637 5638 return 0; 5639 5640 fail_fw_init: 5641 5642 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5643 instance->reply_queue, instance->reply_queue_h); 5644 fail_reply_queue: 5645 megasas_free_cmds(instance); 5646 5647 fail_alloc_cmds: 5648 return 1; 5649 } 5650 5651 static 5652 void megasas_setup_irq_poll(struct megasas_instance *instance) 5653 { 5654 struct megasas_irq_context *irq_ctx; 5655 u32 count, i; 5656 5657 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5658 5659 /* Initialize IRQ poll */ 5660 for (i = 0; i < count; i++) { 5661 irq_ctx = &instance->irq_context[i]; 5662 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5663 irq_ctx->irq_poll_scheduled = false; 5664 irq_poll_init(&irq_ctx->irqpoll, 5665 instance->threshold_reply_count, 5666 megasas_irqpoll); 5667 } 5668 } 5669 5670 /* 5671 * megasas_setup_irqs_ioapic - register legacy interrupts. 5672 * @instance: Adapter soft state 5673 * 5674 * Do not enable interrupt, only setup ISRs. 5675 * 5676 * Return 0 on success. 5677 */ 5678 static int 5679 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5680 { 5681 struct pci_dev *pdev; 5682 5683 pdev = instance->pdev; 5684 instance->irq_context[0].instance = instance; 5685 instance->irq_context[0].MSIxIndex = 0; 5686 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", 5687 "megasas", instance->host->host_no); 5688 if (request_irq(pci_irq_vector(pdev, 0), 5689 instance->instancet->service_isr, IRQF_SHARED, 5690 instance->irq_context->name, &instance->irq_context[0])) { 5691 dev_err(&instance->pdev->dev, 5692 "Failed to register IRQ from %s %d\n", 5693 __func__, __LINE__); 5694 return -1; 5695 } 5696 instance->perf_mode = MR_LATENCY_PERF_MODE; 5697 instance->low_latency_index_start = 0; 5698 return 0; 5699 } 5700 5701 /** 5702 * megasas_setup_irqs_msix - register MSI-x interrupts. 5703 * @instance: Adapter soft state 5704 * @is_probe: Driver probe check 5705 * 5706 * Do not enable interrupt, only setup ISRs. 5707 * 5708 * Return 0 on success. 
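 * If request_irq() fails for any vector, the vectors registered so far are
 * freed again; during probe the MSI-X vectors are then released and the
 * driver falls back to a single legacy IO-APIC interrupt via
 * megasas_setup_irqs_ioapic(), otherwise -1 is returned.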
5709 */ 5710 static int 5711 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5712 { 5713 int i, j; 5714 struct pci_dev *pdev; 5715 5716 pdev = instance->pdev; 5717 5718 /* Try MSI-x */ 5719 for (i = 0; i < instance->msix_vectors; i++) { 5720 instance->irq_context[i].instance = instance; 5721 instance->irq_context[i].MSIxIndex = i; 5722 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", 5723 "megasas", instance->host->host_no, i); 5724 if (request_irq(pci_irq_vector(pdev, i), 5725 instance->instancet->service_isr, 0, instance->irq_context[i].name, 5726 &instance->irq_context[i])) { 5727 dev_err(&instance->pdev->dev, 5728 "Failed to register IRQ for vector %d.\n", i); 5729 for (j = 0; j < i; j++) { 5730 if (j < instance->low_latency_index_start) 5731 irq_update_affinity_hint( 5732 pci_irq_vector(pdev, j), NULL); 5733 free_irq(pci_irq_vector(pdev, j), 5734 &instance->irq_context[j]); 5735 } 5736 /* Retry irq register for IO_APIC*/ 5737 instance->msix_vectors = 0; 5738 instance->msix_load_balance = false; 5739 if (is_probe) { 5740 pci_free_irq_vectors(instance->pdev); 5741 return megasas_setup_irqs_ioapic(instance); 5742 } else { 5743 return -1; 5744 } 5745 } 5746 } 5747 5748 return 0; 5749 } 5750 5751 /* 5752 * megasas_destroy_irqs- unregister interrupts. 5753 * @instance: Adapter soft state 5754 * return: void 5755 */ 5756 static void 5757 megasas_destroy_irqs(struct megasas_instance *instance) { 5758 5759 int i; 5760 int count; 5761 struct megasas_irq_context *irq_ctx; 5762 5763 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5764 if (instance->adapter_type != MFI_SERIES) { 5765 for (i = 0; i < count; i++) { 5766 irq_ctx = &instance->irq_context[i]; 5767 irq_poll_disable(&irq_ctx->irqpoll); 5768 } 5769 } 5770 5771 if (instance->msix_vectors) 5772 for (i = 0; i < instance->msix_vectors; i++) { 5773 if (i < instance->low_latency_index_start) 5774 irq_update_affinity_hint( 5775 pci_irq_vector(instance->pdev, i), NULL); 5776 free_irq(pci_irq_vector(instance->pdev, i), 5777 &instance->irq_context[i]); 5778 } 5779 else 5780 free_irq(pci_irq_vector(instance->pdev, 0), 5781 &instance->irq_context[0]); 5782 } 5783 5784 /** 5785 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5786 * @instance: Adapter soft state 5787 * 5788 * Return 0 on success. 
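 * Allocates JBOD_MAPS_COUNT DMA-coherent PD sequence-number maps on first
 * use and syncs them with firmware; if allocation or the sync DCMD fails,
 * the sequence-number based JBOD fast path (use_seqnum_jbod_fp) is simply
 * disabled instead of being treated as a fatal error.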
5789 */ 5790 void 5791 megasas_setup_jbod_map(struct megasas_instance *instance) 5792 { 5793 int i; 5794 struct fusion_context *fusion = instance->ctrl_context; 5795 u32 pd_seq_map_sz; 5796 5797 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5798 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5799 5800 instance->use_seqnum_jbod_fp = 5801 instance->support_seqnum_jbod_fp; 5802 if (reset_devices || !fusion || 5803 !instance->support_seqnum_jbod_fp) { 5804 dev_info(&instance->pdev->dev, 5805 "JBOD sequence map is disabled %s %d\n", 5806 __func__, __LINE__); 5807 instance->use_seqnum_jbod_fp = false; 5808 return; 5809 } 5810 5811 if (fusion->pd_seq_sync[0]) 5812 goto skip_alloc; 5813 5814 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5815 fusion->pd_seq_sync[i] = dma_alloc_coherent 5816 (&instance->pdev->dev, pd_seq_map_sz, 5817 &fusion->pd_seq_phys[i], GFP_KERNEL); 5818 if (!fusion->pd_seq_sync[i]) { 5819 dev_err(&instance->pdev->dev, 5820 "Failed to allocate memory from %s %d\n", 5821 __func__, __LINE__); 5822 if (i == 1) { 5823 dma_free_coherent(&instance->pdev->dev, 5824 pd_seq_map_sz, fusion->pd_seq_sync[0], 5825 fusion->pd_seq_phys[0]); 5826 fusion->pd_seq_sync[0] = NULL; 5827 } 5828 instance->use_seqnum_jbod_fp = false; 5829 return; 5830 } 5831 } 5832 5833 skip_alloc: 5834 if (!megasas_sync_pd_seq_num(instance, false) && 5835 !megasas_sync_pd_seq_num(instance, true)) 5836 instance->use_seqnum_jbod_fp = true; 5837 else 5838 instance->use_seqnum_jbod_fp = false; 5839 } 5840 5841 static void megasas_setup_reply_map(struct megasas_instance *instance) 5842 { 5843 const struct cpumask *mask; 5844 unsigned int queue, cpu, low_latency_index_start; 5845 5846 low_latency_index_start = instance->low_latency_index_start; 5847 5848 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5849 mask = pci_irq_get_affinity(instance->pdev, queue); 5850 if (!mask) 5851 goto fallback; 5852 5853 for_each_cpu(cpu, mask) 5854 instance->reply_map[cpu] = queue; 5855 } 5856 return; 5857 5858 fallback: 5859 queue = low_latency_index_start; 5860 for_each_possible_cpu(cpu) { 5861 instance->reply_map[cpu] = queue; 5862 if (queue == (instance->msix_vectors - 1)) 5863 queue = low_latency_index_start; 5864 else 5865 queue++; 5866 } 5867 } 5868 5869 /** 5870 * megasas_get_device_list - Get the PD and LD device list from FW. 5871 * @instance: Adapter soft state 5872 * @return: Success or failure 5873 * 5874 * Issue DCMDs to Firmware to get the PD and LD list. 5875 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5876 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
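 * Returns SUCCESS or FAILED (SCSI mid-layer style status values, not
 * 0/-errno).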
5877 */ 5878 static 5879 int megasas_get_device_list(struct megasas_instance *instance) 5880 { 5881 memset(instance->pd_list, 0, 5882 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5883 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5884 5885 if (instance->enable_fw_dev_list) { 5886 if (megasas_host_device_list_query(instance, true)) 5887 return FAILED; 5888 } else { 5889 if (megasas_get_pd_list(instance) < 0) { 5890 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5891 return FAILED; 5892 } 5893 5894 if (megasas_ld_list_query(instance, 5895 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5896 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5897 return FAILED; 5898 } 5899 } 5900 5901 return SUCCESS; 5902 } 5903 5904 /** 5905 * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint 5906 * for high IOPS queues 5907 * @instance: Adapter soft state 5908 * return: void 5909 */ 5910 static inline void 5911 megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) 5912 { 5913 int i; 5914 unsigned int irq; 5915 const struct cpumask *mask; 5916 5917 if (instance->perf_mode == MR_BALANCED_PERF_MODE) { 5918 mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); 5919 5920 for (i = 0; i < instance->low_latency_index_start; i++) { 5921 irq = pci_irq_vector(instance->pdev, i); 5922 irq_set_affinity_and_hint(irq, mask); 5923 } 5924 } 5925 } 5926 5927 static int 5928 __megasas_alloc_irq_vectors(struct megasas_instance *instance) 5929 { 5930 int i, irq_flags; 5931 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; 5932 struct irq_affinity *descp = &desc; 5933 5934 irq_flags = PCI_IRQ_MSIX; 5935 5936 if (instance->smp_affinity_enable) 5937 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 5938 else 5939 descp = NULL; 5940 5941 /* Do not allocate msix vectors for poll_queues. 5942 * msix_vectors is always within a range of FW supported reply queue. 5943 */ 5944 i = pci_alloc_irq_vectors_affinity(instance->pdev, 5945 instance->low_latency_index_start, 5946 instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); 5947 5948 return i; 5949 } 5950 5951 /** 5952 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors 5953 * @instance: Adapter soft state 5954 * return: void 5955 */ 5956 static void 5957 megasas_alloc_irq_vectors(struct megasas_instance *instance) 5958 { 5959 int i; 5960 unsigned int num_msix_req; 5961 5962 instance->iopoll_q_count = 0; 5963 if ((instance->adapter_type != MFI_SERIES) && 5964 poll_queues) { 5965 5966 instance->perf_mode = MR_LATENCY_PERF_MODE; 5967 instance->low_latency_index_start = 1; 5968 5969 /* reserve for default and non-mananged pre-vector. 
*/ 5970 if (instance->msix_vectors > (poll_queues + 2)) 5971 instance->iopoll_q_count = poll_queues; 5972 else 5973 instance->iopoll_q_count = 0; 5974 5975 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5976 instance->msix_vectors = min(num_msix_req, 5977 instance->msix_vectors); 5978 5979 } 5980 5981 i = __megasas_alloc_irq_vectors(instance); 5982 5983 if (((instance->perf_mode == MR_BALANCED_PERF_MODE) 5984 || instance->iopoll_q_count) && 5985 (i != (instance->msix_vectors - instance->iopoll_q_count))) { 5986 if (instance->msix_vectors) 5987 pci_free_irq_vectors(instance->pdev); 5988 /* Disable Balanced IOPS mode and try realloc vectors */ 5989 instance->perf_mode = MR_LATENCY_PERF_MODE; 5990 instance->low_latency_index_start = 1; 5991 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5992 5993 instance->msix_vectors = min(num_msix_req, 5994 instance->msix_vectors); 5995 5996 instance->iopoll_q_count = 0; 5997 i = __megasas_alloc_irq_vectors(instance); 5998 5999 } 6000 6001 dev_info(&instance->pdev->dev, 6002 "requested/available msix %d/%d poll_queue %d\n", 6003 instance->msix_vectors - instance->iopoll_q_count, 6004 i, instance->iopoll_q_count); 6005 6006 if (i > 0) 6007 instance->msix_vectors = i; 6008 else 6009 instance->msix_vectors = 0; 6010 6011 if (instance->smp_affinity_enable) 6012 megasas_set_high_iops_queue_affinity_and_hint(instance); 6013 } 6014 6015 /** 6016 * megasas_init_fw - Initializes the FW 6017 * @instance: Adapter soft state 6018 * 6019 * This is the main function for initializing firmware 6020 */ 6021 6022 static int megasas_init_fw(struct megasas_instance *instance) 6023 { 6024 u32 max_sectors_1; 6025 u32 max_sectors_2, tmp_sectors, msix_enable; 6026 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 6027 resource_size_t base_addr; 6028 void *base_addr_phys; 6029 struct megasas_ctrl_info *ctrl_info = NULL; 6030 unsigned long bar_list; 6031 int i, j, loop; 6032 struct IOV_111 *iovPtr; 6033 struct fusion_context *fusion; 6034 bool intr_coalescing; 6035 unsigned int num_msix_req; 6036 u16 lnksta, speed; 6037 6038 fusion = instance->ctrl_context; 6039 6040 /* Find first memory bar */ 6041 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 6042 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 6043 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 6044 "megasas: LSI")) { 6045 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 6046 return -EBUSY; 6047 } 6048 6049 base_addr = pci_resource_start(instance->pdev, instance->bar); 6050 instance->reg_set = ioremap(base_addr, 8192); 6051 6052 if (!instance->reg_set) { 6053 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 6054 goto fail_ioremap; 6055 } 6056 6057 base_addr_phys = &base_addr; 6058 dev_printk(KERN_DEBUG, &instance->pdev->dev, 6059 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 6060 instance->bar, base_addr_phys, instance->reg_set); 6061 6062 if (instance->adapter_type != MFI_SERIES) 6063 instance->instancet = &megasas_instance_template_fusion; 6064 else { 6065 switch (instance->pdev->device) { 6066 case PCI_DEVICE_ID_LSI_SAS1078R: 6067 case PCI_DEVICE_ID_LSI_SAS1078DE: 6068 instance->instancet = &megasas_instance_template_ppc; 6069 break; 6070 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 6071 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 6072 instance->instancet = &megasas_instance_template_gen2; 6073 break; 6074 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 6075 case 
PCI_DEVICE_ID_LSI_SAS0071SKINNY: 6076 instance->instancet = &megasas_instance_template_skinny; 6077 break; 6078 case PCI_DEVICE_ID_LSI_SAS1064R: 6079 case PCI_DEVICE_ID_DELL_PERC5: 6080 default: 6081 instance->instancet = &megasas_instance_template_xscale; 6082 instance->pd_list_not_supported = 1; 6083 break; 6084 } 6085 } 6086 6087 if (megasas_transition_to_ready(instance, 0)) { 6088 dev_info(&instance->pdev->dev, 6089 "Failed to transition controller to ready from %s!\n", 6090 __func__); 6091 if (instance->adapter_type != MFI_SERIES) { 6092 status_reg = instance->instancet->read_fw_status_reg( 6093 instance); 6094 if (status_reg & MFI_RESET_ADAPTER) { 6095 if (megasas_adp_reset_wait_for_ready 6096 (instance, true, 0) == FAILED) 6097 goto fail_ready_state; 6098 } else { 6099 goto fail_ready_state; 6100 } 6101 } else { 6102 atomic_set(&instance->fw_reset_no_pci_access, 1); 6103 instance->instancet->adp_reset 6104 (instance, instance->reg_set); 6105 atomic_set(&instance->fw_reset_no_pci_access, 0); 6106 6107 /*waiting for about 30 second before retry*/ 6108 ssleep(30); 6109 6110 if (megasas_transition_to_ready(instance, 0)) 6111 goto fail_ready_state; 6112 } 6113 6114 dev_info(&instance->pdev->dev, 6115 "FW restarted successfully from %s!\n", 6116 __func__); 6117 } 6118 6119 megasas_init_ctrl_params(instance); 6120 6121 if (megasas_set_dma_mask(instance)) 6122 goto fail_ready_state; 6123 6124 if (megasas_alloc_ctrl_mem(instance)) 6125 goto fail_alloc_dma_buf; 6126 6127 if (megasas_alloc_ctrl_dma_buffers(instance)) 6128 goto fail_alloc_dma_buf; 6129 6130 fusion = instance->ctrl_context; 6131 6132 if (instance->adapter_type >= VENTURA_SERIES) { 6133 scratch_pad_2 = 6134 megasas_readl(instance, 6135 &instance->reg_set->outbound_scratch_pad_2); 6136 instance->max_raid_mapsize = ((scratch_pad_2 >> 6137 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 6138 MR_MAX_RAID_MAP_SIZE_MASK); 6139 } 6140 6141 instance->enable_sdev_max_qd = enable_sdev_max_qd; 6142 6143 switch (instance->adapter_type) { 6144 case VENTURA_SERIES: 6145 fusion->pcie_bw_limitation = true; 6146 break; 6147 case AERO_SERIES: 6148 fusion->r56_div_offload = true; 6149 break; 6150 default: 6151 break; 6152 } 6153 6154 /* Check if MSI-X is supported while in ready state */ 6155 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 6156 0x4000000) >> 0x1a; 6157 if (msix_enable && !msix_disable) { 6158 6159 scratch_pad_1 = megasas_readl 6160 (instance, &instance->reg_set->outbound_scratch_pad_1); 6161 /* Check max MSI-X vectors */ 6162 if (fusion) { 6163 if (instance->adapter_type == THUNDERBOLT_SERIES) { 6164 /* Thunderbolt Series*/ 6165 instance->msix_vectors = (scratch_pad_1 6166 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 6167 } else { 6168 instance->msix_vectors = ((scratch_pad_1 6169 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 6170 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 6171 6172 /* 6173 * For Invader series, > 8 MSI-x vectors 6174 * supported by FW/HW implies combined 6175 * reply queue mode is enabled. 6176 * For Ventura series, > 16 MSI-x vectors 6177 * supported by FW/HW implies combined 6178 * reply queue mode is enabled. 6179 */ 6180 switch (instance->adapter_type) { 6181 case INVADER_SERIES: 6182 if (instance->msix_vectors > 8) 6183 instance->msix_combined = true; 6184 break; 6185 case AERO_SERIES: 6186 case VENTURA_SERIES: 6187 if (instance->msix_vectors > 16) 6188 instance->msix_combined = true; 6189 break; 6190 } 6191 6192 if (rdpq_enable) 6193 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
6194 1 : 0; 6195 6196 if (instance->adapter_type >= INVADER_SERIES && 6197 !instance->msix_combined) { 6198 instance->msix_load_balance = true; 6199 instance->smp_affinity_enable = false; 6200 } 6201 6202 /* Save 1-15 reply post index address to local memory 6203 * Index 0 is already saved from reg offset 6204 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 6205 */ 6206 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 6207 instance->reply_post_host_index_addr[loop] = 6208 (u32 __iomem *) 6209 ((u8 __iomem *)instance->reg_set + 6210 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 6211 + (loop * 0x10)); 6212 } 6213 } 6214 6215 dev_info(&instance->pdev->dev, 6216 "firmware supports msix\t: (%d)", 6217 instance->msix_vectors); 6218 if (msix_vectors) 6219 instance->msix_vectors = min(msix_vectors, 6220 instance->msix_vectors); 6221 } else /* MFI adapters */ 6222 instance->msix_vectors = 1; 6223 6224 6225 /* 6226 * For Aero (if some conditions are met), driver will configure a 6227 * few additional reply queues with interrupt coalescing enabled. 6228 * These queues with interrupt coalescing enabled are called 6229 * High IOPS queues and rest of reply queues (based on number of 6230 * logical CPUs) are termed as Low latency queues. 6231 * 6232 * Total Number of reply queues = High IOPS queues + low latency queues 6233 * 6234 * For rest of fusion adapters, 1 additional reply queue will be 6235 * reserved for management commands, rest of reply queues 6236 * (based on number of logical CPUs) will be used for IOs and 6237 * referenced as IO queues. 6238 * Total Number of reply queues = 1 + IO queues 6239 * 6240 * MFI adapters supports single MSI-x so single reply queue 6241 * will be used for IO and management commands. 6242 */ 6243 6244 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? 6245 true : false; 6246 if (intr_coalescing && 6247 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) && 6248 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES)) 6249 instance->perf_mode = MR_BALANCED_PERF_MODE; 6250 else 6251 instance->perf_mode = MR_LATENCY_PERF_MODE; 6252 6253 6254 if (instance->adapter_type == AERO_SERIES) { 6255 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta); 6256 speed = lnksta & PCI_EXP_LNKSTA_CLS; 6257 6258 /* 6259 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate 6260 * in latency perf mode and enable R1 PCI bandwidth algorithm 6261 */ 6262 if (speed < 0x4) { 6263 instance->perf_mode = MR_LATENCY_PERF_MODE; 6264 fusion->pcie_bw_limitation = true; 6265 } 6266 6267 /* 6268 * Performance mode settings provided through module parameter-perf_mode will 6269 * take affect only for: 6270 * 1. Aero family of adapters. 6271 * 2. When user sets module parameter- perf_mode in range of 0-2. 6272 */ 6273 if ((perf_mode >= MR_BALANCED_PERF_MODE) && 6274 (perf_mode <= MR_LATENCY_PERF_MODE)) 6275 instance->perf_mode = perf_mode; 6276 /* 6277 * If intr coalescing is not supported by controller FW, then IOPS 6278 * and Balanced modes are not feasible. 
6279 */ 6280 if (!intr_coalescing) 6281 instance->perf_mode = MR_LATENCY_PERF_MODE; 6282 6283 } 6284 6285 if (instance->perf_mode == MR_BALANCED_PERF_MODE) 6286 instance->low_latency_index_start = 6287 MR_HIGH_IOPS_QUEUE_COUNT; 6288 else 6289 instance->low_latency_index_start = 1; 6290 6291 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 6292 6293 instance->msix_vectors = min(num_msix_req, 6294 instance->msix_vectors); 6295 6296 megasas_alloc_irq_vectors(instance); 6297 if (!instance->msix_vectors) 6298 instance->msix_load_balance = false; 6299 } 6300 /* 6301 * MSI-X host index 0 is common for all adapter. 6302 * It is used for all MPT based Adapters. 6303 */ 6304 if (instance->msix_combined) { 6305 instance->reply_post_host_index_addr[0] = 6306 (u32 *)((u8 *)instance->reg_set + 6307 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 6308 } else { 6309 instance->reply_post_host_index_addr[0] = 6310 (u32 *)((u8 *)instance->reg_set + 6311 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 6312 } 6313 6314 if (!instance->msix_vectors) { 6315 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 6316 if (i < 0) 6317 goto fail_init_adapter; 6318 } 6319 6320 megasas_setup_reply_map(instance); 6321 6322 dev_info(&instance->pdev->dev, 6323 "current msix/online cpus\t: (%d/%d)\n", 6324 instance->msix_vectors, (unsigned int)num_online_cpus()); 6325 dev_info(&instance->pdev->dev, 6326 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 6327 6328 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6329 (unsigned long)instance); 6330 6331 /* 6332 * Below are default value for legacy Firmware. 6333 * non-fusion based controllers 6334 */ 6335 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6336 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6337 /* Get operational params, sge flags, send init cmd to controller */ 6338 if (instance->instancet->init_adapter(instance)) 6339 goto fail_init_adapter; 6340 6341 if (instance->adapter_type >= VENTURA_SERIES) { 6342 scratch_pad_3 = 6343 megasas_readl(instance, 6344 &instance->reg_set->outbound_scratch_pad_3); 6345 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6346 MR_DEFAULT_NVME_PAGE_SHIFT) 6347 instance->nvme_page_size = 6348 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6349 6350 dev_info(&instance->pdev->dev, 6351 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6352 } 6353 6354 if (instance->msix_vectors ? 
6355 megasas_setup_irqs_msix(instance, 1) : 6356 megasas_setup_irqs_ioapic(instance)) 6357 goto fail_init_adapter; 6358 6359 if (instance->adapter_type != MFI_SERIES) 6360 megasas_setup_irq_poll(instance); 6361 6362 instance->instancet->enable_intr(instance); 6363 6364 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6365 6366 megasas_setup_jbod_map(instance); 6367 6368 if (megasas_get_device_list(instance) != SUCCESS) { 6369 dev_err(&instance->pdev->dev, 6370 "%s: megasas_get_device_list failed\n", 6371 __func__); 6372 goto fail_get_ld_pd_list; 6373 } 6374 6375 /* stream detection initialization */ 6376 if (instance->adapter_type >= VENTURA_SERIES) { 6377 fusion->stream_detect_by_ld = 6378 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6379 sizeof(struct LD_STREAM_DETECT *), 6380 GFP_KERNEL); 6381 if (!fusion->stream_detect_by_ld) { 6382 dev_err(&instance->pdev->dev, 6383 "unable to allocate stream detection for pool of LDs\n"); 6384 goto fail_get_ld_pd_list; 6385 } 6386 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6387 fusion->stream_detect_by_ld[i] = 6388 kzalloc(sizeof(struct LD_STREAM_DETECT), 6389 GFP_KERNEL); 6390 if (!fusion->stream_detect_by_ld[i]) { 6391 dev_err(&instance->pdev->dev, 6392 "unable to allocate stream detect by LD\n "); 6393 for (j = 0; j < i; ++j) 6394 kfree(fusion->stream_detect_by_ld[j]); 6395 kfree(fusion->stream_detect_by_ld); 6396 fusion->stream_detect_by_ld = NULL; 6397 goto fail_get_ld_pd_list; 6398 } 6399 fusion->stream_detect_by_ld[i]->mru_bit_map 6400 = MR_STREAM_BITMAP; 6401 } 6402 } 6403 6404 /* 6405 * Compute the max allowed sectors per IO: The controller info has two 6406 * limits on max sectors. Driver should use the minimum of these two. 6407 * 6408 * 1 << stripe_sz_ops.min = max sectors per strip 6409 * 6410 * Note that older firmwares ( < FW ver 30) didn't report information 6411 * to calculate max_sectors_1. So the number ended up as zero always. 6412 */ 6413 tmp_sectors = 0; 6414 ctrl_info = instance->ctrl_info_buf; 6415 6416 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6417 le16_to_cpu(ctrl_info->max_strips_per_io); 6418 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6419 6420 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6421 6422 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6423 instance->passive = ctrl_info->cluster.passive; 6424 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6425 instance->UnevenSpanSupport = 6426 ctrl_info->adapterOperations2.supportUnevenSpans; 6427 if (instance->UnevenSpanSupport) { 6428 struct fusion_context *fusion = instance->ctrl_context; 6429 if (MR_ValidateMapInfo(instance, instance->map_id)) 6430 fusion->fast_path_io = 1; 6431 else 6432 fusion->fast_path_io = 0; 6433 6434 } 6435 if (ctrl_info->host_interface.SRIOV) { 6436 instance->requestorId = ctrl_info->iov.requestorId; 6437 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6438 if (!ctrl_info->adapterOperations2.activePassive) 6439 instance->PlasmaFW111 = 1; 6440 6441 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6442 instance->PlasmaFW111 ? 
"1.11" : "new"); 6443 6444 if (instance->PlasmaFW111) { 6445 iovPtr = (struct IOV_111 *) 6446 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6447 instance->requestorId = iovPtr->requestorId; 6448 } 6449 } 6450 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6451 instance->requestorId); 6452 } 6453 6454 instance->crash_dump_fw_support = 6455 ctrl_info->adapterOperations3.supportCrashDump; 6456 instance->crash_dump_drv_support = 6457 (instance->crash_dump_fw_support && 6458 instance->crash_dump_buf); 6459 if (instance->crash_dump_drv_support) 6460 megasas_set_crash_dump_params(instance, 6461 MR_CRASH_BUF_TURN_OFF); 6462 6463 else { 6464 if (instance->crash_dump_buf) 6465 dma_free_coherent(&instance->pdev->dev, 6466 CRASH_DMA_BUF_SIZE, 6467 instance->crash_dump_buf, 6468 instance->crash_dump_h); 6469 instance->crash_dump_buf = NULL; 6470 } 6471 6472 if (instance->snapdump_wait_time) { 6473 megasas_get_snapdump_properties(instance); 6474 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6475 instance->snapdump_wait_time); 6476 } 6477 6478 dev_info(&instance->pdev->dev, 6479 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6480 le16_to_cpu(ctrl_info->pci.vendor_id), 6481 le16_to_cpu(ctrl_info->pci.device_id), 6482 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6483 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6484 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6485 instance->UnevenSpanSupport ? "yes" : "no"); 6486 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6487 instance->crash_dump_drv_support ? "yes" : "no"); 6488 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6489 instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); 6490 6491 instance->max_sectors_per_req = instance->max_num_sge * 6492 SGE_BUFFER_SIZE / 512; 6493 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6494 instance->max_sectors_per_req = tmp_sectors; 6495 6496 /* Check for valid throttlequeuedepth module parameter */ 6497 if (throttlequeuedepth && 6498 throttlequeuedepth <= instance->max_scsi_cmds) 6499 instance->throttlequeuedepth = throttlequeuedepth; 6500 else 6501 instance->throttlequeuedepth = 6502 MEGASAS_THROTTLE_QUEUE_DEPTH; 6503 6504 if ((resetwaittime < 1) || 6505 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6506 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6507 6508 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6509 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6510 6511 /* Launch SR-IOV heartbeat timer */ 6512 if (instance->requestorId) { 6513 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6514 megasas_start_timer(instance); 6515 } else { 6516 instance->skip_heartbeat_timer_del = 1; 6517 goto fail_get_ld_pd_list; 6518 } 6519 } 6520 6521 /* 6522 * Create and start watchdog thread which will monitor 6523 * controller state every 1 sec and trigger OCR when 6524 * it enters fault state 6525 */ 6526 if (instance->adapter_type != MFI_SERIES) 6527 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6528 goto fail_start_watchdog; 6529 6530 return 0; 6531 6532 fail_start_watchdog: 6533 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6534 del_timer_sync(&instance->sriov_heartbeat_timer); 6535 fail_get_ld_pd_list: 6536 instance->instancet->disable_intr(instance); 6537 megasas_destroy_irqs(instance); 6538 fail_init_adapter: 6539 if (instance->msix_vectors) 6540 pci_free_irq_vectors(instance->pdev); 6541 instance->msix_vectors = 0; 6542 fail_alloc_dma_buf: 6543 megasas_free_ctrl_dma_buffers(instance); 
6544 megasas_free_ctrl_mem(instance); 6545 fail_ready_state: 6546 iounmap(instance->reg_set); 6547 6548 fail_ioremap: 6549 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6550 6551 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6552 __func__, __LINE__); 6553 return -EINVAL; 6554 } 6555 6556 /** 6557 * megasas_release_mfi - Reverses the FW initialization 6558 * @instance: Adapter soft state 6559 */ 6560 static void megasas_release_mfi(struct megasas_instance *instance) 6561 { 6562 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6563 6564 if (instance->reply_queue) 6565 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6566 instance->reply_queue, instance->reply_queue_h); 6567 6568 megasas_free_cmds(instance); 6569 6570 iounmap(instance->reg_set); 6571 6572 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6573 } 6574 6575 /** 6576 * megasas_get_seq_num - Gets latest event sequence numbers 6577 * @instance: Adapter soft state 6578 * @eli: FW event log sequence numbers information 6579 * 6580 * FW maintains a log of all events in a non-volatile area. Upper layers would 6581 * usually find out the latest sequence number of the events, the seq number at 6582 * the boot etc. They would "read" all the events below the latest seq number 6583 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6584 * number), they would subsribe to AEN (asynchronous event notification) and 6585 * wait for the events to happen. 6586 */ 6587 static int 6588 megasas_get_seq_num(struct megasas_instance *instance, 6589 struct megasas_evt_log_info *eli) 6590 { 6591 struct megasas_cmd *cmd; 6592 struct megasas_dcmd_frame *dcmd; 6593 struct megasas_evt_log_info *el_info; 6594 dma_addr_t el_info_h = 0; 6595 int ret; 6596 6597 cmd = megasas_get_cmd(instance); 6598 6599 if (!cmd) { 6600 return -ENOMEM; 6601 } 6602 6603 dcmd = &cmd->frame->dcmd; 6604 el_info = dma_alloc_coherent(&instance->pdev->dev, 6605 sizeof(struct megasas_evt_log_info), 6606 &el_info_h, GFP_KERNEL); 6607 if (!el_info) { 6608 megasas_return_cmd(instance, cmd); 6609 return -ENOMEM; 6610 } 6611 6612 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6613 6614 dcmd->cmd = MFI_CMD_DCMD; 6615 dcmd->cmd_status = 0x0; 6616 dcmd->sge_count = 1; 6617 dcmd->flags = MFI_FRAME_DIR_READ; 6618 dcmd->timeout = 0; 6619 dcmd->pad_0 = 0; 6620 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6621 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6622 6623 megasas_set_dma_settings(instance, dcmd, el_info_h, 6624 sizeof(struct megasas_evt_log_info)); 6625 6626 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6627 if (ret != DCMD_SUCCESS) { 6628 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6629 __func__, __LINE__); 6630 goto dcmd_failed; 6631 } 6632 6633 /* 6634 * Copy the data back into callers buffer 6635 */ 6636 eli->newest_seq_num = el_info->newest_seq_num; 6637 eli->oldest_seq_num = el_info->oldest_seq_num; 6638 eli->clear_seq_num = el_info->clear_seq_num; 6639 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6640 eli->boot_seq_num = el_info->boot_seq_num; 6641 6642 dcmd_failed: 6643 dma_free_coherent(&instance->pdev->dev, 6644 sizeof(struct megasas_evt_log_info), 6645 el_info, el_info_h); 6646 6647 megasas_return_cmd(instance, cmd); 6648 6649 return ret; 6650 } 6651 6652 /** 6653 * megasas_register_aen - Registers for asynchronous event notification 6654 * @instance: Adapter soft state 6655 * @seq_num: The starting sequence number 6656 * 
@class_locale_word: Class of the event 6657 * 6658 * This function subscribes for AEN for events beyond the @seq_num. It requests 6659 * to be notified if and only if the event is of type @class_locale 6660 */ 6661 static int 6662 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 6663 u32 class_locale_word) 6664 { 6665 int ret_val; 6666 struct megasas_cmd *cmd; 6667 struct megasas_dcmd_frame *dcmd; 6668 union megasas_evt_class_locale curr_aen; 6669 union megasas_evt_class_locale prev_aen; 6670 6671 /* 6672 * If there an AEN pending already (aen_cmd), check if the 6673 * class_locale of that pending AEN is inclusive of the new 6674 * AEN request we currently have. If it is, then we don't have 6675 * to do anything. In other words, whichever events the current 6676 * AEN request is subscribing to, have already been subscribed 6677 * to. 6678 * 6679 * If the old_cmd is _not_ inclusive, then we have to abort 6680 * that command, form a class_locale that is superset of both 6681 * old and current and re-issue to the FW 6682 */ 6683 6684 curr_aen.word = class_locale_word; 6685 6686 if (instance->aen_cmd) { 6687 6688 prev_aen.word = 6689 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 6690 6691 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 6692 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 6693 dev_info(&instance->pdev->dev, 6694 "%s %d out of range class %d send by application\n", 6695 __func__, __LINE__, curr_aen.members.class); 6696 return 0; 6697 } 6698 6699 /* 6700 * A class whose enum value is smaller is inclusive of all 6701 * higher values. If a PROGRESS (= -1) was previously 6702 * registered, then a new registration requests for higher 6703 * classes need not be sent to FW. They are automatically 6704 * included. 6705 * 6706 * Locale numbers don't have such hierarchy. They are bitmap 6707 * values 6708 */ 6709 if ((prev_aen.members.class <= curr_aen.members.class) && 6710 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6711 curr_aen.members.locale)) { 6712 /* 6713 * Previously issued event registration includes 6714 * current request. Nothing to do. 
6715 */ 6716 return 0; 6717 } else { 6718 curr_aen.members.locale |= prev_aen.members.locale; 6719 6720 if (prev_aen.members.class < curr_aen.members.class) 6721 curr_aen.members.class = prev_aen.members.class; 6722 6723 instance->aen_cmd->abort_aen = 1; 6724 ret_val = megasas_issue_blocked_abort_cmd(instance, 6725 instance-> 6726 aen_cmd, 30); 6727 6728 if (ret_val) { 6729 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6730 "previous AEN command\n"); 6731 return ret_val; 6732 } 6733 } 6734 } 6735 6736 cmd = megasas_get_cmd(instance); 6737 6738 if (!cmd) 6739 return -ENOMEM; 6740 6741 dcmd = &cmd->frame->dcmd; 6742 6743 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6744 6745 /* 6746 * Prepare DCMD for aen registration 6747 */ 6748 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6749 6750 dcmd->cmd = MFI_CMD_DCMD; 6751 dcmd->cmd_status = 0x0; 6752 dcmd->sge_count = 1; 6753 dcmd->flags = MFI_FRAME_DIR_READ; 6754 dcmd->timeout = 0; 6755 dcmd->pad_0 = 0; 6756 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6757 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6758 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6759 instance->last_seq_num = seq_num; 6760 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6761 6762 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6763 sizeof(struct megasas_evt_detail)); 6764 6765 if (instance->aen_cmd != NULL) { 6766 megasas_return_cmd(instance, cmd); 6767 return 0; 6768 } 6769 6770 /* 6771 * Store reference to the cmd used to register for AEN. When an 6772 * application wants us to register for AEN, we have to abort this 6773 * cmd and re-register with a new EVENT LOCALE supplied by that app 6774 */ 6775 instance->aen_cmd = cmd; 6776 6777 /* 6778 * Issue the aen registration frame 6779 */ 6780 instance->instancet->issue_dcmd(instance, cmd); 6781 6782 return 0; 6783 } 6784 6785 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6786 * 6787 * This DCMD will fetch few properties of LD/system PD defined 6788 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 6789 * 6790 * DCMD send by drivers whenever new target is added to the OS. 6791 * 6792 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6793 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6794 * 0 = system PD, 1 = LD. 6795 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6796 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6797 * 6798 * @instance: Adapter soft state 6799 * @sdev: OS provided scsi device 6800 * 6801 * Returns 0 on success non-zero on failure. 
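 * Note: if the DCMD times out and an OCR is initiated, the reset_mutex
 * (held by the caller on this path, as the unlock below implies) is
 * released around megasas_reset_fusion() and re-acquired afterwards.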
6802 */ 6803 int 6804 megasas_get_target_prop(struct megasas_instance *instance, 6805 struct scsi_device *sdev) 6806 { 6807 int ret; 6808 struct megasas_cmd *cmd; 6809 struct megasas_dcmd_frame *dcmd; 6810 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6811 sdev->id; 6812 6813 cmd = megasas_get_cmd(instance); 6814 6815 if (!cmd) { 6816 dev_err(&instance->pdev->dev, 6817 "Failed to get cmd %s\n", __func__); 6818 return -ENOMEM; 6819 } 6820 6821 dcmd = &cmd->frame->dcmd; 6822 6823 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6824 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6825 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6826 6827 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6828 dcmd->cmd = MFI_CMD_DCMD; 6829 dcmd->cmd_status = 0xFF; 6830 dcmd->sge_count = 1; 6831 dcmd->flags = MFI_FRAME_DIR_READ; 6832 dcmd->timeout = 0; 6833 dcmd->pad_0 = 0; 6834 dcmd->data_xfer_len = 6835 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6836 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6837 6838 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6839 sizeof(struct MR_TARGET_PROPERTIES)); 6840 6841 if ((instance->adapter_type != MFI_SERIES) && 6842 !instance->mask_interrupts) 6843 ret = megasas_issue_blocked_cmd(instance, 6844 cmd, MFI_IO_TIMEOUT_SECS); 6845 else 6846 ret = megasas_issue_polled(instance, cmd); 6847 6848 switch (ret) { 6849 case DCMD_TIMEOUT: 6850 switch (dcmd_timeout_ocr_possible(instance)) { 6851 case INITIATE_OCR: 6852 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6853 mutex_unlock(&instance->reset_mutex); 6854 megasas_reset_fusion(instance->host, 6855 MFI_IO_TIMEOUT_OCR); 6856 mutex_lock(&instance->reset_mutex); 6857 break; 6858 case KILL_ADAPTER: 6859 megaraid_sas_kill_hba(instance); 6860 break; 6861 case IGNORE_TIMEOUT: 6862 dev_info(&instance->pdev->dev, 6863 "Ignore DCMD timeout: %s %d\n", 6864 __func__, __LINE__); 6865 break; 6866 } 6867 break; 6868 6869 default: 6870 megasas_return_cmd(instance, cmd); 6871 } 6872 if (ret != DCMD_SUCCESS) 6873 dev_err(&instance->pdev->dev, 6874 "return from %s %d return value %d\n", 6875 __func__, __LINE__, ret); 6876 6877 return ret; 6878 } 6879 6880 /** 6881 * megasas_start_aen - Subscribes to AEN during driver load time 6882 * @instance: Adapter soft state 6883 */ 6884 static int megasas_start_aen(struct megasas_instance *instance) 6885 { 6886 struct megasas_evt_log_info eli; 6887 union megasas_evt_class_locale class_locale; 6888 6889 /* 6890 * Get the latest sequence number from FW 6891 */ 6892 memset(&eli, 0, sizeof(eli)); 6893 6894 if (megasas_get_seq_num(instance, &eli)) 6895 return -1; 6896 6897 /* 6898 * Register AEN with FW for latest sequence number plus 1 6899 */ 6900 class_locale.members.reserved = 0; 6901 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6902 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6903 6904 return megasas_register_aen(instance, 6905 le32_to_cpu(eli.newest_seq_num) + 1, 6906 class_locale.word); 6907 } 6908 6909 /** 6910 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6911 * @instance: Adapter soft state 6912 */ 6913 static int megasas_io_attach(struct megasas_instance *instance) 6914 { 6915 struct Scsi_Host *host = instance->host; 6916 6917 /* 6918 * Export parameters required by SCSI mid-layer 6919 */ 6920 host->unique_id = instance->unique_id; 6921 host->can_queue = instance->max_scsi_cmds; 6922 host->this_id = instance->init_id; 6923 host->sg_tablesize = instance->max_num_sge; 6924 6925 if (instance->fw_support_ieee) 6926 instance->max_sectors_per_req 
= MEGASAS_MAX_SECTORS_IEEE; 6927 6928 /* 6929 * Check if the module parameter value for max_sectors can be used 6930 */ 6931 if (max_sectors && max_sectors < instance->max_sectors_per_req) 6932 instance->max_sectors_per_req = max_sectors; 6933 else { 6934 if (max_sectors) { 6935 if (((instance->pdev->device == 6936 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6937 (instance->pdev->device == 6938 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6939 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6940 instance->max_sectors_per_req = max_sectors; 6941 } else { 6942 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 6943 "and <= %d (or < 1MB for GEN2 controller)\n", 6944 instance->max_sectors_per_req); 6945 } 6946 } 6947 } 6948 6949 host->max_sectors = instance->max_sectors_per_req; 6950 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6951 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6952 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6953 host->max_lun = MEGASAS_MAX_LUN; 6954 host->max_cmd_len = 16; 6955 6956 /* Use shared host tagset only for fusion adaptors 6957 * if there are managed interrupts (smp affinity enabled case). 6958 * Single msix_vectors in kdump, so shared host tag is also disabled. 6959 */ 6960 6961 host->host_tagset = 0; 6962 host->nr_hw_queues = 1; 6963 6964 if ((instance->adapter_type != MFI_SERIES) && 6965 (instance->msix_vectors > instance->low_latency_index_start) && 6966 host_tagset_enable && 6967 instance->smp_affinity_enable) { 6968 host->host_tagset = 1; 6969 host->nr_hw_queues = instance->msix_vectors - 6970 instance->low_latency_index_start + instance->iopoll_q_count; 6971 if (instance->iopoll_q_count) 6972 host->nr_maps = 3; 6973 } else { 6974 instance->iopoll_q_count = 0; 6975 } 6976 6977 dev_info(&instance->pdev->dev, 6978 "Max firmware commands: %d shared with default " 6979 "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, 6980 host->nr_hw_queues - instance->iopoll_q_count, 6981 instance->iopoll_q_count); 6982 /* 6983 * Notify the mid-layer about the new controller 6984 */ 6985 if (scsi_add_host(host, &instance->pdev->dev)) { 6986 dev_err(&instance->pdev->dev, 6987 "Failed to add host from %s %d\n", 6988 __func__, __LINE__); 6989 return -ENODEV; 6990 } 6991 6992 return 0; 6993 } 6994 6995 /** 6996 * megasas_set_dma_mask - Set DMA mask for supported controllers 6997 * 6998 * @instance: Adapter soft state 6999 * Description: 7000 * 7001 * For Ventura, driver/FW will operate in 63bit DMA addresses. 7002 * 7003 * For invader- 7004 * By default, driver/FW will operate in 32bit DMA addresses 7005 * for consistent DMA mapping but if 32 bit consistent 7006 * DMA mask fails, driver will try with 63 bit consistent 7007 * mask provided FW is true 63bit DMA capable 7008 * 7009 * For older controllers(Thunderbolt and MFI based adapters)- 7010 * driver/FW will operate in 32 bit consistent DMA addresses. 7011 */ 7012 static int 7013 megasas_set_dma_mask(struct megasas_instance *instance) 7014 { 7015 u64 consistent_mask; 7016 struct pci_dev *pdev; 7017 u32 scratch_pad_1; 7018 7019 pdev = instance->pdev; 7020 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? 
7021 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 7022 7023 if (IS_DMA64) { 7024 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 7025 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7026 goto fail_set_dma_mask; 7027 7028 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 7029 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 7030 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 7031 /* 7032 * If 32 bit DMA mask fails, then try for 64 bit mask 7033 * for FW capable of handling 64 bit DMA. 7034 */ 7035 scratch_pad_1 = megasas_readl 7036 (instance, &instance->reg_set->outbound_scratch_pad_1); 7037 7038 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 7039 goto fail_set_dma_mask; 7040 else if (dma_set_mask_and_coherent(&pdev->dev, 7041 DMA_BIT_MASK(63))) 7042 goto fail_set_dma_mask; 7043 } 7044 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7045 goto fail_set_dma_mask; 7046 7047 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 7048 instance->consistent_mask_64bit = false; 7049 else 7050 instance->consistent_mask_64bit = true; 7051 7052 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 7053 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 7054 (instance->consistent_mask_64bit ? "63" : "32")); 7055 7056 return 0; 7057 7058 fail_set_dma_mask: 7059 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 7060 return -1; 7061 7062 } 7063 7064 /* 7065 * megasas_set_adapter_type - Set adapter type. 7066 * Supported controllers can be divided in 7067 * different categories- 7068 * enum MR_ADAPTER_TYPE { 7069 * MFI_SERIES = 1, 7070 * THUNDERBOLT_SERIES = 2, 7071 * INVADER_SERIES = 3, 7072 * VENTURA_SERIES = 4, 7073 * AERO_SERIES = 5, 7074 * }; 7075 * @instance: Adapter soft state 7076 * return: void 7077 */ 7078 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 7079 { 7080 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 7081 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 7082 instance->adapter_type = MFI_SERIES; 7083 } else { 7084 switch (instance->pdev->device) { 7085 case PCI_DEVICE_ID_LSI_AERO_10E1: 7086 case PCI_DEVICE_ID_LSI_AERO_10E2: 7087 case PCI_DEVICE_ID_LSI_AERO_10E5: 7088 case PCI_DEVICE_ID_LSI_AERO_10E6: 7089 instance->adapter_type = AERO_SERIES; 7090 break; 7091 case PCI_DEVICE_ID_LSI_VENTURA: 7092 case PCI_DEVICE_ID_LSI_CRUSADER: 7093 case PCI_DEVICE_ID_LSI_HARPOON: 7094 case PCI_DEVICE_ID_LSI_TOMCAT: 7095 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 7096 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 7097 instance->adapter_type = VENTURA_SERIES; 7098 break; 7099 case PCI_DEVICE_ID_LSI_FUSION: 7100 case PCI_DEVICE_ID_LSI_PLASMA: 7101 instance->adapter_type = THUNDERBOLT_SERIES; 7102 break; 7103 case PCI_DEVICE_ID_LSI_INVADER: 7104 case PCI_DEVICE_ID_LSI_INTRUDER: 7105 case PCI_DEVICE_ID_LSI_INTRUDER_24: 7106 case PCI_DEVICE_ID_LSI_CUTLASS_52: 7107 case PCI_DEVICE_ID_LSI_CUTLASS_53: 7108 case PCI_DEVICE_ID_LSI_FURY: 7109 instance->adapter_type = INVADER_SERIES; 7110 break; 7111 default: /* For all other supported controllers */ 7112 instance->adapter_type = MFI_SERIES; 7113 break; 7114 } 7115 } 7116 } 7117 7118 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 7119 { 7120 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 7121 sizeof(u32), &instance->producer_h, GFP_KERNEL); 7122 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 7123 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 7124 7125 if (!instance->producer || !instance->consumer) 
{ 7126 dev_err(&instance->pdev->dev, 7127 "Failed to allocate memory for producer, consumer\n"); 7128 return -1; 7129 } 7130 7131 *instance->producer = 0; 7132 *instance->consumer = 0; 7133 return 0; 7134 } 7135 7136 /** 7137 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 7138 * structures which are not common across MFI 7139 * adapters and fusion adapters. 7140 * For MFI based adapters, allocate producer and 7141 * consumer buffers. For fusion adapters, allocate 7142 * memory for fusion context. 7143 * @instance: Adapter soft state 7144 * return: 0 for SUCCESS 7145 */ 7146 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 7147 { 7148 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 7149 GFP_KERNEL); 7150 if (!instance->reply_map) 7151 return -ENOMEM; 7152 7153 switch (instance->adapter_type) { 7154 case MFI_SERIES: 7155 if (megasas_alloc_mfi_ctrl_mem(instance)) 7156 goto fail; 7157 break; 7158 case AERO_SERIES: 7159 case VENTURA_SERIES: 7160 case THUNDERBOLT_SERIES: 7161 case INVADER_SERIES: 7162 if (megasas_alloc_fusion_context(instance)) 7163 goto fail; 7164 break; 7165 } 7166 7167 return 0; 7168 fail: 7169 kfree(instance->reply_map); 7170 instance->reply_map = NULL; 7171 return -ENOMEM; 7172 } 7173 7174 /* 7175 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 7176 * producer, consumer buffers for MFI adapters 7177 * 7178 * @instance - Adapter soft instance 7179 * 7180 */ 7181 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 7182 { 7183 kfree(instance->reply_map); 7184 if (instance->adapter_type == MFI_SERIES) { 7185 if (instance->producer) 7186 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7187 instance->producer, 7188 instance->producer_h); 7189 if (instance->consumer) 7190 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7191 instance->consumer, 7192 instance->consumer_h); 7193 } else { 7194 megasas_free_fusion_context(instance); 7195 } 7196 } 7197 7198 /** 7199 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 7200 * driver load time 7201 * 7202 * @instance: Adapter soft instance 7203 * 7204 * @return: O for SUCCESS 7205 */ 7206 static inline 7207 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 7208 { 7209 struct pci_dev *pdev = instance->pdev; 7210 struct fusion_context *fusion = instance->ctrl_context; 7211 7212 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 7213 sizeof(struct megasas_evt_detail), 7214 &instance->evt_detail_h, GFP_KERNEL); 7215 7216 if (!instance->evt_detail) { 7217 dev_err(&instance->pdev->dev, 7218 "Failed to allocate event detail buffer\n"); 7219 return -ENOMEM; 7220 } 7221 7222 if (fusion) { 7223 fusion->ioc_init_request = 7224 dma_alloc_coherent(&pdev->dev, 7225 sizeof(struct MPI2_IOC_INIT_REQUEST), 7226 &fusion->ioc_init_request_phys, 7227 GFP_KERNEL); 7228 7229 if (!fusion->ioc_init_request) { 7230 dev_err(&pdev->dev, 7231 "Failed to allocate PD list buffer\n"); 7232 return -ENOMEM; 7233 } 7234 7235 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 7236 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7237 &instance->snapdump_prop_h, GFP_KERNEL); 7238 7239 if (!instance->snapdump_prop) 7240 dev_err(&pdev->dev, 7241 "Failed to allocate snapdump properties buffer\n"); 7242 7243 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 7244 HOST_DEVICE_LIST_SZ, 7245 &instance->host_device_list_buf_h, 7246 GFP_KERNEL); 7247 7248 if (!instance->host_device_list_buf) { 7249 
dev_err(&pdev->dev, 7250 "Failed to allocate targetid list buffer\n"); 7251 return -ENOMEM; 7252 } 7253 7254 } 7255 7256 instance->pd_list_buf = 7257 dma_alloc_coherent(&pdev->dev, 7258 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7259 &instance->pd_list_buf_h, GFP_KERNEL); 7260 7261 if (!instance->pd_list_buf) { 7262 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 7263 return -ENOMEM; 7264 } 7265 7266 instance->ctrl_info_buf = 7267 dma_alloc_coherent(&pdev->dev, 7268 sizeof(struct megasas_ctrl_info), 7269 &instance->ctrl_info_buf_h, GFP_KERNEL); 7270 7271 if (!instance->ctrl_info_buf) { 7272 dev_err(&pdev->dev, 7273 "Failed to allocate controller info buffer\n"); 7274 return -ENOMEM; 7275 } 7276 7277 instance->ld_list_buf = 7278 dma_alloc_coherent(&pdev->dev, 7279 sizeof(struct MR_LD_LIST), 7280 &instance->ld_list_buf_h, GFP_KERNEL); 7281 7282 if (!instance->ld_list_buf) { 7283 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7284 return -ENOMEM; 7285 } 7286 7287 instance->ld_targetid_list_buf = 7288 dma_alloc_coherent(&pdev->dev, 7289 sizeof(struct MR_LD_TARGETID_LIST), 7290 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7291 7292 if (!instance->ld_targetid_list_buf) { 7293 dev_err(&pdev->dev, 7294 "Failed to allocate LD targetid list buffer\n"); 7295 return -ENOMEM; 7296 } 7297 7298 if (!reset_devices) { 7299 instance->system_info_buf = 7300 dma_alloc_coherent(&pdev->dev, 7301 sizeof(struct MR_DRV_SYSTEM_INFO), 7302 &instance->system_info_h, GFP_KERNEL); 7303 instance->pd_info = 7304 dma_alloc_coherent(&pdev->dev, 7305 sizeof(struct MR_PD_INFO), 7306 &instance->pd_info_h, GFP_KERNEL); 7307 instance->tgt_prop = 7308 dma_alloc_coherent(&pdev->dev, 7309 sizeof(struct MR_TARGET_PROPERTIES), 7310 &instance->tgt_prop_h, GFP_KERNEL); 7311 instance->crash_dump_buf = 7312 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7313 &instance->crash_dump_h, GFP_KERNEL); 7314 7315 if (!instance->system_info_buf) 7316 dev_err(&instance->pdev->dev, 7317 "Failed to allocate system info buffer\n"); 7318 7319 if (!instance->pd_info) 7320 dev_err(&instance->pdev->dev, 7321 "Failed to allocate pd_info buffer\n"); 7322 7323 if (!instance->tgt_prop) 7324 dev_err(&instance->pdev->dev, 7325 "Failed to allocate tgt_prop buffer\n"); 7326 7327 if (!instance->crash_dump_buf) 7328 dev_err(&instance->pdev->dev, 7329 "Failed to allocate crash dump buffer\n"); 7330 } 7331 7332 return 0; 7333 } 7334 7335 /* 7336 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7337 * during driver load time 7338 * 7339 * @instance- Adapter soft instance 7340 * 7341 */ 7342 static inline 7343 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7344 { 7345 struct pci_dev *pdev = instance->pdev; 7346 struct fusion_context *fusion = instance->ctrl_context; 7347 7348 if (instance->evt_detail) 7349 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7350 instance->evt_detail, 7351 instance->evt_detail_h); 7352 7353 if (fusion && fusion->ioc_init_request) 7354 dma_free_coherent(&pdev->dev, 7355 sizeof(struct MPI2_IOC_INIT_REQUEST), 7356 fusion->ioc_init_request, 7357 fusion->ioc_init_request_phys); 7358 7359 if (instance->pd_list_buf) 7360 dma_free_coherent(&pdev->dev, 7361 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7362 instance->pd_list_buf, 7363 instance->pd_list_buf_h); 7364 7365 if (instance->ld_list_buf) 7366 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7367 instance->ld_list_buf, 7368 instance->ld_list_buf_h); 7369 7370 if 
(instance->ld_targetid_list_buf) 7371 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7372 instance->ld_targetid_list_buf, 7373 instance->ld_targetid_list_buf_h); 7374 7375 if (instance->ctrl_info_buf) 7376 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7377 instance->ctrl_info_buf, 7378 instance->ctrl_info_buf_h); 7379 7380 if (instance->system_info_buf) 7381 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7382 instance->system_info_buf, 7383 instance->system_info_h); 7384 7385 if (instance->pd_info) 7386 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7387 instance->pd_info, instance->pd_info_h); 7388 7389 if (instance->tgt_prop) 7390 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7391 instance->tgt_prop, instance->tgt_prop_h); 7392 7393 if (instance->crash_dump_buf) 7394 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7395 instance->crash_dump_buf, 7396 instance->crash_dump_h); 7397 7398 if (instance->snapdump_prop) 7399 dma_free_coherent(&pdev->dev, 7400 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7401 instance->snapdump_prop, 7402 instance->snapdump_prop_h); 7403 7404 if (instance->host_device_list_buf) 7405 dma_free_coherent(&pdev->dev, 7406 HOST_DEVICE_LIST_SZ, 7407 instance->host_device_list_buf, 7408 instance->host_device_list_buf_h); 7409 7410 } 7411 7412 /* 7413 * megasas_init_ctrl_params - Initialize controller's instance 7414 * parameters before FW init 7415 * @instance - Adapter soft instance 7416 * @return - void 7417 */ 7418 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7419 { 7420 instance->fw_crash_state = UNAVAILABLE; 7421 7422 megasas_poll_wait_aen = 0; 7423 instance->issuepend_done = 1; 7424 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7425 7426 /* 7427 * Initialize locks and queues 7428 */ 7429 INIT_LIST_HEAD(&instance->cmd_pool); 7430 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7431 7432 atomic_set(&instance->fw_outstanding, 0); 7433 atomic64_set(&instance->total_io_count, 0); 7434 7435 init_waitqueue_head(&instance->int_cmd_wait_q); 7436 init_waitqueue_head(&instance->abort_cmd_wait_q); 7437 7438 spin_lock_init(&instance->crashdump_lock); 7439 spin_lock_init(&instance->mfi_pool_lock); 7440 spin_lock_init(&instance->hba_lock); 7441 spin_lock_init(&instance->stream_lock); 7442 spin_lock_init(&instance->completion_lock); 7443 7444 mutex_init(&instance->reset_mutex); 7445 7446 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7447 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7448 instance->flag_ieee = 1; 7449 7450 megasas_dbg_lvl = 0; 7451 instance->flag = 0; 7452 instance->unload = 1; 7453 instance->last_time = 0; 7454 instance->disableOnlineCtrlReset = 1; 7455 instance->UnevenSpanSupport = 0; 7456 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7457 instance->msix_load_balance = false; 7458 7459 if (instance->adapter_type != MFI_SERIES) 7460 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7461 else 7462 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7463 } 7464 7465 /** 7466 * megasas_probe_one - PCI hotplug entry point 7467 * @pdev: PCI device structure 7468 * @id: PCI ids of supported hotplugged adapter 7469 */ 7470 static int megasas_probe_one(struct pci_dev *pdev, 7471 const struct pci_device_id *id) 7472 { 7473 int rval, pos; 7474 struct Scsi_Host *host; 7475 struct megasas_instance *instance; 7476 u16 control = 0; 7477 7478 switch (pdev->device) { 7479 case PCI_DEVICE_ID_LSI_AERO_10E0: 7480 case PCI_DEVICE_ID_LSI_AERO_10E3: 7481 case PCI_DEVICE_ID_LSI_AERO_10E4: 7482 case PCI_DEVICE_ID_LSI_AERO_10E7: 7483 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7484 return 1; 7485 case PCI_DEVICE_ID_LSI_AERO_10E1: 7486 case PCI_DEVICE_ID_LSI_AERO_10E5: 7487 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7488 break; 7489 } 7490 7491 /* Reset MSI-X in the kdump kernel */ 7492 if (reset_devices) { 7493 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7494 if (pos) { 7495 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7496 &control); 7497 if (control & PCI_MSIX_FLAGS_ENABLE) { 7498 dev_info(&pdev->dev, "resetting MSI-X\n"); 7499 pci_write_config_word(pdev, 7500 pos + PCI_MSIX_FLAGS, 7501 control & 7502 ~PCI_MSIX_FLAGS_ENABLE); 7503 } 7504 } 7505 } 7506 7507 /* 7508 * PCI prepping: enable device set bus mastering and dma mask 7509 */ 7510 rval = pci_enable_device_mem(pdev); 7511 7512 if (rval) { 7513 return rval; 7514 } 7515 7516 pci_set_master(pdev); 7517 7518 host = scsi_host_alloc(&megasas_template, 7519 sizeof(struct megasas_instance)); 7520 7521 if (!host) { 7522 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7523 goto fail_alloc_instance; 7524 } 7525 7526 instance = (struct megasas_instance *)host->hostdata; 7527 memset(instance, 0, sizeof(*instance)); 7528 atomic_set(&instance->fw_reset_no_pci_access, 0); 7529 7530 /* 7531 * Initialize PCI related and misc parameters 7532 */ 7533 instance->pdev = pdev; 7534 instance->host = host; 7535 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 7536 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7537 7538 megasas_set_adapter_type(instance); 7539 7540 /* 7541 * Initialize MFI Firmware 7542 */ 7543 if (megasas_init_fw(instance)) 7544 goto fail_init_mfi; 7545 7546 if (instance->requestorId) { 7547 if (instance->PlasmaFW111) { 7548 instance->vf_affiliation_111 = 7549 dma_alloc_coherent(&pdev->dev, 7550 sizeof(struct MR_LD_VF_AFFILIATION_111), 7551 &instance->vf_affiliation_111_h, 7552 GFP_KERNEL); 7553 if (!instance->vf_affiliation_111) 7554 dev_warn(&pdev->dev, "Can't allocate " 7555 "memory for VF affiliation buffer\n"); 7556 } else { 7557 instance->vf_affiliation = 7558 dma_alloc_coherent(&pdev->dev, 7559 (MAX_LOGICAL_DRIVES + 1) * 7560 sizeof(struct MR_LD_VF_AFFILIATION), 7561 &instance->vf_affiliation_h, 7562 GFP_KERNEL); 7563 if (!instance->vf_affiliation) 7564 dev_warn(&pdev->dev, "Can't allocate " 7565 "memory for VF affiliation buffer\n"); 7566 } 7567 } 7568 7569 /* 7570 * Store instance in PCI softstate 7571 */ 7572 pci_set_drvdata(pdev, instance); 7573 7574 /* 7575 * Add this controller to megasas_mgmt_info structure so that it 7576 * can be exported to management applications 7577 */ 7578 megasas_mgmt_info.count++; 7579 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7580 
megasas_mgmt_info.max_index++; 7581 7582 /* 7583 * Register with SCSI mid-layer 7584 */ 7585 if (megasas_io_attach(instance)) 7586 goto fail_io_attach; 7587 7588 instance->unload = 0; 7589 /* 7590 * Trigger SCSI to scan our drives 7591 */ 7592 if (!instance->enable_fw_dev_list || 7593 (instance->host_device_list_buf->count > 0)) 7594 scsi_scan_host(host); 7595 7596 /* 7597 * Initiate AEN (Asynchronous Event Notification) 7598 */ 7599 if (megasas_start_aen(instance)) { 7600 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7601 goto fail_start_aen; 7602 } 7603 7604 megasas_setup_debugfs(instance); 7605 7606 /* Get current SR-IOV LD/VF affiliation */ 7607 if (instance->requestorId) 7608 megasas_get_ld_vf_affiliation(instance, 1); 7609 7610 return 0; 7611 7612 fail_start_aen: 7613 instance->unload = 1; 7614 scsi_remove_host(instance->host); 7615 fail_io_attach: 7616 megasas_mgmt_info.count--; 7617 megasas_mgmt_info.max_index--; 7618 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7619 7620 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7621 del_timer_sync(&instance->sriov_heartbeat_timer); 7622 7623 instance->instancet->disable_intr(instance); 7624 megasas_destroy_irqs(instance); 7625 7626 if (instance->adapter_type != MFI_SERIES) 7627 megasas_release_fusion(instance); 7628 else 7629 megasas_release_mfi(instance); 7630 7631 if (instance->msix_vectors) 7632 pci_free_irq_vectors(instance->pdev); 7633 instance->msix_vectors = 0; 7634 7635 if (instance->fw_crash_state != UNAVAILABLE) 7636 megasas_free_host_crash_buffer(instance); 7637 7638 if (instance->adapter_type != MFI_SERIES) 7639 megasas_fusion_stop_watchdog(instance); 7640 fail_init_mfi: 7641 scsi_host_put(host); 7642 fail_alloc_instance: 7643 pci_disable_device(pdev); 7644 7645 return -ENODEV; 7646 } 7647 7648 /** 7649 * megasas_flush_cache - Requests FW to flush all its caches 7650 * @instance: Adapter soft state 7651 */ 7652 static void megasas_flush_cache(struct megasas_instance *instance) 7653 { 7654 struct megasas_cmd *cmd; 7655 struct megasas_dcmd_frame *dcmd; 7656 7657 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7658 return; 7659 7660 cmd = megasas_get_cmd(instance); 7661 7662 if (!cmd) 7663 return; 7664 7665 dcmd = &cmd->frame->dcmd; 7666 7667 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7668 7669 dcmd->cmd = MFI_CMD_DCMD; 7670 dcmd->cmd_status = 0x0; 7671 dcmd->sge_count = 0; 7672 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7673 dcmd->timeout = 0; 7674 dcmd->pad_0 = 0; 7675 dcmd->data_xfer_len = 0; 7676 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7677 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7678 7679 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7680 != DCMD_SUCCESS) { 7681 dev_err(&instance->pdev->dev, 7682 "return from %s %d\n", __func__, __LINE__); 7683 return; 7684 } 7685 7686 megasas_return_cmd(instance, cmd); 7687 } 7688 7689 /** 7690 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7691 * @instance: Adapter soft state 7692 * @opcode: Shutdown/Hibernate 7693 */ 7694 static void megasas_shutdown_controller(struct megasas_instance *instance, 7695 u32 opcode) 7696 { 7697 struct megasas_cmd *cmd; 7698 struct megasas_dcmd_frame *dcmd; 7699 7700 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7701 return; 7702 7703 cmd = megasas_get_cmd(instance); 7704 7705 if (!cmd) 7706 return; 7707 7708 if (instance->aen_cmd) 7709 megasas_issue_blocked_abort_cmd(instance, 7710 
instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7711 if (instance->map_update_cmd) 7712 megasas_issue_blocked_abort_cmd(instance, 7713 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7714 if (instance->jbod_seq_cmd) 7715 megasas_issue_blocked_abort_cmd(instance, 7716 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7717 7718 dcmd = &cmd->frame->dcmd; 7719 7720 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7721 7722 dcmd->cmd = MFI_CMD_DCMD; 7723 dcmd->cmd_status = 0x0; 7724 dcmd->sge_count = 0; 7725 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7726 dcmd->timeout = 0; 7727 dcmd->pad_0 = 0; 7728 dcmd->data_xfer_len = 0; 7729 dcmd->opcode = cpu_to_le32(opcode); 7730 7731 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7732 != DCMD_SUCCESS) { 7733 dev_err(&instance->pdev->dev, 7734 "return from %s %d\n", __func__, __LINE__); 7735 return; 7736 } 7737 7738 megasas_return_cmd(instance, cmd); 7739 } 7740 7741 /** 7742 * megasas_suspend - driver suspend entry point 7743 * @dev: Device structure 7744 */ 7745 static int __maybe_unused 7746 megasas_suspend(struct device *dev) 7747 { 7748 struct megasas_instance *instance; 7749 7750 instance = dev_get_drvdata(dev); 7751 7752 if (!instance) 7753 return 0; 7754 7755 instance->unload = 1; 7756 7757 dev_info(dev, "%s is called\n", __func__); 7758 7759 /* Shutdown SR-IOV heartbeat timer */ 7760 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7761 del_timer_sync(&instance->sriov_heartbeat_timer); 7762 7763 /* Stop the FW fault detection watchdog */ 7764 if (instance->adapter_type != MFI_SERIES) 7765 megasas_fusion_stop_watchdog(instance); 7766 7767 megasas_flush_cache(instance); 7768 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7769 7770 /* cancel the delayed work if this work still in queue */ 7771 if (instance->ev != NULL) { 7772 struct megasas_aen_event *ev = instance->ev; 7773 cancel_delayed_work_sync(&ev->hotplug_work); 7774 instance->ev = NULL; 7775 } 7776 7777 tasklet_kill(&instance->isr_tasklet); 7778 7779 pci_set_drvdata(instance->pdev, instance); 7780 instance->instancet->disable_intr(instance); 7781 7782 megasas_destroy_irqs(instance); 7783 7784 if (instance->msix_vectors) 7785 pci_free_irq_vectors(instance->pdev); 7786 7787 return 0; 7788 } 7789 7790 /** 7791 * megasas_resume- driver resume entry point 7792 * @dev: Device structure 7793 */ 7794 static int __maybe_unused 7795 megasas_resume(struct device *dev) 7796 { 7797 int rval; 7798 struct Scsi_Host *host; 7799 struct megasas_instance *instance; 7800 u32 status_reg; 7801 7802 instance = dev_get_drvdata(dev); 7803 7804 if (!instance) 7805 return 0; 7806 7807 host = instance->host; 7808 7809 dev_info(dev, "%s is called\n", __func__); 7810 7811 /* 7812 * We expect the FW state to be READY 7813 */ 7814 7815 if (megasas_transition_to_ready(instance, 0)) { 7816 dev_info(&instance->pdev->dev, 7817 "Failed to transition controller to ready from %s!\n", 7818 __func__); 7819 if (instance->adapter_type != MFI_SERIES) { 7820 status_reg = 7821 instance->instancet->read_fw_status_reg(instance); 7822 if (!(status_reg & MFI_RESET_ADAPTER) || 7823 ((megasas_adp_reset_wait_for_ready 7824 (instance, true, 0)) == FAILED)) 7825 goto fail_ready_state; 7826 } else { 7827 atomic_set(&instance->fw_reset_no_pci_access, 1); 7828 instance->instancet->adp_reset 7829 (instance, instance->reg_set); 7830 atomic_set(&instance->fw_reset_no_pci_access, 0); 7831 7832 /* waiting for about 30 seconds before retry */ 7833 ssleep(30); 7834 7835 if (megasas_transition_to_ready(instance, 0)) 
7836 goto fail_ready_state; 7837 } 7838 7839 dev_info(&instance->pdev->dev, 7840 "FW restarted successfully from %s!\n", 7841 __func__); 7842 } 7843 if (megasas_set_dma_mask(instance)) 7844 goto fail_set_dma_mask; 7845 7846 /* 7847 * Initialize MFI Firmware 7848 */ 7849 7850 atomic_set(&instance->fw_outstanding, 0); 7851 atomic_set(&instance->ldio_outstanding, 0); 7852 7853 /* Now re-enable MSI-X */ 7854 if (instance->msix_vectors) 7855 megasas_alloc_irq_vectors(instance); 7856 7857 if (!instance->msix_vectors) { 7858 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, 7859 PCI_IRQ_LEGACY); 7860 if (rval < 0) 7861 goto fail_reenable_msix; 7862 } 7863 7864 megasas_setup_reply_map(instance); 7865 7866 if (instance->adapter_type != MFI_SERIES) { 7867 megasas_reset_reply_desc(instance); 7868 if (megasas_ioc_init_fusion(instance)) { 7869 megasas_free_cmds(instance); 7870 megasas_free_cmds_fusion(instance); 7871 goto fail_init_mfi; 7872 } 7873 if (!megasas_get_map_info(instance)) 7874 megasas_sync_map_info(instance); 7875 } else { 7876 *instance->producer = 0; 7877 *instance->consumer = 0; 7878 if (megasas_issue_init_mfi(instance)) 7879 goto fail_init_mfi; 7880 } 7881 7882 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7883 goto fail_init_mfi; 7884 7885 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7886 (unsigned long)instance); 7887 7888 if (instance->msix_vectors ? 7889 megasas_setup_irqs_msix(instance, 0) : 7890 megasas_setup_irqs_ioapic(instance)) 7891 goto fail_init_mfi; 7892 7893 if (instance->adapter_type != MFI_SERIES) 7894 megasas_setup_irq_poll(instance); 7895 7896 /* Re-launch SR-IOV heartbeat timer */ 7897 if (instance->requestorId) { 7898 if (!megasas_sriov_start_heartbeat(instance, 0)) 7899 megasas_start_timer(instance); 7900 else { 7901 instance->skip_heartbeat_timer_del = 1; 7902 goto fail_init_mfi; 7903 } 7904 } 7905 7906 instance->instancet->enable_intr(instance); 7907 megasas_setup_jbod_map(instance); 7908 instance->unload = 0; 7909 7910 /* 7911 * Initiate AEN (Asynchronous Event Notification) 7912 */ 7913 if (megasas_start_aen(instance)) 7914 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7915 7916 /* Re-launch FW fault watchdog */ 7917 if (instance->adapter_type != MFI_SERIES) 7918 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7919 goto fail_start_watchdog; 7920 7921 return 0; 7922 7923 fail_start_watchdog: 7924 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7925 del_timer_sync(&instance->sriov_heartbeat_timer); 7926 fail_init_mfi: 7927 megasas_free_ctrl_dma_buffers(instance); 7928 megasas_free_ctrl_mem(instance); 7929 scsi_host_put(host); 7930 7931 fail_reenable_msix: 7932 fail_set_dma_mask: 7933 fail_ready_state: 7934 7935 return -ENODEV; 7936 } 7937 7938 static inline int 7939 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7940 { 7941 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7942 int i; 7943 u8 adp_state; 7944 7945 for (i = 0; i < wait_time; i++) { 7946 adp_state = atomic_read(&instance->adprecovery); 7947 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7948 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7949 break; 7950 7951 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7952 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7953 7954 msleep(1000); 7955 } 7956 7957 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7958 dev_info(&instance->pdev->dev, 7959 "%s HBA failed to become operational, adp_state %d\n", 7960 __func__, adp_state); 7961 return 1; 7962 } 7963 7964 return 
0; 7965 } 7966 7967 /** 7968 * megasas_detach_one - PCI hot"un"plug entry point 7969 * @pdev: PCI device structure 7970 */ 7971 static void megasas_detach_one(struct pci_dev *pdev) 7972 { 7973 int i; 7974 struct Scsi_Host *host; 7975 struct megasas_instance *instance; 7976 struct fusion_context *fusion; 7977 u32 pd_seq_map_sz; 7978 7979 instance = pci_get_drvdata(pdev); 7980 7981 if (!instance) 7982 return; 7983 7984 host = instance->host; 7985 fusion = instance->ctrl_context; 7986 7987 /* Shutdown SR-IOV heartbeat timer */ 7988 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7989 del_timer_sync(&instance->sriov_heartbeat_timer); 7990 7991 /* Stop the FW fault detection watchdog */ 7992 if (instance->adapter_type != MFI_SERIES) 7993 megasas_fusion_stop_watchdog(instance); 7994 7995 if (instance->fw_crash_state != UNAVAILABLE) 7996 megasas_free_host_crash_buffer(instance); 7997 scsi_remove_host(instance->host); 7998 instance->unload = 1; 7999 8000 if (megasas_wait_for_adapter_operational(instance)) 8001 goto skip_firing_dcmds; 8002 8003 megasas_flush_cache(instance); 8004 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 8005 8006 skip_firing_dcmds: 8007 /* cancel the delayed work if this work still in queue*/ 8008 if (instance->ev != NULL) { 8009 struct megasas_aen_event *ev = instance->ev; 8010 cancel_delayed_work_sync(&ev->hotplug_work); 8011 instance->ev = NULL; 8012 } 8013 8014 /* cancel all wait events */ 8015 wake_up_all(&instance->int_cmd_wait_q); 8016 8017 tasklet_kill(&instance->isr_tasklet); 8018 8019 /* 8020 * Take the instance off the instance array. Note that we will not 8021 * decrement the max_index. We let this array be sparse array 8022 */ 8023 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8024 if (megasas_mgmt_info.instance[i] == instance) { 8025 megasas_mgmt_info.count--; 8026 megasas_mgmt_info.instance[i] = NULL; 8027 8028 break; 8029 } 8030 } 8031 8032 instance->instancet->disable_intr(instance); 8033 8034 megasas_destroy_irqs(instance); 8035 8036 if (instance->msix_vectors) 8037 pci_free_irq_vectors(instance->pdev); 8038 8039 if (instance->adapter_type >= VENTURA_SERIES) { 8040 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 8041 kfree(fusion->stream_detect_by_ld[i]); 8042 kfree(fusion->stream_detect_by_ld); 8043 fusion->stream_detect_by_ld = NULL; 8044 } 8045 8046 8047 if (instance->adapter_type != MFI_SERIES) { 8048 megasas_release_fusion(instance); 8049 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8050 (sizeof(struct MR_PD_CFG_SEQ) * 8051 (MAX_PHYSICAL_DEVICES - 1)); 8052 for (i = 0; i < 2 ; i++) { 8053 if (fusion->ld_map[i]) 8054 dma_free_coherent(&instance->pdev->dev, 8055 fusion->max_map_sz, 8056 fusion->ld_map[i], 8057 fusion->ld_map_phys[i]); 8058 if (fusion->ld_drv_map[i]) { 8059 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 8060 vfree(fusion->ld_drv_map[i]); 8061 else 8062 free_pages((ulong)fusion->ld_drv_map[i], 8063 fusion->drv_map_pages); 8064 } 8065 8066 if (fusion->pd_seq_sync[i]) 8067 dma_free_coherent(&instance->pdev->dev, 8068 pd_seq_map_sz, 8069 fusion->pd_seq_sync[i], 8070 fusion->pd_seq_phys[i]); 8071 } 8072 } else { 8073 megasas_release_mfi(instance); 8074 } 8075 8076 if (instance->vf_affiliation) 8077 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 8078 sizeof(struct MR_LD_VF_AFFILIATION), 8079 instance->vf_affiliation, 8080 instance->vf_affiliation_h); 8081 8082 if (instance->vf_affiliation_111) 8083 dma_free_coherent(&pdev->dev, 8084 sizeof(struct MR_LD_VF_AFFILIATION_111), 8085 
instance->vf_affiliation_111, 8086 instance->vf_affiliation_111_h); 8087 8088 if (instance->hb_host_mem) 8089 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 8090 instance->hb_host_mem, 8091 instance->hb_host_mem_h); 8092 8093 megasas_free_ctrl_dma_buffers(instance); 8094 8095 megasas_free_ctrl_mem(instance); 8096 8097 megasas_destroy_debugfs(instance); 8098 8099 scsi_host_put(host); 8100 8101 pci_disable_device(pdev); 8102 } 8103 8104 /** 8105 * megasas_shutdown - Shutdown entry point 8106 * @pdev: PCI device structure 8107 */ 8108 static void megasas_shutdown(struct pci_dev *pdev) 8109 { 8110 struct megasas_instance *instance = pci_get_drvdata(pdev); 8111 8112 if (!instance) 8113 return; 8114 8115 instance->unload = 1; 8116 8117 if (megasas_wait_for_adapter_operational(instance)) 8118 goto skip_firing_dcmds; 8119 8120 megasas_flush_cache(instance); 8121 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 8122 8123 skip_firing_dcmds: 8124 instance->instancet->disable_intr(instance); 8125 megasas_destroy_irqs(instance); 8126 8127 if (instance->msix_vectors) 8128 pci_free_irq_vectors(instance->pdev); 8129 } 8130 8131 /* 8132 * megasas_mgmt_open - char node "open" entry point 8133 * @inode: char node inode 8134 * @filep: char node file 8135 */ 8136 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 8137 { 8138 /* 8139 * Allow only those users with admin rights 8140 */ 8141 if (!capable(CAP_SYS_ADMIN)) 8142 return -EACCES; 8143 8144 return 0; 8145 } 8146 8147 /* 8148 * megasas_mgmt_fasync - Async notifier registration from applications 8149 * @fd: char node file descriptor number 8150 * @filep: char node file 8151 * @mode: notifier on/off 8152 * 8153 * This function adds the calling process to a driver global queue. When an 8154 * event occurs, SIGIO will be sent to all processes in this queue. 
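 *
 * Illustrative userspace usage (an assumption added for documentation, not
 * part of the driver itself): a management application typically arms this
 * notification on the megaraid_sas_ioctl character device with standard
 * fcntl() calls, e.g.
 *
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * after which SIGIO is delivered to the processes queued here.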
8155 */ 8156 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 8157 { 8158 int rc; 8159 8160 mutex_lock(&megasas_async_queue_mutex); 8161 8162 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 8163 8164 mutex_unlock(&megasas_async_queue_mutex); 8165 8166 if (rc >= 0) { 8167 /* For sanity check when we get ioctl */ 8168 filep->private_data = filep; 8169 return 0; 8170 } 8171 8172 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 8173 8174 return rc; 8175 } 8176 8177 /* 8178 * megasas_mgmt_poll - char node "poll" entry point 8179 * @filep: char node file 8180 * @wait: Events to poll for 8181 */ 8182 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 8183 { 8184 __poll_t mask; 8185 unsigned long flags; 8186 8187 poll_wait(file, &megasas_poll_wait, wait); 8188 spin_lock_irqsave(&poll_aen_lock, flags); 8189 if (megasas_poll_wait_aen) 8190 mask = (EPOLLIN | EPOLLRDNORM); 8191 else 8192 mask = 0; 8193 megasas_poll_wait_aen = 0; 8194 spin_unlock_irqrestore(&poll_aen_lock, flags); 8195 return mask; 8196 } 8197 8198 /* 8199 * megasas_set_crash_dump_params_ioctl: 8200 * Send CRASH_DUMP_MODE DCMD to all controllers 8201 * @cmd: MFI command frame 8202 */ 8203 8204 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 8205 { 8206 struct megasas_instance *local_instance; 8207 int i, error = 0; 8208 int crash_support; 8209 8210 crash_support = cmd->frame->dcmd.mbox.w[0]; 8211 8212 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8213 local_instance = megasas_mgmt_info.instance[i]; 8214 if (local_instance && local_instance->crash_dump_drv_support) { 8215 if ((atomic_read(&local_instance->adprecovery) == 8216 MEGASAS_HBA_OPERATIONAL) && 8217 !megasas_set_crash_dump_params(local_instance, 8218 crash_support)) { 8219 local_instance->crash_dump_app_support = 8220 crash_support; 8221 dev_info(&local_instance->pdev->dev, 8222 "Application firmware crash " 8223 "dump mode set success\n"); 8224 error = 0; 8225 } else { 8226 dev_info(&local_instance->pdev->dev, 8227 "Application firmware crash " 8228 "dump mode set failed\n"); 8229 error = -1; 8230 } 8231 } 8232 } 8233 return error; 8234 } 8235 8236 /** 8237 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 8238 * @instance: Adapter soft state 8239 * @user_ioc: User's ioctl packet 8240 * @ioc: ioctl packet 8241 */ 8242 static int 8243 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 8244 struct megasas_iocpacket __user * user_ioc, 8245 struct megasas_iocpacket *ioc) 8246 { 8247 struct megasas_sge64 *kern_sge64 = NULL; 8248 struct megasas_sge32 *kern_sge32 = NULL; 8249 struct megasas_cmd *cmd; 8250 void *kbuff_arr[MAX_IOCTL_SGE]; 8251 dma_addr_t buf_handle = 0; 8252 int error = 0, i; 8253 void *sense = NULL; 8254 dma_addr_t sense_handle; 8255 void *sense_ptr; 8256 u32 opcode = 0; 8257 int ret = DCMD_SUCCESS; 8258 8259 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 8260 8261 if (ioc->sge_count > MAX_IOCTL_SGE) { 8262 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 8263 ioc->sge_count, MAX_IOCTL_SGE); 8264 return -EINVAL; 8265 } 8266 8267 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 8268 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 8269 !instance->support_nvme_passthru) || 8270 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && 8271 !instance->support_pci_lane_margining)) { 8272 dev_err(&instance->pdev->dev, 8273 "Received invalid ioctl command 0x%x\n", 8274 ioc->frame.hdr.cmd); 8275 return -ENOTSUPP; 8276 } 8277 8278 cmd = megasas_get_cmd(instance); 
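	/*
	 * cmd is borrowed from the internal MFI command pool; every exit
	 * path below (including the error unwinding at the "out" label)
	 * must hand it back via megasas_return_cmd().
	 */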
8279 if (!cmd) { 8280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 8281 return -ENOMEM; 8282 } 8283 8284 /* 8285 * User's IOCTL packet has 2 frames (maximum). Copy those two 8286 * frames into our cmd's frames. cmd->frame's context will get 8287 * overwritten when we copy from user's frames. So set that value 8288 * alone separately 8289 */ 8290 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 8291 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 8292 cmd->frame->hdr.pad_0 = 0; 8293 8294 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 8295 8296 if (instance->consistent_mask_64bit) 8297 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 8298 MFI_FRAME_SENSE64)); 8299 else 8300 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 8301 MFI_FRAME_SENSE64)); 8302 8303 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 8304 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 8305 8306 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 8307 mutex_lock(&instance->reset_mutex); 8308 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 8309 megasas_return_cmd(instance, cmd); 8310 mutex_unlock(&instance->reset_mutex); 8311 return -1; 8312 } 8313 mutex_unlock(&instance->reset_mutex); 8314 } 8315 8316 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 8317 error = megasas_set_crash_dump_params_ioctl(cmd); 8318 megasas_return_cmd(instance, cmd); 8319 return error; 8320 } 8321 8322 /* 8323 * The management interface between applications and the fw uses 8324 * MFI frames. E.g, RAID configuration changes, LD property changes 8325 * etc are accomplishes through different kinds of MFI frames. The 8326 * driver needs to care only about substituting user buffers with 8327 * kernel buffers in SGLs. The location of SGL is embedded in the 8328 * struct iocpacket itself. 8329 */ 8330 if (instance->consistent_mask_64bit) 8331 kern_sge64 = (struct megasas_sge64 *) 8332 ((unsigned long)cmd->frame + ioc->sgl_off); 8333 else 8334 kern_sge32 = (struct megasas_sge32 *) 8335 ((unsigned long)cmd->frame + ioc->sgl_off); 8336 8337 /* 8338 * For each user buffer, create a mirror buffer and copy in 8339 */ 8340 for (i = 0; i < ioc->sge_count; i++) { 8341 if (!ioc->sgl[i].iov_len) 8342 continue; 8343 8344 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8345 ioc->sgl[i].iov_len, 8346 &buf_handle, GFP_KERNEL); 8347 if (!kbuff_arr[i]) { 8348 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8349 "kernel SGL buffer for IOCTL\n"); 8350 error = -ENOMEM; 8351 goto out; 8352 } 8353 8354 /* 8355 * We don't change the dma_coherent_mask, so 8356 * dma_alloc_coherent only returns 32bit addresses 8357 */ 8358 if (instance->consistent_mask_64bit) { 8359 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8360 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8361 } else { 8362 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8363 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8364 } 8365 8366 /* 8367 * We created a kernel buffer corresponding to the 8368 * user buffer. 
Now copy in from the user buffer 8369 */ 8370 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8371 (u32) (ioc->sgl[i].iov_len))) { 8372 error = -EFAULT; 8373 goto out; 8374 } 8375 } 8376 8377 if (ioc->sense_len) { 8378 /* make sure the pointer is part of the frame */ 8379 if (ioc->sense_off > 8380 (sizeof(union megasas_frame) - sizeof(__le64))) { 8381 error = -EINVAL; 8382 goto out; 8383 } 8384 8385 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8386 &sense_handle, GFP_KERNEL); 8387 if (!sense) { 8388 error = -ENOMEM; 8389 goto out; 8390 } 8391 8392 /* always store 64 bits regardless of addressing */ 8393 sense_ptr = (void *)cmd->frame + ioc->sense_off; 8394 put_unaligned_le64(sense_handle, sense_ptr); 8395 } 8396 8397 /* 8398 * Set the sync_cmd flag so that the ISR knows not to complete this 8399 * cmd to the SCSI mid-layer 8400 */ 8401 cmd->sync_cmd = 1; 8402 8403 ret = megasas_issue_blocked_cmd(instance, cmd, 0); 8404 switch (ret) { 8405 case DCMD_INIT: 8406 case DCMD_BUSY: 8407 cmd->sync_cmd = 0; 8408 dev_err(&instance->pdev->dev, 8409 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8410 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8411 cmd->cmd_status_drv); 8412 error = -EBUSY; 8413 goto out; 8414 } 8415 8416 cmd->sync_cmd = 0; 8417 8418 if (instance->unload == 1) { 8419 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8420 "don't submit data to application\n"); 8421 goto out; 8422 } 8423 /* 8424 * copy out the kernel buffers to user buffers 8425 */ 8426 for (i = 0; i < ioc->sge_count; i++) { 8427 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8428 ioc->sgl[i].iov_len)) { 8429 error = -EFAULT; 8430 goto out; 8431 } 8432 } 8433 8434 /* 8435 * copy out the sense 8436 */ 8437 if (ioc->sense_len) { 8438 void __user *uptr; 8439 /* 8440 * sense_ptr points to the location that has the user 8441 * sense buffer address 8442 */ 8443 sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; 8444 if (in_compat_syscall()) 8445 uptr = compat_ptr(get_unaligned((compat_uptr_t *) 8446 sense_ptr)); 8447 else 8448 uptr = get_unaligned((void __user **)sense_ptr); 8449 8450 if (copy_to_user(uptr, sense, ioc->sense_len)) { 8451 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8452 "sense data\n"); 8453 error = -EFAULT; 8454 goto out; 8455 } 8456 } 8457 8458 /* 8459 * copy the status codes returned by the fw 8460 */ 8461 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8462 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8463 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 8464 error = -EFAULT; 8465 } 8466 8467 out: 8468 if (sense) { 8469 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8470 sense, sense_handle); 8471 } 8472 8473 for (i = 0; i < ioc->sge_count; i++) { 8474 if (kbuff_arr[i]) { 8475 if (instance->consistent_mask_64bit) 8476 dma_free_coherent(&instance->pdev->dev, 8477 le32_to_cpu(kern_sge64[i].length), 8478 kbuff_arr[i], 8479 le64_to_cpu(kern_sge64[i].phys_addr)); 8480 else 8481 dma_free_coherent(&instance->pdev->dev, 8482 le32_to_cpu(kern_sge32[i].length), 8483 kbuff_arr[i], 8484 le32_to_cpu(kern_sge32[i].phys_addr)); 8485 kbuff_arr[i] = NULL; 8486 } 8487 } 8488 8489 megasas_return_cmd(instance, cmd); 8490 return error; 8491 } 8492 8493 static struct megasas_iocpacket * 8494 megasas_compat_iocpacket_get_user(void __user *arg) 8495 { 8496 struct megasas_iocpacket *ioc; 8497 struct compat_megasas_iocpacket __user *cioc = arg; 8498 size_t size; 8499 int err = -EFAULT; 8500 int 
i; 8501 8502 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 8503 if (!ioc) 8504 return ERR_PTR(-ENOMEM); 8505 size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); 8506 if (copy_from_user(ioc, arg, size)) 8507 goto out; 8508 8509 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8510 compat_uptr_t iov_base; 8511 8512 if (get_user(iov_base, &cioc->sgl[i].iov_base) || 8513 get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) 8514 goto out; 8515 8516 ioc->sgl[i].iov_base = compat_ptr(iov_base); 8517 } 8518 8519 return ioc; 8520 out: 8521 kfree(ioc); 8522 return ERR_PTR(err); 8523 } 8524 8525 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8526 { 8527 struct megasas_iocpacket __user *user_ioc = 8528 (struct megasas_iocpacket __user *)arg; 8529 struct megasas_iocpacket *ioc; 8530 struct megasas_instance *instance; 8531 int error; 8532 8533 if (in_compat_syscall()) 8534 ioc = megasas_compat_iocpacket_get_user(user_ioc); 8535 else 8536 ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); 8537 8538 if (IS_ERR(ioc)) 8539 return PTR_ERR(ioc); 8540 8541 instance = megasas_lookup_instance(ioc->host_no); 8542 if (!instance) { 8543 error = -ENODEV; 8544 goto out_kfree_ioc; 8545 } 8546 8547 /* Block ioctls in VF mode */ 8548 if (instance->requestorId && !allow_vf_ioctls) { 8549 error = -ENODEV; 8550 goto out_kfree_ioc; 8551 } 8552 8553 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8554 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8555 error = -ENODEV; 8556 goto out_kfree_ioc; 8557 } 8558 8559 if (instance->unload == 1) { 8560 error = -ENODEV; 8561 goto out_kfree_ioc; 8562 } 8563 8564 if (down_interruptible(&instance->ioctl_sem)) { 8565 error = -ERESTARTSYS; 8566 goto out_kfree_ioc; 8567 } 8568 8569 if (megasas_wait_for_adapter_operational(instance)) { 8570 error = -ENODEV; 8571 goto out_up; 8572 } 8573 8574 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8575 out_up: 8576 up(&instance->ioctl_sem); 8577 8578 out_kfree_ioc: 8579 kfree(ioc); 8580 return error; 8581 } 8582 8583 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8584 { 8585 struct megasas_instance *instance; 8586 struct megasas_aen aen; 8587 int error; 8588 8589 if (file->private_data != file) { 8590 printk(KERN_DEBUG "megasas: fasync_helper was not " 8591 "called first\n"); 8592 return -EINVAL; 8593 } 8594 8595 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8596 return -EFAULT; 8597 8598 instance = megasas_lookup_instance(aen.host_no); 8599 8600 if (!instance) 8601 return -ENODEV; 8602 8603 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8604 return -ENODEV; 8605 } 8606 8607 if (instance->unload == 1) { 8608 return -ENODEV; 8609 } 8610 8611 if (megasas_wait_for_adapter_operational(instance)) 8612 return -ENODEV; 8613 8614 mutex_lock(&instance->reset_mutex); 8615 error = megasas_register_aen(instance, aen.seq_num, 8616 aen.class_locale_word); 8617 mutex_unlock(&instance->reset_mutex); 8618 return error; 8619 } 8620 8621 /** 8622 * megasas_mgmt_ioctl - char node ioctl entry point 8623 * @file: char device file pointer 8624 * @cmd: ioctl command 8625 * @arg: ioctl command arguments address 8626 */ 8627 static long 8628 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8629 { 8630 switch (cmd) { 8631 case MEGASAS_IOC_FIRMWARE: 8632 return megasas_mgmt_ioctl_fw(file, arg); 8633 8634 case MEGASAS_IOC_GET_AEN: 8635 return megasas_mgmt_ioctl_aen(file, arg); 8636 } 8637 8638 return 
-ENOTTY; 8639 } 8640 8641 #ifdef CONFIG_COMPAT 8642 static long 8643 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8644 unsigned long arg) 8645 { 8646 switch (cmd) { 8647 case MEGASAS_IOC_FIRMWARE32: 8648 return megasas_mgmt_ioctl_fw(file, arg); 8649 case MEGASAS_IOC_GET_AEN: 8650 return megasas_mgmt_ioctl_aen(file, arg); 8651 } 8652 8653 return -ENOTTY; 8654 } 8655 #endif 8656 8657 /* 8658 * File operations structure for management interface 8659 */ 8660 static const struct file_operations megasas_mgmt_fops = { 8661 .owner = THIS_MODULE, 8662 .open = megasas_mgmt_open, 8663 .fasync = megasas_mgmt_fasync, 8664 .unlocked_ioctl = megasas_mgmt_ioctl, 8665 .poll = megasas_mgmt_poll, 8666 #ifdef CONFIG_COMPAT 8667 .compat_ioctl = megasas_mgmt_compat_ioctl, 8668 #endif 8669 .llseek = noop_llseek, 8670 }; 8671 8672 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); 8673 8674 /* 8675 * PCI hotplug support registration structure 8676 */ 8677 static struct pci_driver megasas_pci_driver = { 8678 8679 .name = "megaraid_sas", 8680 .id_table = megasas_pci_table, 8681 .probe = megasas_probe_one, 8682 .remove = megasas_detach_one, 8683 .driver.pm = &megasas_pm_ops, 8684 .shutdown = megasas_shutdown, 8685 }; 8686 8687 /* 8688 * Sysfs driver attributes 8689 */ 8690 static ssize_t version_show(struct device_driver *dd, char *buf) 8691 { 8692 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8693 MEGASAS_VERSION); 8694 } 8695 static DRIVER_ATTR_RO(version); 8696 8697 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8698 { 8699 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8700 MEGASAS_RELDATE); 8701 } 8702 static DRIVER_ATTR_RO(release_date); 8703 8704 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8705 { 8706 return sprintf(buf, "%u\n", support_poll_for_event); 8707 } 8708 static DRIVER_ATTR_RO(support_poll_for_event); 8709 8710 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8711 { 8712 return sprintf(buf, "%u\n", support_device_change); 8713 } 8714 static DRIVER_ATTR_RO(support_device_change); 8715 8716 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8717 { 8718 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8719 } 8720 8721 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8722 size_t count) 8723 { 8724 int retval = count; 8725 8726 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8727 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8728 retval = -EINVAL; 8729 } 8730 return retval; 8731 } 8732 static DRIVER_ATTR_RW(dbg_lvl); 8733 8734 static ssize_t 8735 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8736 { 8737 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8738 } 8739 8740 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8741 8742 static ssize_t 8743 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8744 { 8745 return sprintf(buf, "%u\n", support_pci_lane_margining); 8746 } 8747 8748 static DRIVER_ATTR_RO(support_pci_lane_margining); 8749 8750 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8751 { 8752 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8753 scsi_remove_device(sdev); 8754 scsi_device_put(sdev); 8755 } 8756 8757 /** 8758 * megasas_update_device_list - Update the PD and LD device list from FW 8759 * after an AEN event notification 8760 * @instance: Adapter soft state 8761 * @event_type: Indicates type of event (PD or LD 
event) 8762 * 8763 * @return: Success or failure 8764 * 8765 * Issue DCMDs to Firmware to update the internal device list in driver. 8766 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8767 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8768 */ 8769 static 8770 int megasas_update_device_list(struct megasas_instance *instance, 8771 int event_type) 8772 { 8773 int dcmd_ret = DCMD_SUCCESS; 8774 8775 if (instance->enable_fw_dev_list) { 8776 dcmd_ret = megasas_host_device_list_query(instance, false); 8777 if (dcmd_ret != DCMD_SUCCESS) 8778 goto out; 8779 } else { 8780 if (event_type & SCAN_PD_CHANNEL) { 8781 dcmd_ret = megasas_get_pd_list(instance); 8782 8783 if (dcmd_ret != DCMD_SUCCESS) 8784 goto out; 8785 } 8786 8787 if (event_type & SCAN_VD_CHANNEL) { 8788 if (!instance->requestorId || 8789 megasas_get_ld_vf_affiliation(instance, 0)) { 8790 dcmd_ret = megasas_ld_list_query(instance, 8791 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8792 if (dcmd_ret != DCMD_SUCCESS) 8793 goto out; 8794 } 8795 } 8796 } 8797 8798 out: 8799 return dcmd_ret; 8800 } 8801 8802 /** 8803 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8804 * after an AEN event notification 8805 * @instance: Adapter soft state 8806 * @scan_type: Indicates type of devices (PD/LD) to add 8807 * @return void 8808 */ 8809 static 8810 void megasas_add_remove_devices(struct megasas_instance *instance, 8811 int scan_type) 8812 { 8813 int i, j; 8814 u16 pd_index = 0; 8815 u16 ld_index = 0; 8816 u16 channel = 0, id = 0; 8817 struct Scsi_Host *host; 8818 struct scsi_device *sdev1; 8819 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8820 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8821 8822 host = instance->host; 8823 8824 if (instance->enable_fw_dev_list) { 8825 targetid_list = instance->host_device_list_buf; 8826 for (i = 0; i < targetid_list->count; i++) { 8827 targetid_entry = &targetid_list->host_device_list[i]; 8828 if (targetid_entry->flags.u.bits.is_sys_pd) { 8829 channel = le16_to_cpu(targetid_entry->target_id) / 8830 MEGASAS_MAX_DEV_PER_CHANNEL; 8831 id = le16_to_cpu(targetid_entry->target_id) % 8832 MEGASAS_MAX_DEV_PER_CHANNEL; 8833 } else { 8834 channel = MEGASAS_MAX_PD_CHANNELS + 8835 (le16_to_cpu(targetid_entry->target_id) / 8836 MEGASAS_MAX_DEV_PER_CHANNEL); 8837 id = le16_to_cpu(targetid_entry->target_id) % 8838 MEGASAS_MAX_DEV_PER_CHANNEL; 8839 } 8840 sdev1 = scsi_device_lookup(host, channel, id, 0); 8841 if (!sdev1) { 8842 scsi_add_device(host, channel, id, 0); 8843 } else { 8844 scsi_device_put(sdev1); 8845 } 8846 } 8847 } 8848 8849 if (scan_type & SCAN_PD_CHANNEL) { 8850 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8851 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8852 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8853 sdev1 = scsi_device_lookup(host, i, j, 0); 8854 if (instance->pd_list[pd_index].driveState == 8855 MR_PD_STATE_SYSTEM) { 8856 if (!sdev1) 8857 scsi_add_device(host, i, j, 0); 8858 else 8859 scsi_device_put(sdev1); 8860 } else { 8861 if (sdev1) 8862 megasas_remove_scsi_device(sdev1); 8863 } 8864 } 8865 } 8866 } 8867 8868 if (scan_type & SCAN_VD_CHANNEL) { 8869 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8870 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8871 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8872 sdev1 = scsi_device_lookup(host, 8873 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8874 if (instance->ld_ids[ld_index] != 0xff) { 8875 if (!sdev1) 8876 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8877 else 8878 
scsi_device_put(sdev1); 8879 } else { 8880 if (sdev1) 8881 megasas_remove_scsi_device(sdev1); 8882 } 8883 } 8884 } 8885 } 8886 8887 } 8888 8889 static void 8890 megasas_aen_polling(struct work_struct *work) 8891 { 8892 struct megasas_aen_event *ev = 8893 container_of(work, struct megasas_aen_event, hotplug_work.work); 8894 struct megasas_instance *instance = ev->instance; 8895 union megasas_evt_class_locale class_locale; 8896 int event_type = 0; 8897 u32 seq_num; 8898 u16 ld_target_id; 8899 int error; 8900 u8 dcmd_ret = DCMD_SUCCESS; 8901 struct scsi_device *sdev1; 8902 8903 if (!instance) { 8904 printk(KERN_ERR "invalid instance!\n"); 8905 kfree(ev); 8906 return; 8907 } 8908 8909 /* Don't run the event workqueue thread if OCR is running */ 8910 mutex_lock(&instance->reset_mutex); 8911 8912 instance->ev = NULL; 8913 if (instance->evt_detail) { 8914 megasas_decode_evt(instance); 8915 8916 switch (le32_to_cpu(instance->evt_detail->code)) { 8917 8918 case MR_EVT_PD_INSERTED: 8919 case MR_EVT_PD_REMOVED: 8920 event_type = SCAN_PD_CHANNEL; 8921 break; 8922 8923 case MR_EVT_LD_OFFLINE: 8924 case MR_EVT_LD_DELETED: 8925 ld_target_id = instance->evt_detail->args.ld.target_id; 8926 sdev1 = scsi_device_lookup(instance->host, 8927 MEGASAS_MAX_PD_CHANNELS + 8928 (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), 8929 (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL), 8930 0); 8931 if (sdev1) 8932 megasas_remove_scsi_device(sdev1); 8933 8934 event_type = SCAN_VD_CHANNEL; 8935 break; 8936 case MR_EVT_LD_CREATED: 8937 event_type = SCAN_VD_CHANNEL; 8938 break; 8939 8940 case MR_EVT_CFG_CLEARED: 8941 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8942 case MR_EVT_FOREIGN_CFG_IMPORTED: 8943 case MR_EVT_LD_STATE_CHANGE: 8944 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8945 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8946 instance->host->host_no); 8947 break; 8948 8949 case MR_EVT_CTRL_PROP_CHANGED: 8950 dcmd_ret = megasas_get_ctrl_info(instance); 8951 if (dcmd_ret == DCMD_SUCCESS && 8952 instance->snapdump_wait_time) { 8953 megasas_get_snapdump_properties(instance); 8954 dev_info(&instance->pdev->dev, 8955 "Snap dump wait time\t: %d\n", 8956 instance->snapdump_wait_time); 8957 } 8958 break; 8959 default: 8960 event_type = 0; 8961 break; 8962 } 8963 } else { 8964 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8965 mutex_unlock(&instance->reset_mutex); 8966 kfree(ev); 8967 return; 8968 } 8969 8970 if (event_type) 8971 dcmd_ret = megasas_update_device_list(instance, event_type); 8972 8973 mutex_unlock(&instance->reset_mutex); 8974 8975 if (event_type && dcmd_ret == DCMD_SUCCESS) 8976 megasas_add_remove_devices(instance, event_type); 8977 8978 if (dcmd_ret == DCMD_SUCCESS) 8979 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8980 else 8981 seq_num = instance->last_seq_num; 8982 8983 /* Register AEN with FW for latest sequence number plus 1 */ 8984 class_locale.members.reserved = 0; 8985 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8986 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8987 8988 if (instance->aen_cmd != NULL) { 8989 kfree(ev); 8990 return; 8991 } 8992 8993 mutex_lock(&instance->reset_mutex); 8994 error = megasas_register_aen(instance, seq_num, 8995 class_locale.word); 8996 if (error) 8997 dev_err(&instance->pdev->dev, 8998 "register aen failed error %x\n", error); 8999 9000 mutex_unlock(&instance->reset_mutex); 9001 kfree(ev); 9002 } 9003 9004 /** 9005 * megasas_init - Driver load entry point 9006 */ 9007 static int __init megasas_init(void) 9008 { 9009 int rval; 9010 
9011 /* 9012 * Booted in kdump kernel, minimize memory footprints by 9013 * disabling few features 9014 */ 9015 if (reset_devices) { 9016 msix_vectors = 1; 9017 rdpq_enable = 0; 9018 dual_qdepth_disable = 1; 9019 poll_queues = 0; 9020 } 9021 9022 /* 9023 * Announce driver version and other information 9024 */ 9025 pr_info("megasas: %s\n", MEGASAS_VERSION); 9026 9027 support_poll_for_event = 2; 9028 support_device_change = 1; 9029 support_nvme_encapsulation = true; 9030 support_pci_lane_margining = true; 9031 9032 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 9033 9034 /* 9035 * Register character device node 9036 */ 9037 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 9038 9039 if (rval < 0) { 9040 printk(KERN_DEBUG "megasas: failed to open device node\n"); 9041 return rval; 9042 } 9043 9044 megasas_mgmt_majorno = rval; 9045 9046 megasas_init_debugfs(); 9047 9048 /* 9049 * Register ourselves as PCI hotplug module 9050 */ 9051 rval = pci_register_driver(&megasas_pci_driver); 9052 9053 if (rval) { 9054 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 9055 goto err_pcidrv; 9056 } 9057 9058 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 9059 (event_log_level > MFI_EVT_CLASS_DEAD)) { 9060 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 9061 event_log_level = MFI_EVT_CLASS_CRITICAL; 9062 } 9063 9064 rval = driver_create_file(&megasas_pci_driver.driver, 9065 &driver_attr_version); 9066 if (rval) 9067 goto err_dcf_attr_ver; 9068 9069 rval = driver_create_file(&megasas_pci_driver.driver, 9070 &driver_attr_release_date); 9071 if (rval) 9072 goto err_dcf_rel_date; 9073 9074 rval = driver_create_file(&megasas_pci_driver.driver, 9075 &driver_attr_support_poll_for_event); 9076 if (rval) 9077 goto err_dcf_support_poll_for_event; 9078 9079 rval = driver_create_file(&megasas_pci_driver.driver, 9080 &driver_attr_dbg_lvl); 9081 if (rval) 9082 goto err_dcf_dbg_lvl; 9083 rval = driver_create_file(&megasas_pci_driver.driver, 9084 &driver_attr_support_device_change); 9085 if (rval) 9086 goto err_dcf_support_device_change; 9087 9088 rval = driver_create_file(&megasas_pci_driver.driver, 9089 &driver_attr_support_nvme_encapsulation); 9090 if (rval) 9091 goto err_dcf_support_nvme_encapsulation; 9092 9093 rval = driver_create_file(&megasas_pci_driver.driver, 9094 &driver_attr_support_pci_lane_margining); 9095 if (rval) 9096 goto err_dcf_support_pci_lane_margining; 9097 9098 return rval; 9099 9100 err_dcf_support_pci_lane_margining: 9101 driver_remove_file(&megasas_pci_driver.driver, 9102 &driver_attr_support_nvme_encapsulation); 9103 9104 err_dcf_support_nvme_encapsulation: 9105 driver_remove_file(&megasas_pci_driver.driver, 9106 &driver_attr_support_device_change); 9107 9108 err_dcf_support_device_change: 9109 driver_remove_file(&megasas_pci_driver.driver, 9110 &driver_attr_dbg_lvl); 9111 err_dcf_dbg_lvl: 9112 driver_remove_file(&megasas_pci_driver.driver, 9113 &driver_attr_support_poll_for_event); 9114 err_dcf_support_poll_for_event: 9115 driver_remove_file(&megasas_pci_driver.driver, 9116 &driver_attr_release_date); 9117 err_dcf_rel_date: 9118 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9119 err_dcf_attr_ver: 9120 pci_unregister_driver(&megasas_pci_driver); 9121 err_pcidrv: 9122 megasas_exit_debugfs(); 9123 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9124 return rval; 9125 } 9126 9127 /** 9128 * megasas_exit - Driver unload entry 
point 9129 */ 9130 static void __exit megasas_exit(void) 9131 { 9132 driver_remove_file(&megasas_pci_driver.driver, 9133 &driver_attr_dbg_lvl); 9134 driver_remove_file(&megasas_pci_driver.driver, 9135 &driver_attr_support_poll_for_event); 9136 driver_remove_file(&megasas_pci_driver.driver, 9137 &driver_attr_support_device_change); 9138 driver_remove_file(&megasas_pci_driver.driver, 9139 &driver_attr_release_date); 9140 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9141 driver_remove_file(&megasas_pci_driver.driver, 9142 &driver_attr_support_nvme_encapsulation); 9143 driver_remove_file(&megasas_pci_driver.driver, 9144 &driver_attr_support_pci_lane_margining); 9145 9146 pci_unregister_driver(&megasas_pci_driver); 9147 megasas_exit_debugfs(); 9148 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9149 } 9150 9151 module_init(megasas_init); 9152 module_exit(megasas_exit); 9153
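/*
 * Usage note (illustrative): the driver attributes registered in
 * megasas_init() appear under /sys/bus/pci/drivers/megaraid_sas/.
 * For example, the runtime debug level served by dbg_lvl_show() and
 * dbg_lvl_store() can be inspected and changed with:
 *
 *   cat /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 *   echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */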