// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
#include <linux/blk-mq-pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");

int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
	"It is not applicable for MFI_SERIES. &\n\t\t"
	"Driver will work in latency mode. &\n\t\t"
	"High iops queues are not allocated &\n\t\t"
	);

int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 175 /* Fury */ 176 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)}, 177 /* Intruder */ 178 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)}, 179 /* Intruder 24 port*/ 180 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 181 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 182 /* VENTURA */ 183 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 184 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)}, 185 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 186 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 187 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 188 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, 189 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)}, 190 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)}, 191 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)}, 192 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)}, 193 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)}, 194 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)}, 195 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)}, 196 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)}, 197 {} 198 }; 199 200 MODULE_DEVICE_TABLE(pci, megasas_pci_table); 201 202 static int megasas_mgmt_majorno; 203 struct megasas_mgmt_info megasas_mgmt_info; 204 static struct fasync_struct *megasas_async_queue; 205 static DEFINE_MUTEX(megasas_async_queue_mutex); 206 207 static int megasas_poll_wait_aen; 208 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 209 static u32 support_poll_for_event; 210 u32 megasas_dbg_lvl; 211 static u32 support_device_change; 212 static bool support_nvme_encapsulation; 213 static bool support_pci_lane_margining; 214 215 /* define lock for aen poll */ 216 static spinlock_t poll_aen_lock; 217 218 extern struct dentry *megasas_debugfs_root; 219 extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); 220 221 void 222 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 223 u8 alt_status); 224 static u32 225 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance); 226 static int 227 megasas_adp_reset_gen2(struct megasas_instance *instance, 228 struct megasas_register_set __iomem *reg_set); 229 static irqreturn_t megasas_isr(int irq, void *devp); 230 static u32 231 megasas_init_adapter_mfi(struct megasas_instance *instance); 232 u32 233 megasas_build_and_issue_cmd(struct megasas_instance *instance, 234 struct scsi_cmnd *scmd); 235 static void megasas_complete_cmd_dpc(unsigned long instance_addr); 236 int 237 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 238 int seconds); 239 void megasas_fusion_ocr_wq(struct work_struct *work); 240 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 241 int initial); 242 static int 243 megasas_set_dma_mask(struct megasas_instance *instance); 244 static int 245 megasas_alloc_ctrl_mem(struct megasas_instance *instance); 246 static inline void 247 megasas_free_ctrl_mem(struct megasas_instance *instance); 248 static inline int 249 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance); 250 static inline void 251 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance); 252 static inline 
void 253 megasas_init_ctrl_params(struct megasas_instance *instance); 254 255 u32 megasas_readl(struct megasas_instance *instance, 256 const volatile void __iomem *addr) 257 { 258 u32 i = 0, ret_val; 259 /* 260 * Due to a HW errata in Aero controllers, reads to certain 261 * Fusion registers could intermittently return all zeroes. 262 * This behavior is transient in nature and subsequent reads will 263 * return valid value. As a workaround in driver, retry readl for 264 * upto three times until a non-zero value is read. 265 */ 266 if (instance->adapter_type == AERO_SERIES) { 267 do { 268 ret_val = readl(addr); 269 i++; 270 } while (ret_val == 0 && i < 3); 271 return ret_val; 272 } else { 273 return readl(addr); 274 } 275 } 276 277 /** 278 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs 279 * @instance: Adapter soft state 280 * @dcmd: DCMD frame inside MFI command 281 * @dma_addr: DMA address of buffer to be passed to FW 282 * @dma_len: Length of DMA buffer to be passed to FW 283 * @return: void 284 */ 285 void megasas_set_dma_settings(struct megasas_instance *instance, 286 struct megasas_dcmd_frame *dcmd, 287 dma_addr_t dma_addr, u32 dma_len) 288 { 289 if (instance->consistent_mask_64bit) { 290 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr); 291 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len); 292 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64); 293 294 } else { 295 dcmd->sgl.sge32[0].phys_addr = 296 cpu_to_le32(lower_32_bits(dma_addr)); 297 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len); 298 dcmd->flags = cpu_to_le16(dcmd->flags); 299 } 300 } 301 302 static void 303 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 304 { 305 instance->instancet->fire_cmd(instance, 306 cmd->frame_phys_addr, 0, instance->reg_set); 307 return; 308 } 309 310 /** 311 * megasas_get_cmd - Get a command from the free pool 312 * @instance: Adapter soft state 313 * 314 * Returns a free command from the pool 315 */ 316 struct megasas_cmd *megasas_get_cmd(struct megasas_instance 317 *instance) 318 { 319 unsigned long flags; 320 struct megasas_cmd *cmd = NULL; 321 322 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 323 324 if (!list_empty(&instance->cmd_pool)) { 325 cmd = list_entry((&instance->cmd_pool)->next, 326 struct megasas_cmd, list); 327 list_del_init(&cmd->list); 328 } else { 329 dev_err(&instance->pdev->dev, "Command pool empty!\n"); 330 } 331 332 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 333 return cmd; 334 } 335 336 /** 337 * megasas_return_cmd - Return a cmd to free command pool 338 * @instance: Adapter soft state 339 * @cmd: Command packet to be returned to free command pool 340 */ 341 void 342 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 343 { 344 unsigned long flags; 345 u32 blk_tags; 346 struct megasas_cmd_fusion *cmd_fusion; 347 struct fusion_context *fusion = instance->ctrl_context; 348 349 /* This flag is used only for fusion adapter. 
350 * Wait for Interrupt for Polled mode DCMD 351 */ 352 if (cmd->flags & DRV_DCMD_POLLED_MODE) 353 return; 354 355 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 356 357 if (fusion) { 358 blk_tags = instance->max_scsi_cmds + cmd->index; 359 cmd_fusion = fusion->cmd_list[blk_tags]; 360 megasas_return_cmd_fusion(instance, cmd_fusion); 361 } 362 cmd->scmd = NULL; 363 cmd->frame_count = 0; 364 cmd->flags = 0; 365 memset(cmd->frame, 0, instance->mfi_frame_size); 366 cmd->frame->io.context = cpu_to_le32(cmd->index); 367 if (!fusion && reset_devices) 368 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 369 list_add(&cmd->list, (&instance->cmd_pool)->next); 370 371 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 372 373 } 374 375 static const char * 376 format_timestamp(uint32_t timestamp) 377 { 378 static char buffer[32]; 379 380 if ((timestamp & 0xff000000) == 0xff000000) 381 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 382 0x00ffffff); 383 else 384 snprintf(buffer, sizeof(buffer), "%us", timestamp); 385 return buffer; 386 } 387 388 static const char * 389 format_class(int8_t class) 390 { 391 static char buffer[6]; 392 393 switch (class) { 394 case MFI_EVT_CLASS_DEBUG: 395 return "debug"; 396 case MFI_EVT_CLASS_PROGRESS: 397 return "progress"; 398 case MFI_EVT_CLASS_INFO: 399 return "info"; 400 case MFI_EVT_CLASS_WARNING: 401 return "WARN"; 402 case MFI_EVT_CLASS_CRITICAL: 403 return "CRIT"; 404 case MFI_EVT_CLASS_FATAL: 405 return "FATAL"; 406 case MFI_EVT_CLASS_DEAD: 407 return "DEAD"; 408 default: 409 snprintf(buffer, sizeof(buffer), "%d", class); 410 return buffer; 411 } 412 } 413 414 /** 415 * megasas_decode_evt: Decode FW AEN event and print critical event 416 * for information. 417 * @instance: Adapter soft state 418 */ 419 static void 420 megasas_decode_evt(struct megasas_instance *instance) 421 { 422 struct megasas_evt_detail *evt_detail = instance->evt_detail; 423 union megasas_evt_class_locale class_locale; 424 class_locale.word = le32_to_cpu(evt_detail->cl.word); 425 426 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 427 (event_log_level > MFI_EVT_CLASS_DEAD)) { 428 printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 429 event_log_level = MFI_EVT_CLASS_CRITICAL; 430 } 431 432 if (class_locale.members.class >= event_log_level) 433 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n", 434 le32_to_cpu(evt_detail->seq_num), 435 format_timestamp(le32_to_cpu(evt_detail->time_stamp)), 436 (class_locale.members.locale), 437 format_class(class_locale.members.class), 438 evt_detail->description); 439 } 440 441 /* 442 * The following functions are defined for xscale 443 * (deviceid : 1064R, PERC5) controllers 444 */ 445 446 /** 447 * megasas_enable_intr_xscale - Enables interrupts 448 * @instance: Adapter soft state 449 */ 450 static inline void 451 megasas_enable_intr_xscale(struct megasas_instance *instance) 452 { 453 struct megasas_register_set __iomem *regs; 454 455 regs = instance->reg_set; 456 writel(0, &(regs)->outbound_intr_mask); 457 458 /* Dummy readl to force pci flush */ 459 readl(®s->outbound_intr_mask); 460 } 461 462 /** 463 * megasas_disable_intr_xscale -Disables interrupt 464 * @instance: Adapter soft state 465 */ 466 static inline void 467 megasas_disable_intr_xscale(struct megasas_instance *instance) 468 { 469 struct megasas_register_set __iomem *regs; 470 u32 mask = 0x1f; 471 472 regs = instance->reg_set; 473 writel(mask, ®s->outbound_intr_mask); 474 /* 
Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance: Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_intr_xscale - Check & clear interrupt
 * @instance: Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @instance: Adapter soft state
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) !=
MEGASAS_HBA_OPERATIONAL) && 594 (le32_to_cpu(*instance->consumer) == 595 MEGASAS_ADPRESET_INPROG_SIGN)) 596 return 1; 597 return 0; 598 } 599 600 static struct megasas_instance_template megasas_instance_template_xscale = { 601 602 .fire_cmd = megasas_fire_cmd_xscale, 603 .enable_intr = megasas_enable_intr_xscale, 604 .disable_intr = megasas_disable_intr_xscale, 605 .clear_intr = megasas_clear_intr_xscale, 606 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 607 .adp_reset = megasas_adp_reset_xscale, 608 .check_reset = megasas_check_reset_xscale, 609 .service_isr = megasas_isr, 610 .tasklet = megasas_complete_cmd_dpc, 611 .init_adapter = megasas_init_adapter_mfi, 612 .build_and_issue_cmd = megasas_build_and_issue_cmd, 613 .issue_dcmd = megasas_issue_dcmd, 614 }; 615 616 /* 617 * This is the end of set of functions & definitions specific 618 * to xscale (deviceid : 1064R, PERC5) controllers 619 */ 620 621 /* 622 * The following functions are defined for ppc (deviceid : 0x60) 623 * controllers 624 */ 625 626 /** 627 * megasas_enable_intr_ppc - Enables interrupts 628 * @instance: Adapter soft state 629 */ 630 static inline void 631 megasas_enable_intr_ppc(struct megasas_instance *instance) 632 { 633 struct megasas_register_set __iomem *regs; 634 635 regs = instance->reg_set; 636 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 637 638 writel(~0x80000000, &(regs)->outbound_intr_mask); 639 640 /* Dummy readl to force pci flush */ 641 readl(®s->outbound_intr_mask); 642 } 643 644 /** 645 * megasas_disable_intr_ppc - Disable interrupt 646 * @instance: Adapter soft state 647 */ 648 static inline void 649 megasas_disable_intr_ppc(struct megasas_instance *instance) 650 { 651 struct megasas_register_set __iomem *regs; 652 u32 mask = 0xFFFFFFFF; 653 654 regs = instance->reg_set; 655 writel(mask, ®s->outbound_intr_mask); 656 /* Dummy readl to force pci flush */ 657 readl(®s->outbound_intr_mask); 658 } 659 660 /** 661 * megasas_read_fw_status_reg_ppc - returns the current FW status value 662 * @instance: Adapter soft state 663 */ 664 static u32 665 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance) 666 { 667 return readl(&instance->reg_set->outbound_scratch_pad_0); 668 } 669 670 /** 671 * megasas_clear_intr_ppc - Check & clear interrupt 672 * @instance: Adapter soft state 673 */ 674 static int 675 megasas_clear_intr_ppc(struct megasas_instance *instance) 676 { 677 u32 status, mfiStatus = 0; 678 struct megasas_register_set __iomem *regs; 679 regs = instance->reg_set; 680 681 /* 682 * Check if it is our interrupt 683 */ 684 status = readl(®s->outbound_intr_status); 685 686 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) 687 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 688 689 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) 690 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 691 692 /* 693 * Clear the interrupt by writing back the same value 694 */ 695 writel(status, ®s->outbound_doorbell_clear); 696 697 /* Dummy readl to force pci flush */ 698 readl(®s->outbound_doorbell_clear); 699 700 return mfiStatus; 701 } 702 703 /** 704 * megasas_fire_cmd_ppc - Sends command to the FW 705 * @instance: Adapter soft state 706 * @frame_phys_addr: Physical address of cmd 707 * @frame_count: Number of frames for the command 708 * @regs: MFI register set 709 */ 710 static inline void 711 megasas_fire_cmd_ppc(struct megasas_instance *instance, 712 dma_addr_t frame_phys_addr, 713 u32 frame_count, 714 struct megasas_register_set __iomem *regs) 715 { 716 unsigned long flags; 717 718 
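	/*
	 * The aligned MFI frame address leaves its low-order bits free, so
	 * the frame count is folded in (shifted left by one) and bit 0 is
	 * set when the frame is posted to the inbound queue port below.
	 */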
spin_lock_irqsave(&instance->hba_lock, flags); 719 writel((frame_phys_addr | (frame_count<<1))|1, 720 &(regs)->inbound_queue_port); 721 spin_unlock_irqrestore(&instance->hba_lock, flags); 722 } 723 724 /** 725 * megasas_check_reset_ppc - For controller reset check 726 * @instance: Adapter soft state 727 * @regs: MFI register set 728 */ 729 static int 730 megasas_check_reset_ppc(struct megasas_instance *instance, 731 struct megasas_register_set __iomem *regs) 732 { 733 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 734 return 1; 735 736 return 0; 737 } 738 739 static struct megasas_instance_template megasas_instance_template_ppc = { 740 741 .fire_cmd = megasas_fire_cmd_ppc, 742 .enable_intr = megasas_enable_intr_ppc, 743 .disable_intr = megasas_disable_intr_ppc, 744 .clear_intr = megasas_clear_intr_ppc, 745 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 746 .adp_reset = megasas_adp_reset_xscale, 747 .check_reset = megasas_check_reset_ppc, 748 .service_isr = megasas_isr, 749 .tasklet = megasas_complete_cmd_dpc, 750 .init_adapter = megasas_init_adapter_mfi, 751 .build_and_issue_cmd = megasas_build_and_issue_cmd, 752 .issue_dcmd = megasas_issue_dcmd, 753 }; 754 755 /** 756 * megasas_enable_intr_skinny - Enables interrupts 757 * @instance: Adapter soft state 758 */ 759 static inline void 760 megasas_enable_intr_skinny(struct megasas_instance *instance) 761 { 762 struct megasas_register_set __iomem *regs; 763 764 regs = instance->reg_set; 765 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); 766 767 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 768 769 /* Dummy readl to force pci flush */ 770 readl(®s->outbound_intr_mask); 771 } 772 773 /** 774 * megasas_disable_intr_skinny - Disables interrupt 775 * @instance: Adapter soft state 776 */ 777 static inline void 778 megasas_disable_intr_skinny(struct megasas_instance *instance) 779 { 780 struct megasas_register_set __iomem *regs; 781 u32 mask = 0xFFFFFFFF; 782 783 regs = instance->reg_set; 784 writel(mask, ®s->outbound_intr_mask); 785 /* Dummy readl to force pci flush */ 786 readl(®s->outbound_intr_mask); 787 } 788 789 /** 790 * megasas_read_fw_status_reg_skinny - returns the current FW status value 791 * @instance: Adapter soft state 792 */ 793 static u32 794 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance) 795 { 796 return readl(&instance->reg_set->outbound_scratch_pad_0); 797 } 798 799 /** 800 * megasas_clear_intr_skinny - Check & clear interrupt 801 * @instance: Adapter soft state 802 */ 803 static int 804 megasas_clear_intr_skinny(struct megasas_instance *instance) 805 { 806 u32 status; 807 u32 mfiStatus = 0; 808 struct megasas_register_set __iomem *regs; 809 regs = instance->reg_set; 810 811 /* 812 * Check if it is our interrupt 813 */ 814 status = readl(®s->outbound_intr_status); 815 816 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { 817 return 0; 818 } 819 820 /* 821 * Check if it is our interrupt 822 */ 823 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) == 824 MFI_STATE_FAULT) { 825 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 826 } else 827 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 828 829 /* 830 * Clear the interrupt by writing back the same value 831 */ 832 writel(status, ®s->outbound_intr_status); 833 834 /* 835 * dummy read to flush PCI 836 */ 837 readl(®s->outbound_intr_status); 838 839 return mfiStatus; 840 } 841 842 /** 843 * megasas_fire_cmd_skinny - Sends command to the FW 844 * @instance: Adapter soft state 845 * 
@frame_phys_addr: Physical address of cmd 846 * @frame_count: Number of frames for the command 847 * @regs: MFI register set 848 */ 849 static inline void 850 megasas_fire_cmd_skinny(struct megasas_instance *instance, 851 dma_addr_t frame_phys_addr, 852 u32 frame_count, 853 struct megasas_register_set __iomem *regs) 854 { 855 unsigned long flags; 856 857 spin_lock_irqsave(&instance->hba_lock, flags); 858 writel(upper_32_bits(frame_phys_addr), 859 &(regs)->inbound_high_queue_port); 860 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, 861 &(regs)->inbound_low_queue_port); 862 spin_unlock_irqrestore(&instance->hba_lock, flags); 863 } 864 865 /** 866 * megasas_check_reset_skinny - For controller reset check 867 * @instance: Adapter soft state 868 * @regs: MFI register set 869 */ 870 static int 871 megasas_check_reset_skinny(struct megasas_instance *instance, 872 struct megasas_register_set __iomem *regs) 873 { 874 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 875 return 1; 876 877 return 0; 878 } 879 880 static struct megasas_instance_template megasas_instance_template_skinny = { 881 882 .fire_cmd = megasas_fire_cmd_skinny, 883 .enable_intr = megasas_enable_intr_skinny, 884 .disable_intr = megasas_disable_intr_skinny, 885 .clear_intr = megasas_clear_intr_skinny, 886 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 887 .adp_reset = megasas_adp_reset_gen2, 888 .check_reset = megasas_check_reset_skinny, 889 .service_isr = megasas_isr, 890 .tasklet = megasas_complete_cmd_dpc, 891 .init_adapter = megasas_init_adapter_mfi, 892 .build_and_issue_cmd = megasas_build_and_issue_cmd, 893 .issue_dcmd = megasas_issue_dcmd, 894 }; 895 896 897 /* 898 * The following functions are defined for gen2 (deviceid : 0x78 0x79) 899 * controllers 900 */ 901 902 /** 903 * megasas_enable_intr_gen2 - Enables interrupts 904 * @instance: Adapter soft state 905 */ 906 static inline void 907 megasas_enable_intr_gen2(struct megasas_instance *instance) 908 { 909 struct megasas_register_set __iomem *regs; 910 911 regs = instance->reg_set; 912 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 913 914 /* write ~0x00000005 (4 & 1) to the intr mask*/ 915 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 916 917 /* Dummy readl to force pci flush */ 918 readl(®s->outbound_intr_mask); 919 } 920 921 /** 922 * megasas_disable_intr_gen2 - Disables interrupt 923 * @instance: Adapter soft state 924 */ 925 static inline void 926 megasas_disable_intr_gen2(struct megasas_instance *instance) 927 { 928 struct megasas_register_set __iomem *regs; 929 u32 mask = 0xFFFFFFFF; 930 931 regs = instance->reg_set; 932 writel(mask, ®s->outbound_intr_mask); 933 /* Dummy readl to force pci flush */ 934 readl(®s->outbound_intr_mask); 935 } 936 937 /** 938 * megasas_read_fw_status_reg_gen2 - returns the current FW status value 939 * @instance: Adapter soft state 940 */ 941 static u32 942 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance) 943 { 944 return readl(&instance->reg_set->outbound_scratch_pad_0); 945 } 946 947 /** 948 * megasas_clear_intr_gen2 - Check & clear interrupt 949 * @instance: Adapter soft state 950 */ 951 static int 952 megasas_clear_intr_gen2(struct megasas_instance *instance) 953 { 954 u32 status; 955 u32 mfiStatus = 0; 956 struct megasas_register_set __iomem *regs; 957 regs = instance->reg_set; 958 959 /* 960 * Check if it is our interrupt 961 */ 962 status = readl(®s->outbound_intr_status); 963 964 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) { 965 mfiStatus = 
MFI_INTR_FLAG_REPLY_MESSAGE; 966 } 967 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { 968 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 969 } 970 971 /* 972 * Clear the interrupt by writing back the same value 973 */ 974 if (mfiStatus) 975 writel(status, ®s->outbound_doorbell_clear); 976 977 /* Dummy readl to force pci flush */ 978 readl(®s->outbound_intr_status); 979 980 return mfiStatus; 981 } 982 983 /** 984 * megasas_fire_cmd_gen2 - Sends command to the FW 985 * @instance: Adapter soft state 986 * @frame_phys_addr: Physical address of cmd 987 * @frame_count: Number of frames for the command 988 * @regs: MFI register set 989 */ 990 static inline void 991 megasas_fire_cmd_gen2(struct megasas_instance *instance, 992 dma_addr_t frame_phys_addr, 993 u32 frame_count, 994 struct megasas_register_set __iomem *regs) 995 { 996 unsigned long flags; 997 998 spin_lock_irqsave(&instance->hba_lock, flags); 999 writel((frame_phys_addr | (frame_count<<1))|1, 1000 &(regs)->inbound_queue_port); 1001 spin_unlock_irqrestore(&instance->hba_lock, flags); 1002 } 1003 1004 /** 1005 * megasas_adp_reset_gen2 - For controller reset 1006 * @instance: Adapter soft state 1007 * @reg_set: MFI register set 1008 */ 1009 static int 1010 megasas_adp_reset_gen2(struct megasas_instance *instance, 1011 struct megasas_register_set __iomem *reg_set) 1012 { 1013 u32 retry = 0 ; 1014 u32 HostDiag; 1015 u32 __iomem *seq_offset = ®_set->seq_offset; 1016 u32 __iomem *hostdiag_offset = ®_set->host_diag; 1017 1018 if (instance->instancet == &megasas_instance_template_skinny) { 1019 seq_offset = ®_set->fusion_seq_offset; 1020 hostdiag_offset = ®_set->fusion_host_diag; 1021 } 1022 1023 writel(0, seq_offset); 1024 writel(4, seq_offset); 1025 writel(0xb, seq_offset); 1026 writel(2, seq_offset); 1027 writel(7, seq_offset); 1028 writel(0xd, seq_offset); 1029 1030 msleep(1000); 1031 1032 HostDiag = (u32)readl(hostdiag_offset); 1033 1034 while (!(HostDiag & DIAG_WRITE_ENABLE)) { 1035 msleep(100); 1036 HostDiag = (u32)readl(hostdiag_offset); 1037 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n", 1038 retry, HostDiag); 1039 1040 if (retry++ >= 100) 1041 return 1; 1042 1043 } 1044 1045 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); 1046 1047 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); 1048 1049 ssleep(10); 1050 1051 HostDiag = (u32)readl(hostdiag_offset); 1052 while (HostDiag & DIAG_RESET_ADAPTER) { 1053 msleep(100); 1054 HostDiag = (u32)readl(hostdiag_offset); 1055 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n", 1056 retry, HostDiag); 1057 1058 if (retry++ >= 1000) 1059 return 1; 1060 1061 } 1062 return 0; 1063 } 1064 1065 /** 1066 * megasas_check_reset_gen2 - For controller reset check 1067 * @instance: Adapter soft state 1068 * @regs: MFI register set 1069 */ 1070 static int 1071 megasas_check_reset_gen2(struct megasas_instance *instance, 1072 struct megasas_register_set __iomem *regs) 1073 { 1074 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1075 return 1; 1076 1077 return 0; 1078 } 1079 1080 static struct megasas_instance_template megasas_instance_template_gen2 = { 1081 1082 .fire_cmd = megasas_fire_cmd_gen2, 1083 .enable_intr = megasas_enable_intr_gen2, 1084 .disable_intr = megasas_disable_intr_gen2, 1085 .clear_intr = megasas_clear_intr_gen2, 1086 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 1087 .adp_reset = megasas_adp_reset_gen2, 1088 .check_reset = megasas_check_reset_gen2, 1089 .service_isr = 
megasas_isr, 1090 .tasklet = megasas_complete_cmd_dpc, 1091 .init_adapter = megasas_init_adapter_mfi, 1092 .build_and_issue_cmd = megasas_build_and_issue_cmd, 1093 .issue_dcmd = megasas_issue_dcmd, 1094 }; 1095 1096 /* 1097 * This is the end of set of functions & definitions 1098 * specific to gen2 (deviceid : 0x78, 0x79) controllers 1099 */ 1100 1101 /* 1102 * Template added for TB (Fusion) 1103 */ 1104 extern struct megasas_instance_template megasas_instance_template_fusion; 1105 1106 /** 1107 * megasas_issue_polled - Issues a polling command 1108 * @instance: Adapter soft state 1109 * @cmd: Command packet to be issued 1110 * 1111 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. 1112 */ 1113 int 1114 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 1115 { 1116 struct megasas_header *frame_hdr = &cmd->frame->hdr; 1117 1118 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; 1119 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1120 1121 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1122 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1123 __func__, __LINE__); 1124 return DCMD_INIT; 1125 } 1126 1127 instance->instancet->issue_dcmd(instance, cmd); 1128 1129 return wait_and_poll(instance, cmd, instance->requestorId ? 1130 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); 1131 } 1132 1133 /** 1134 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds 1135 * @instance: Adapter soft state 1136 * @cmd: Command to be issued 1137 * @timeout: Timeout in seconds 1138 * 1139 * This function waits on an event for the command to be returned from ISR. 1140 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 1141 * Used to issue ioctl commands. 1142 */ 1143 int 1144 megasas_issue_blocked_cmd(struct megasas_instance *instance, 1145 struct megasas_cmd *cmd, int timeout) 1146 { 1147 int ret = 0; 1148 cmd->cmd_status_drv = DCMD_INIT; 1149 1150 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1151 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1152 __func__, __LINE__); 1153 return DCMD_INIT; 1154 } 1155 1156 instance->instancet->issue_dcmd(instance, cmd); 1157 1158 if (timeout) { 1159 ret = wait_event_timeout(instance->int_cmd_wait_q, 1160 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); 1161 if (!ret) { 1162 dev_err(&instance->pdev->dev, 1163 "DCMD(opcode: 0x%x) is timed out, func:%s\n", 1164 cmd->frame->dcmd.opcode, __func__); 1165 return DCMD_TIMEOUT; 1166 } 1167 } else 1168 wait_event(instance->int_cmd_wait_q, 1169 cmd->cmd_status_drv != DCMD_INIT); 1170 1171 return cmd->cmd_status_drv; 1172 } 1173 1174 /** 1175 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd 1176 * @instance: Adapter soft state 1177 * @cmd_to_abort: Previously issued cmd to be aborted 1178 * @timeout: Timeout in seconds 1179 * 1180 * MFI firmware can abort previously issued AEN comamnd (automatic event 1181 * notification). The megasas_issue_blocked_abort_cmd() issues such abort 1182 * cmd and waits for return status. 
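 * The abort frame carries the index (abort_context) and frame address of the
 * command being aborted so that the firmware can locate it.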
1183 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 1184 */ 1185 static int 1186 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, 1187 struct megasas_cmd *cmd_to_abort, int timeout) 1188 { 1189 struct megasas_cmd *cmd; 1190 struct megasas_abort_frame *abort_fr; 1191 int ret = 0; 1192 u32 opcode; 1193 1194 cmd = megasas_get_cmd(instance); 1195 1196 if (!cmd) 1197 return -1; 1198 1199 abort_fr = &cmd->frame->abort; 1200 1201 /* 1202 * Prepare and issue the abort frame 1203 */ 1204 abort_fr->cmd = MFI_CMD_ABORT; 1205 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; 1206 abort_fr->flags = cpu_to_le16(0); 1207 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); 1208 abort_fr->abort_mfi_phys_addr_lo = 1209 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); 1210 abort_fr->abort_mfi_phys_addr_hi = 1211 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 1212 1213 cmd->sync_cmd = 1; 1214 cmd->cmd_status_drv = DCMD_INIT; 1215 1216 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1217 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1218 __func__, __LINE__); 1219 return DCMD_INIT; 1220 } 1221 1222 instance->instancet->issue_dcmd(instance, cmd); 1223 1224 if (timeout) { 1225 ret = wait_event_timeout(instance->abort_cmd_wait_q, 1226 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); 1227 if (!ret) { 1228 opcode = cmd_to_abort->frame->dcmd.opcode; 1229 dev_err(&instance->pdev->dev, 1230 "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n", 1231 opcode, __func__); 1232 return DCMD_TIMEOUT; 1233 } 1234 } else 1235 wait_event(instance->abort_cmd_wait_q, 1236 cmd->cmd_status_drv != DCMD_INIT); 1237 1238 cmd->sync_cmd = 0; 1239 1240 megasas_return_cmd(instance, cmd); 1241 return cmd->cmd_status_drv; 1242 } 1243 1244 /** 1245 * megasas_make_sgl32 - Prepares 32-bit SGL 1246 * @instance: Adapter soft state 1247 * @scp: SCSI command from the mid-layer 1248 * @mfi_sgl: SGL to be filled in 1249 * 1250 * If successful, this function returns the number of SG elements. Otherwise, 1251 * it returnes -1. 1252 */ 1253 static int 1254 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, 1255 union megasas_sgl *mfi_sgl) 1256 { 1257 int i; 1258 int sge_count; 1259 struct scatterlist *os_sgl; 1260 1261 sge_count = scsi_dma_map(scp); 1262 BUG_ON(sge_count < 0); 1263 1264 if (sge_count) { 1265 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1266 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1267 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); 1268 } 1269 } 1270 return sge_count; 1271 } 1272 1273 /** 1274 * megasas_make_sgl64 - Prepares 64-bit SGL 1275 * @instance: Adapter soft state 1276 * @scp: SCSI command from the mid-layer 1277 * @mfi_sgl: SGL to be filled in 1278 * 1279 * If successful, this function returns the number of SG elements. Otherwise, 1280 * it returnes -1. 
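 * (In practice a negative scsi_dma_map() result trips the BUG_ON() below
 * rather than being returned to the caller.)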
1281 */ 1282 static int 1283 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, 1284 union megasas_sgl *mfi_sgl) 1285 { 1286 int i; 1287 int sge_count; 1288 struct scatterlist *os_sgl; 1289 1290 sge_count = scsi_dma_map(scp); 1291 BUG_ON(sge_count < 0); 1292 1293 if (sge_count) { 1294 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1295 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1296 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); 1297 } 1298 } 1299 return sge_count; 1300 } 1301 1302 /** 1303 * megasas_make_sgl_skinny - Prepares IEEE SGL 1304 * @instance: Adapter soft state 1305 * @scp: SCSI command from the mid-layer 1306 * @mfi_sgl: SGL to be filled in 1307 * 1308 * If successful, this function returns the number of SG elements. Otherwise, 1309 * it returnes -1. 1310 */ 1311 static int 1312 megasas_make_sgl_skinny(struct megasas_instance *instance, 1313 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) 1314 { 1315 int i; 1316 int sge_count; 1317 struct scatterlist *os_sgl; 1318 1319 sge_count = scsi_dma_map(scp); 1320 1321 if (sge_count) { 1322 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1323 mfi_sgl->sge_skinny[i].length = 1324 cpu_to_le32(sg_dma_len(os_sgl)); 1325 mfi_sgl->sge_skinny[i].phys_addr = 1326 cpu_to_le64(sg_dma_address(os_sgl)); 1327 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); 1328 } 1329 } 1330 return sge_count; 1331 } 1332 1333 /** 1334 * megasas_get_frame_count - Computes the number of frames 1335 * @frame_type : type of frame- io or pthru frame 1336 * @sge_count : number of sg elements 1337 * 1338 * Returns the number of frames required for numnber of sge's (sge_count) 1339 */ 1340 1341 static u32 megasas_get_frame_count(struct megasas_instance *instance, 1342 u8 sge_count, u8 frame_type) 1343 { 1344 int num_cnt; 1345 int sge_bytes; 1346 u32 sge_sz; 1347 u32 frame_count = 0; 1348 1349 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 1350 sizeof(struct megasas_sge32); 1351 1352 if (instance->flag_ieee) { 1353 sge_sz = sizeof(struct megasas_sge_skinny); 1354 } 1355 1356 /* 1357 * Main frame can contain 2 SGEs for 64-bit SGLs and 1358 * 3 SGEs for 32-bit SGLs for ldio & 1359 * 1 SGEs for 64-bit SGLs and 1360 * 2 SGEs for 32-bit SGLs for pthru frame 1361 */ 1362 if (unlikely(frame_type == PTHRU_FRAME)) { 1363 if (instance->flag_ieee == 1) { 1364 num_cnt = sge_count - 1; 1365 } else if (IS_DMA64) 1366 num_cnt = sge_count - 1; 1367 else 1368 num_cnt = sge_count - 2; 1369 } else { 1370 if (instance->flag_ieee == 1) { 1371 num_cnt = sge_count - 1; 1372 } else if (IS_DMA64) 1373 num_cnt = sge_count - 2; 1374 else 1375 num_cnt = sge_count - 3; 1376 } 1377 1378 if (num_cnt > 0) { 1379 sge_bytes = sge_sz * num_cnt; 1380 1381 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 1382 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; 1383 } 1384 /* Main frame */ 1385 frame_count += 1; 1386 1387 if (frame_count > 7) 1388 frame_count = 8; 1389 return frame_count; 1390 } 1391 1392 /** 1393 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command 1394 * @instance: Adapter soft state 1395 * @scp: SCSI command 1396 * @cmd: Command to be prepared in 1397 * 1398 * This function prepares CDB commands. These are typcially pass-through 1399 * commands to the devices. 
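 * Returns the number of MFI frames the command will consume, or 0 if the
 * SGL exceeds the adapter's max_num_sge limit.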
1400 */ 1401 static int 1402 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1403 struct megasas_cmd *cmd) 1404 { 1405 u32 is_logical; 1406 u32 device_id; 1407 u16 flags = 0; 1408 struct megasas_pthru_frame *pthru; 1409 1410 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1411 device_id = MEGASAS_DEV_INDEX(scp); 1412 pthru = (struct megasas_pthru_frame *)cmd->frame; 1413 1414 if (scp->sc_data_direction == DMA_TO_DEVICE) 1415 flags = MFI_FRAME_DIR_WRITE; 1416 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1417 flags = MFI_FRAME_DIR_READ; 1418 else if (scp->sc_data_direction == DMA_NONE) 1419 flags = MFI_FRAME_DIR_NONE; 1420 1421 if (instance->flag_ieee == 1) { 1422 flags |= MFI_FRAME_IEEE; 1423 } 1424 1425 /* 1426 * Prepare the DCDB frame 1427 */ 1428 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1429 pthru->cmd_status = 0x0; 1430 pthru->scsi_status = 0x0; 1431 pthru->target_id = device_id; 1432 pthru->lun = scp->device->lun; 1433 pthru->cdb_len = scp->cmd_len; 1434 pthru->timeout = 0; 1435 pthru->pad_0 = 0; 1436 pthru->flags = cpu_to_le16(flags); 1437 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1438 1439 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1440 1441 /* 1442 * If the command is for the tape device, set the 1443 * pthru timeout to the os layer timeout value. 1444 */ 1445 if (scp->device->type == TYPE_TAPE) { 1446 if ((scp->request->timeout / HZ) > 0xFFFF) 1447 pthru->timeout = cpu_to_le16(0xFFFF); 1448 else 1449 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1450 } 1451 1452 /* 1453 * Construct SGL 1454 */ 1455 if (instance->flag_ieee == 1) { 1456 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1457 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1458 &pthru->sgl); 1459 } else if (IS_DMA64) { 1460 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1461 pthru->sge_count = megasas_make_sgl64(instance, scp, 1462 &pthru->sgl); 1463 } else 1464 pthru->sge_count = megasas_make_sgl32(instance, scp, 1465 &pthru->sgl); 1466 1467 if (pthru->sge_count > instance->max_num_sge) { 1468 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1469 pthru->sge_count); 1470 return 0; 1471 } 1472 1473 /* 1474 * Sense info specific 1475 */ 1476 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1477 pthru->sense_buf_phys_addr_hi = 1478 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1479 pthru->sense_buf_phys_addr_lo = 1480 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1481 1482 /* 1483 * Compute the total number of frames this command consumes. FW uses 1484 * this number to pull sufficient number of frames from host memory. 1485 */ 1486 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1487 PTHRU_FRAME); 1488 1489 return cmd->frame_count; 1490 } 1491 1492 /** 1493 * megasas_build_ldio - Prepares IOs to logical devices 1494 * @instance: Adapter soft state 1495 * @scp: SCSI command 1496 * @cmd: Command to be prepared 1497 * 1498 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
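 *
 * For example, a READ(10) CDB of 28 00 00 00 08 00 00 00 10 00 carries the
 * LBA in bytes 2-5 (0x00000800) and the transfer length in bytes 7-8 (0x0010);
 * the decode below copies these into start_lba_lo and lba_count.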
1499 */ 1500 static int 1501 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1502 struct megasas_cmd *cmd) 1503 { 1504 u32 device_id; 1505 u8 sc = scp->cmnd[0]; 1506 u16 flags = 0; 1507 struct megasas_io_frame *ldio; 1508 1509 device_id = MEGASAS_DEV_INDEX(scp); 1510 ldio = (struct megasas_io_frame *)cmd->frame; 1511 1512 if (scp->sc_data_direction == DMA_TO_DEVICE) 1513 flags = MFI_FRAME_DIR_WRITE; 1514 else if (scp->sc_data_direction == DMA_FROM_DEVICE) 1515 flags = MFI_FRAME_DIR_READ; 1516 1517 if (instance->flag_ieee == 1) { 1518 flags |= MFI_FRAME_IEEE; 1519 } 1520 1521 /* 1522 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1523 */ 1524 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1525 ldio->cmd_status = 0x0; 1526 ldio->scsi_status = 0x0; 1527 ldio->target_id = device_id; 1528 ldio->timeout = 0; 1529 ldio->reserved_0 = 0; 1530 ldio->pad_0 = 0; 1531 ldio->flags = cpu_to_le16(flags); 1532 ldio->start_lba_hi = 0; 1533 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1534 1535 /* 1536 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1537 */ 1538 if (scp->cmd_len == 6) { 1539 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1540 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1541 ((u32) scp->cmnd[2] << 8) | 1542 (u32) scp->cmnd[3]); 1543 1544 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1545 } 1546 1547 /* 1548 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1549 */ 1550 else if (scp->cmd_len == 10) { 1551 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1552 ((u32) scp->cmnd[7] << 8)); 1553 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1554 ((u32) scp->cmnd[3] << 16) | 1555 ((u32) scp->cmnd[4] << 8) | 1556 (u32) scp->cmnd[5]); 1557 } 1558 1559 /* 1560 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1561 */ 1562 else if (scp->cmd_len == 12) { 1563 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1564 ((u32) scp->cmnd[7] << 16) | 1565 ((u32) scp->cmnd[8] << 8) | 1566 (u32) scp->cmnd[9]); 1567 1568 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1569 ((u32) scp->cmnd[3] << 16) | 1570 ((u32) scp->cmnd[4] << 8) | 1571 (u32) scp->cmnd[5]); 1572 } 1573 1574 /* 1575 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1576 */ 1577 else if (scp->cmd_len == 16) { 1578 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1579 ((u32) scp->cmnd[11] << 16) | 1580 ((u32) scp->cmnd[12] << 8) | 1581 (u32) scp->cmnd[13]); 1582 1583 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1584 ((u32) scp->cmnd[7] << 16) | 1585 ((u32) scp->cmnd[8] << 8) | 1586 (u32) scp->cmnd[9]); 1587 1588 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1589 ((u32) scp->cmnd[3] << 16) | 1590 ((u32) scp->cmnd[4] << 8) | 1591 (u32) scp->cmnd[5]); 1592 1593 } 1594 1595 /* 1596 * Construct SGL 1597 */ 1598 if (instance->flag_ieee) { 1599 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1600 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1601 &ldio->sgl); 1602 } else if (IS_DMA64) { 1603 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1604 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1605 } else 1606 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1607 1608 if (ldio->sge_count > instance->max_num_sge) { 1609 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1610 ldio->sge_count); 1611 return 0; 1612 } 1613 1614 /* 1615 * Sense info specific 1616 */ 1617 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1618 ldio->sense_buf_phys_addr_hi = 0; 1619 
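	/* Unlike the pass-through path, LD I/O frames program only the lower
	 * 32 bits of the sense buffer address; the high half stays zero.
	 */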
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1620 1621 /* 1622 * Compute the total number of frames this command consumes. FW uses 1623 * this number to pull sufficient number of frames from host memory. 1624 */ 1625 cmd->frame_count = megasas_get_frame_count(instance, 1626 ldio->sge_count, IO_FRAME); 1627 1628 return cmd->frame_count; 1629 } 1630 1631 /** 1632 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1633 * and whether it's RW or non RW 1634 * @cmd: SCSI command 1635 * 1636 */ 1637 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1638 { 1639 int ret; 1640 1641 switch (cmd->cmnd[0]) { 1642 case READ_10: 1643 case WRITE_10: 1644 case READ_12: 1645 case WRITE_12: 1646 case READ_6: 1647 case WRITE_6: 1648 case READ_16: 1649 case WRITE_16: 1650 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1651 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1652 break; 1653 default: 1654 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1655 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1656 } 1657 return ret; 1658 } 1659 1660 /** 1661 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1662 * in FW 1663 * @instance: Adapter soft state 1664 */ 1665 static inline void 1666 megasas_dump_pending_frames(struct megasas_instance *instance) 1667 { 1668 struct megasas_cmd *cmd; 1669 int i,n; 1670 union megasas_sgl *mfi_sgl; 1671 struct megasas_io_frame *ldio; 1672 struct megasas_pthru_frame *pthru; 1673 u32 sgcount; 1674 u16 max_cmd = instance->max_fw_cmds; 1675 1676 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1677 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1678 if (IS_DMA64) 1679 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1680 else 1681 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1682 1683 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1684 for (i = 0; i < max_cmd; i++) { 1685 cmd = instance->cmd_list[i]; 1686 if (!cmd->scmd) 1687 continue; 1688 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1689 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1690 ldio = (struct megasas_io_frame *)cmd->frame; 1691 mfi_sgl = &ldio->sgl; 1692 sgcount = ldio->sge_count; 1693 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1694 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1695 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1696 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1697 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1698 } else { 1699 pthru = (struct megasas_pthru_frame *) cmd->frame; 1700 mfi_sgl = &pthru->sgl; 1701 sgcount = pthru->sge_count; 1702 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1703 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1704 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1705 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1706 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1707 } 1708 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1709 for (n = 0; n < sgcount; n++) { 1710 if (IS_DMA64) 1711 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1712 le32_to_cpu(mfi_sgl->sge64[n].length), 1713 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1714 else 1715 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1716 le32_to_cpu(mfi_sgl->sge32[n].length), 1717 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1718 } 1719 } 1720 } /*for max_cmd*/ 1721 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1722 for (i = 0; i < max_cmd; i++) { 1723 1724 cmd = instance->cmd_list[i]; 1725 1726 if (cmd->sync_cmd == 1) 1727 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1728 } 1729 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1730 } 1731 1732 u32 1733 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1734 struct scsi_cmnd *scmd) 1735 { 1736 struct megasas_cmd *cmd; 1737 u32 frame_count; 1738 1739 cmd = megasas_get_cmd(instance); 1740 if (!cmd) 1741 return SCSI_MLQUEUE_HOST_BUSY; 1742 1743 /* 1744 * Logical drive command 1745 */ 1746 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1747 frame_count = megasas_build_ldio(instance, scmd, cmd); 1748 else 1749 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1750 1751 if (!frame_count) 1752 goto out_return_cmd; 1753 1754 cmd->scmd = scmd; 1755 scmd->SCp.ptr = (char *)cmd; 1756 1757 /* 1758 * Issue the command to the FW 1759 */ 1760 atomic_inc(&instance->fw_outstanding); 1761 1762 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1763 cmd->frame_count-1, instance->reg_set); 1764 1765 return 0; 1766 out_return_cmd: 1767 megasas_return_cmd(instance, cmd); 1768 return SCSI_MLQUEUE_HOST_BUSY; 1769 } 1770 1771 1772 /** 1773 * megasas_queue_command - Queue entry point 1774 * @shost: adapter SCSI host 1775 * @scmd: SCSI command to be queued 1776 */ 1777 static int 1778 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1779 { 1780 struct megasas_instance *instance; 1781 struct MR_PRIV_DEVICE *mr_device_priv_data; 1782 1783 instance = (struct megasas_instance *) 1784 scmd->device->host->hostdata; 1785 1786 if (instance->unload == 1) { 1787 scmd->result = DID_NO_CONNECT << 16; 1788 scmd->scsi_done(scmd); 1789 return 0; 1790 } 1791 1792 if (instance->issuepend_done == 0) 1793 return SCSI_MLQUEUE_HOST_BUSY; 1794 1795 1796 /* Check for an mpio path and adjust behavior */ 1797 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1798 if (megasas_check_mpio_paths(instance, scmd) == 1799 (DID_REQUEUE << 16)) { 1800 return SCSI_MLQUEUE_HOST_BUSY; 1801 } else { 1802 scmd->result = DID_NO_CONNECT << 16; 1803 scmd->scsi_done(scmd); 1804 return 0; 1805 } 1806 } 1807 1808 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1809 scmd->result = DID_NO_CONNECT << 16; 1810 scmd->scsi_done(scmd); 1811 return 0; 1812 } 1813 1814 mr_device_priv_data = scmd->device->hostdata; 1815 if (!mr_device_priv_data) { 1816 scmd->result = DID_NO_CONNECT << 16; 1817 scmd->scsi_done(scmd); 1818 return 0; 1819 } 1820 1821 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1822 return SCSI_MLQUEUE_HOST_BUSY; 1823 1824 if (mr_device_priv_data->tm_busy) 1825 return SCSI_MLQUEUE_DEVICE_BUSY; 1826 1827 1828 scmd->result = 0; 1829 1830 if (MEGASAS_IS_LOGICAL(scmd->device) && 1831 (scmd->device->id >= instance->fw_supported_vd_count || 1832 scmd->device->lun)) { 1833 scmd->result = DID_BAD_TARGET << 16; 1834 goto out_done; 1835 } 1836 1837 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1838 
MEGASAS_IS_LOGICAL(scmd->device) && 1839 (!instance->fw_sync_cache_support)) { 1840 scmd->result = DID_OK << 16; 1841 goto out_done; 1842 } 1843 1844 return instance->instancet->build_and_issue_cmd(instance, scmd); 1845 1846 out_done: 1847 scmd->scsi_done(scmd); 1848 return 0; 1849 } 1850 1851 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1852 { 1853 int i; 1854 1855 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1856 1857 if ((megasas_mgmt_info.instance[i]) && 1858 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1859 return megasas_mgmt_info.instance[i]; 1860 } 1861 1862 return NULL; 1863 } 1864 1865 /* 1866 * megasas_set_dynamic_target_properties - 1867 * Device property set by driver may not be static and it is required to be 1868 * updated after OCR 1869 * 1870 * set tm_capable. 1871 * set dma alignment (only for eedp protection enable vd). 1872 * 1873 * @sdev: OS provided scsi device 1874 * 1875 * Returns void 1876 */ 1877 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1878 bool is_target_prop) 1879 { 1880 u16 pd_index = 0, ld; 1881 u32 device_id; 1882 struct megasas_instance *instance; 1883 struct fusion_context *fusion; 1884 struct MR_PRIV_DEVICE *mr_device_priv_data; 1885 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1886 struct MR_LD_RAID *raid; 1887 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1888 1889 instance = megasas_lookup_instance(sdev->host->host_no); 1890 fusion = instance->ctrl_context; 1891 mr_device_priv_data = sdev->hostdata; 1892 1893 if (!fusion || !mr_device_priv_data) 1894 return; 1895 1896 if (MEGASAS_IS_LOGICAL(sdev)) { 1897 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1898 + sdev->id; 1899 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1900 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1901 if (ld >= instance->fw_supported_vd_count) 1902 return; 1903 raid = MR_LdRaidGet(ld, local_map_ptr); 1904 1905 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1906 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1907 1908 mr_device_priv_data->is_tm_capable = 1909 raid->capability.tmCapable; 1910 1911 if (!raid->flags.isEPD) 1912 sdev->no_write_same = 1; 1913 1914 } else if (instance->use_seqnum_jbod_fp) { 1915 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1916 sdev->id; 1917 pd_sync = (void *)fusion->pd_seq_sync 1918 [(instance->pd_seq_map_id - 1) & 1]; 1919 mr_device_priv_data->is_tm_capable = 1920 pd_sync->seq[pd_index].capability.tmCapable; 1921 } 1922 1923 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1924 /* 1925 * If FW provides a target reset timeout value, driver will use 1926 * it. If not set, fallback to default values. 1927 */ 1928 mr_device_priv_data->target_reset_tmo = 1929 min_t(u8, instance->max_reset_tmo, 1930 instance->tgt_prop->reset_tmo); 1931 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1932 } else { 1933 mr_device_priv_data->target_reset_tmo = 1934 MEGASAS_DEFAULT_TM_TIMEOUT; 1935 mr_device_priv_data->task_abort_tmo = 1936 MEGASAS_DEFAULT_TM_TIMEOUT; 1937 } 1938 } 1939 1940 /* 1941 * megasas_set_nvme_device_properties - 1942 * set nomerges=2 1943 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1944 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 1945 * 1946 * MR firmware provides value in KB. Caller of this function converts 1947 * kb into bytes. 1948 * 1949 * e.a MDTS=5 means 2^5 * nvme page size. 
(In case of 4K page size, 1950 * MR firmware provides value 128 as (32 * 4K) = 128K. 1951 * 1952 * @sdev: scsi device 1953 * @max_io_size: maximum io transfer size 1954 * 1955 */ 1956 static inline void 1957 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1958 { 1959 struct megasas_instance *instance; 1960 u32 mr_nvme_pg_size; 1961 1962 instance = (struct megasas_instance *)sdev->host->hostdata; 1963 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1964 MR_DEFAULT_NVME_PAGE_SIZE); 1965 1966 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1967 1968 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1969 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1970 } 1971 1972 /* 1973 * megasas_set_fw_assisted_qd - 1974 * set device queue depth to can_queue 1975 * set device queue depth to fw assisted qd 1976 * 1977 * @sdev: scsi device 1978 * @is_target_prop true, if fw provided target properties. 1979 */ 1980 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, 1981 bool is_target_prop) 1982 { 1983 u8 interface_type; 1984 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1985 u32 tgt_device_qd; 1986 struct megasas_instance *instance; 1987 struct MR_PRIV_DEVICE *mr_device_priv_data; 1988 1989 instance = megasas_lookup_instance(sdev->host->host_no); 1990 mr_device_priv_data = sdev->hostdata; 1991 interface_type = mr_device_priv_data->interface_type; 1992 1993 switch (interface_type) { 1994 case SAS_PD: 1995 device_qd = MEGASAS_SAS_QD; 1996 break; 1997 case SATA_PD: 1998 device_qd = MEGASAS_SATA_QD; 1999 break; 2000 case NVME_PD: 2001 device_qd = MEGASAS_NVME_QD; 2002 break; 2003 } 2004 2005 if (is_target_prop) { 2006 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 2007 if (tgt_device_qd) 2008 device_qd = min(instance->host->can_queue, 2009 (int)tgt_device_qd); 2010 } 2011 2012 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) 2013 device_qd = instance->host->can_queue; 2014 2015 scsi_change_queue_depth(sdev, device_qd); 2016 } 2017 2018 /* 2019 * megasas_set_static_target_properties - 2020 * Device property set by driver are static and it is not required to be 2021 * updated after OCR. 2022 * 2023 * set io timeout 2024 * set device queue depth 2025 * set nvme device properties. see - megasas_set_nvme_device_properties 2026 * 2027 * @sdev: scsi device 2028 * @is_target_prop true, if fw provided target properties. 2029 */ 2030 static void megasas_set_static_target_properties(struct scsi_device *sdev, 2031 bool is_target_prop) 2032 { 2033 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 2034 struct megasas_instance *instance; 2035 2036 instance = megasas_lookup_instance(sdev->host->host_no); 2037 2038 /* 2039 * The RAID firmware may require extended timeouts. 2040 */ 2041 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 2042 2043 /* max_io_size_kb will be set to non zero for 2044 * nvme based vd and syspd. 
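* For example, assuming the firmware reports max_io_size_kb = 128, the
* call below passes 128 << 10 = 131072 bytes and
* megasas_set_nvme_device_properties() caps max_hw_sectors at
* 131072 / 512 = 256 sectors.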
2045 */ 2046 if (is_target_prop) 2047 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2048 2049 if (instance->nvme_page_size && max_io_size_kb) 2050 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 2051 2052 megasas_set_fw_assisted_qd(sdev, is_target_prop); 2053 } 2054 2055 2056 static int megasas_slave_configure(struct scsi_device *sdev) 2057 { 2058 u16 pd_index = 0; 2059 struct megasas_instance *instance; 2060 int ret_target_prop = DCMD_FAILED; 2061 bool is_target_prop = false; 2062 2063 instance = megasas_lookup_instance(sdev->host->host_no); 2064 if (instance->pd_list_not_supported) { 2065 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2066 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2067 sdev->id; 2068 if (instance->pd_list[pd_index].driveState != 2069 MR_PD_STATE_SYSTEM) 2070 return -ENXIO; 2071 } 2072 } 2073 2074 mutex_lock(&instance->reset_mutex); 2075 /* Send DCMD to Firmware and cache the information */ 2076 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2077 megasas_get_pd_info(instance, sdev); 2078 2079 /* Some ventura firmware may not have instance->nvme_page_size set. 2080 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2081 */ 2082 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2083 ret_target_prop = megasas_get_target_prop(instance, sdev); 2084 2085 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2086 megasas_set_static_target_properties(sdev, is_target_prop); 2087 2088 /* This sdev property may change post OCR */ 2089 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2090 2091 mutex_unlock(&instance->reset_mutex); 2092 2093 return 0; 2094 } 2095 2096 static int megasas_slave_alloc(struct scsi_device *sdev) 2097 { 2098 u16 pd_index = 0; 2099 struct megasas_instance *instance ; 2100 struct MR_PRIV_DEVICE *mr_device_priv_data; 2101 2102 instance = megasas_lookup_instance(sdev->host->host_no); 2103 if (!MEGASAS_IS_LOGICAL(sdev)) { 2104 /* 2105 * Open the OS scan to the SYSTEM PD 2106 */ 2107 pd_index = 2108 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2109 sdev->id; 2110 if ((instance->pd_list_not_supported || 2111 instance->pd_list[pd_index].driveState == 2112 MR_PD_STATE_SYSTEM)) { 2113 goto scan_target; 2114 } 2115 return -ENXIO; 2116 } 2117 2118 scan_target: 2119 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2120 GFP_KERNEL); 2121 if (!mr_device_priv_data) 2122 return -ENOMEM; 2123 sdev->hostdata = mr_device_priv_data; 2124 2125 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2126 instance->r1_ldio_hint_default); 2127 return 0; 2128 } 2129 2130 static void megasas_slave_destroy(struct scsi_device *sdev) 2131 { 2132 kfree(sdev->hostdata); 2133 sdev->hostdata = NULL; 2134 } 2135 2136 /* 2137 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2138 * kill adapter 2139 * @instance: Adapter soft state 2140 * 2141 */ 2142 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2143 { 2144 int i; 2145 struct megasas_cmd *cmd_mfi; 2146 struct megasas_cmd_fusion *cmd_fusion; 2147 struct fusion_context *fusion = instance->ctrl_context; 2148 2149 /* Find all outstanding ioctls */ 2150 if (fusion) { 2151 for (i = 0; i < instance->max_fw_cmds; i++) { 2152 cmd_fusion = fusion->cmd_list[i]; 2153 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2154 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2155 if (cmd_mfi->sync_cmd && 2156 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2157 cmd_mfi->frame->hdr.cmd_status = 
2158 MFI_STAT_WRONG_STATE; 2159 megasas_complete_cmd(instance, 2160 cmd_mfi, DID_OK); 2161 } 2162 } 2163 } 2164 } else { 2165 for (i = 0; i < instance->max_fw_cmds; i++) { 2166 cmd_mfi = instance->cmd_list[i]; 2167 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2168 MFI_CMD_ABORT) 2169 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2170 } 2171 } 2172 } 2173 2174 2175 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2176 { 2177 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2178 dev_warn(&instance->pdev->dev, 2179 "Adapter already dead, skipping kill HBA\n"); 2180 return; 2181 } 2182 2183 /* Set critical error to block I/O & ioctls in case caller didn't */ 2184 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2185 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2186 msleep(1000); 2187 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2188 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2189 (instance->adapter_type != MFI_SERIES)) { 2190 if (!instance->requestorId) { 2191 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2192 /* Flush */ 2193 readl(&instance->reg_set->doorbell); 2194 } 2195 if (instance->requestorId && instance->peerIsPresent) 2196 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2197 } else { 2198 writel(MFI_STOP_ADP, 2199 &instance->reg_set->inbound_doorbell); 2200 } 2201 /* Complete outstanding ioctls when adapter is killed */ 2202 megasas_complete_outstanding_ioctls(instance); 2203 } 2204 2205 /** 2206 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2207 * restored to max value 2208 * @instance: Adapter soft state 2209 * 2210 */ 2211 void 2212 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2213 { 2214 unsigned long flags; 2215 2216 if (instance->flag & MEGASAS_FW_BUSY 2217 && time_after(jiffies, instance->last_time + 5 * HZ) 2218 && atomic_read(&instance->fw_outstanding) < 2219 instance->throttlequeuedepth + 1) { 2220 2221 spin_lock_irqsave(instance->host->host_lock, flags); 2222 instance->flag &= ~MEGASAS_FW_BUSY; 2223 2224 instance->host->can_queue = instance->cur_can_queue; 2225 spin_unlock_irqrestore(instance->host->host_lock, flags); 2226 } 2227 } 2228 2229 /** 2230 * megasas_complete_cmd_dpc - Returns FW's controller structure 2231 * @instance_addr: Address of adapter soft state 2232 * 2233 * Tasklet to complete cmds 2234 */ 2235 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2236 { 2237 u32 producer; 2238 u32 consumer; 2239 u32 context; 2240 struct megasas_cmd *cmd; 2241 struct megasas_instance *instance = 2242 (struct megasas_instance *)instance_addr; 2243 unsigned long flags; 2244 2245 /* If we have already declared adapter dead, donot complete cmds */ 2246 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2247 return; 2248 2249 spin_lock_irqsave(&instance->completion_lock, flags); 2250 2251 producer = le32_to_cpu(*instance->producer); 2252 consumer = le32_to_cpu(*instance->consumer); 2253 2254 while (consumer != producer) { 2255 context = le32_to_cpu(instance->reply_queue[consumer]); 2256 if (context >= instance->max_fw_cmds) { 2257 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2258 context); 2259 BUG(); 2260 } 2261 2262 cmd = instance->cmd_list[context]; 2263 2264 megasas_complete_cmd(instance, cmd, DID_OK); 2265 2266 consumer++; 2267 if (consumer == (instance->max_fw_cmds + 1)) { 2268 consumer = 0; 2269 } 2270 } 2271 2272 *instance->consumer 
= cpu_to_le32(producer); 2273 2274 spin_unlock_irqrestore(&instance->completion_lock, flags); 2275 2276 /* 2277 * Check if we can restore can_queue 2278 */ 2279 megasas_check_and_restore_queue_depth(instance); 2280 } 2281 2282 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2283 2284 /** 2285 * megasas_start_timer - Initializes sriov heartbeat timer object 2286 * @instance: Adapter soft state 2287 * 2288 */ 2289 void megasas_start_timer(struct megasas_instance *instance) 2290 { 2291 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2292 2293 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2294 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2295 add_timer(timer); 2296 } 2297 2298 static void 2299 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2300 2301 static void 2302 process_fw_state_change_wq(struct work_struct *work); 2303 2304 static void megasas_do_ocr(struct megasas_instance *instance) 2305 { 2306 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2307 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2308 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2309 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2310 } 2311 instance->instancet->disable_intr(instance); 2312 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2313 instance->issuepend_done = 0; 2314 2315 atomic_set(&instance->fw_outstanding, 0); 2316 megasas_internal_reset_defer_cmds(instance); 2317 process_fw_state_change_wq(&instance->work_init); 2318 } 2319 2320 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2321 int initial) 2322 { 2323 struct megasas_cmd *cmd; 2324 struct megasas_dcmd_frame *dcmd; 2325 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2326 dma_addr_t new_affiliation_111_h; 2327 int ld, retval = 0; 2328 u8 thisVf; 2329 2330 cmd = megasas_get_cmd(instance); 2331 2332 if (!cmd) { 2333 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2334 "Failed to get cmd for scsi%d\n", 2335 instance->host->host_no); 2336 return -ENOMEM; 2337 } 2338 2339 dcmd = &cmd->frame->dcmd; 2340 2341 if (!instance->vf_affiliation_111) { 2342 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2343 "affiliation for scsi%d\n", instance->host->host_no); 2344 megasas_return_cmd(instance, cmd); 2345 return -ENOMEM; 2346 } 2347 2348 if (initial) 2349 memset(instance->vf_affiliation_111, 0, 2350 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2351 else { 2352 new_affiliation_111 = 2353 dma_alloc_coherent(&instance->pdev->dev, 2354 sizeof(struct MR_LD_VF_AFFILIATION_111), 2355 &new_affiliation_111_h, GFP_KERNEL); 2356 if (!new_affiliation_111) { 2357 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2358 "memory for new affiliation for scsi%d\n", 2359 instance->host->host_no); 2360 megasas_return_cmd(instance, cmd); 2361 return -ENOMEM; 2362 } 2363 } 2364 2365 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2366 2367 dcmd->cmd = MFI_CMD_DCMD; 2368 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2369 dcmd->sge_count = 1; 2370 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2371 dcmd->timeout = 0; 2372 dcmd->pad_0 = 0; 2373 dcmd->data_xfer_len = 2374 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2375 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2376 2377 if (initial) 2378 dcmd->sgl.sge32[0].phys_addr = 2379 cpu_to_le32(instance->vf_affiliation_111_h); 2380 else 2381 dcmd->sgl.sge32[0].phys_addr = 
2382 cpu_to_le32(new_affiliation_111_h); 2383 2384 dcmd->sgl.sge32[0].length = cpu_to_le32( 2385 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2386 2387 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2388 "scsi%d\n", instance->host->host_no); 2389 2390 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2391 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2392 " failed with status 0x%x for scsi%d\n", 2393 dcmd->cmd_status, instance->host->host_no); 2394 retval = 1; /* Do a scan if we couldn't get affiliation */ 2395 goto out; 2396 } 2397 2398 if (!initial) { 2399 thisVf = new_affiliation_111->thisVf; 2400 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2401 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2402 new_affiliation_111->map[ld].policy[thisVf]) { 2403 dev_warn(&instance->pdev->dev, "SR-IOV: " 2404 "Got new LD/VF affiliation for scsi%d\n", 2405 instance->host->host_no); 2406 memcpy(instance->vf_affiliation_111, 2407 new_affiliation_111, 2408 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2409 retval = 1; 2410 goto out; 2411 } 2412 } 2413 out: 2414 if (new_affiliation_111) { 2415 dma_free_coherent(&instance->pdev->dev, 2416 sizeof(struct MR_LD_VF_AFFILIATION_111), 2417 new_affiliation_111, 2418 new_affiliation_111_h); 2419 } 2420 2421 megasas_return_cmd(instance, cmd); 2422 2423 return retval; 2424 } 2425 2426 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2427 int initial) 2428 { 2429 struct megasas_cmd *cmd; 2430 struct megasas_dcmd_frame *dcmd; 2431 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2432 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2433 dma_addr_t new_affiliation_h; 2434 int i, j, retval = 0, found = 0, doscan = 0; 2435 u8 thisVf; 2436 2437 cmd = megasas_get_cmd(instance); 2438 2439 if (!cmd) { 2440 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2441 "Failed to get cmd for scsi%d\n", 2442 instance->host->host_no); 2443 return -ENOMEM; 2444 } 2445 2446 dcmd = &cmd->frame->dcmd; 2447 2448 if (!instance->vf_affiliation) { 2449 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2450 "affiliation for scsi%d\n", instance->host->host_no); 2451 megasas_return_cmd(instance, cmd); 2452 return -ENOMEM; 2453 } 2454 2455 if (initial) 2456 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2457 sizeof(struct MR_LD_VF_AFFILIATION)); 2458 else { 2459 new_affiliation = 2460 dma_alloc_coherent(&instance->pdev->dev, 2461 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2462 &new_affiliation_h, GFP_KERNEL); 2463 if (!new_affiliation) { 2464 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2465 "memory for new affiliation for scsi%d\n", 2466 instance->host->host_no); 2467 megasas_return_cmd(instance, cmd); 2468 return -ENOMEM; 2469 } 2470 } 2471 2472 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2473 2474 dcmd->cmd = MFI_CMD_DCMD; 2475 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2476 dcmd->sge_count = 1; 2477 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2478 dcmd->timeout = 0; 2479 dcmd->pad_0 = 0; 2480 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2481 sizeof(struct MR_LD_VF_AFFILIATION)); 2482 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2483 2484 if (initial) 2485 dcmd->sgl.sge32[0].phys_addr = 2486 cpu_to_le32(instance->vf_affiliation_h); 2487 else 2488 dcmd->sgl.sge32[0].phys_addr = 2489 cpu_to_le32(new_affiliation_h); 2490 2491 
dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2492 sizeof(struct MR_LD_VF_AFFILIATION)); 2493 2494 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2495 "scsi%d\n", instance->host->host_no); 2496 2497 2498 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2499 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2500 " failed with status 0x%x for scsi%d\n", 2501 dcmd->cmd_status, instance->host->host_no); 2502 retval = 1; /* Do a scan if we couldn't get affiliation */ 2503 goto out; 2504 } 2505 2506 if (!initial) { 2507 if (!new_affiliation->ldCount) { 2508 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2509 "affiliation for passive path for scsi%d\n", 2510 instance->host->host_no); 2511 retval = 1; 2512 goto out; 2513 } 2514 newmap = new_affiliation->map; 2515 savedmap = instance->vf_affiliation->map; 2516 thisVf = new_affiliation->thisVf; 2517 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2518 found = 0; 2519 for (j = 0; j < instance->vf_affiliation->ldCount; 2520 j++) { 2521 if (newmap->ref.targetId == 2522 savedmap->ref.targetId) { 2523 found = 1; 2524 if (newmap->policy[thisVf] != 2525 savedmap->policy[thisVf]) { 2526 doscan = 1; 2527 goto out; 2528 } 2529 } 2530 savedmap = (struct MR_LD_VF_MAP *) 2531 ((unsigned char *)savedmap + 2532 savedmap->size); 2533 } 2534 if (!found && newmap->policy[thisVf] != 2535 MR_LD_ACCESS_HIDDEN) { 2536 doscan = 1; 2537 goto out; 2538 } 2539 newmap = (struct MR_LD_VF_MAP *) 2540 ((unsigned char *)newmap + newmap->size); 2541 } 2542 2543 newmap = new_affiliation->map; 2544 savedmap = instance->vf_affiliation->map; 2545 2546 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2547 found = 0; 2548 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2549 if (savedmap->ref.targetId == 2550 newmap->ref.targetId) { 2551 found = 1; 2552 if (savedmap->policy[thisVf] != 2553 newmap->policy[thisVf]) { 2554 doscan = 1; 2555 goto out; 2556 } 2557 } 2558 newmap = (struct MR_LD_VF_MAP *) 2559 ((unsigned char *)newmap + 2560 newmap->size); 2561 } 2562 if (!found && savedmap->policy[thisVf] != 2563 MR_LD_ACCESS_HIDDEN) { 2564 doscan = 1; 2565 goto out; 2566 } 2567 savedmap = (struct MR_LD_VF_MAP *) 2568 ((unsigned char *)savedmap + 2569 savedmap->size); 2570 } 2571 } 2572 out: 2573 if (doscan) { 2574 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2575 "affiliation for scsi%d\n", instance->host->host_no); 2576 memcpy(instance->vf_affiliation, new_affiliation, 2577 new_affiliation->size); 2578 retval = 1; 2579 } 2580 2581 if (new_affiliation) 2582 dma_free_coherent(&instance->pdev->dev, 2583 (MAX_LOGICAL_DRIVES + 1) * 2584 sizeof(struct MR_LD_VF_AFFILIATION), 2585 new_affiliation, new_affiliation_h); 2586 megasas_return_cmd(instance, cmd); 2587 2588 return retval; 2589 } 2590 2591 /* This function will get the current SR-IOV LD/VF affiliation */ 2592 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2593 int initial) 2594 { 2595 int retval; 2596 2597 if (instance->PlasmaFW111) 2598 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2599 else 2600 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2601 return retval; 2602 } 2603 2604 /* This function will tell FW to start the SR-IOV heartbeat */ 2605 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2606 int initial) 2607 { 2608 struct megasas_cmd *cmd; 2609 struct megasas_dcmd_frame *dcmd; 2610 int retval = 0; 2611 2612 cmd = megasas_get_cmd(instance); 2613 
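/* Borrow an internal MFI frame for the heartbeat DCMD; if the command
 * pool is exhausted, bail out below with -ENOMEM. */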
2614 if (!cmd) { 2615 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2616 "Failed to get cmd for scsi%d\n", 2617 instance->host->host_no); 2618 return -ENOMEM; 2619 } 2620 2621 dcmd = &cmd->frame->dcmd; 2622 2623 if (initial) { 2624 instance->hb_host_mem = 2625 dma_alloc_coherent(&instance->pdev->dev, 2626 sizeof(struct MR_CTRL_HB_HOST_MEM), 2627 &instance->hb_host_mem_h, 2628 GFP_KERNEL); 2629 if (!instance->hb_host_mem) { 2630 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2631 " memory for heartbeat host memory for scsi%d\n", 2632 instance->host->host_no); 2633 retval = -ENOMEM; 2634 goto out; 2635 } 2636 } 2637 2638 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2639 2640 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2641 dcmd->cmd = MFI_CMD_DCMD; 2642 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2643 dcmd->sge_count = 1; 2644 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2645 dcmd->timeout = 0; 2646 dcmd->pad_0 = 0; 2647 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2648 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2649 2650 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2651 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2652 2653 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2654 instance->host->host_no); 2655 2656 if ((instance->adapter_type != MFI_SERIES) && 2657 !instance->mask_interrupts) 2658 retval = megasas_issue_blocked_cmd(instance, cmd, 2659 MEGASAS_ROUTINE_WAIT_TIME_VF); 2660 else 2661 retval = megasas_issue_polled(instance, cmd); 2662 2663 if (retval) { 2664 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2665 "_MEM_ALLOC DCMD %s for scsi%d\n", 2666 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2667 "timed out" : "failed", instance->host->host_no); 2668 retval = 1; 2669 } 2670 2671 out: 2672 megasas_return_cmd(instance, cmd); 2673 2674 return retval; 2675 } 2676 2677 /* Handler for SR-IOV heartbeat */ 2678 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2679 { 2680 struct megasas_instance *instance = 2681 from_timer(instance, t, sriov_heartbeat_timer); 2682 2683 if (instance->hb_host_mem->HB.fwCounter != 2684 instance->hb_host_mem->HB.driverCounter) { 2685 instance->hb_host_mem->HB.driverCounter = 2686 instance->hb_host_mem->HB.fwCounter; 2687 mod_timer(&instance->sriov_heartbeat_timer, 2688 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2689 } else { 2690 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2691 "completed for scsi%d\n", instance->host->host_no); 2692 schedule_work(&instance->work_init); 2693 } 2694 } 2695 2696 /** 2697 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2698 * @instance: Adapter soft state 2699 * 2700 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2701 * complete all its outstanding commands. Returns error if one or more IOs 2702 * are pending after this time period. It also marks the controller dead. 
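* (The wait loops below poll roughly once per second; while fw_outstanding
* drains, megasas_complete_cmd_dpc() is kicked periodically so completions
* are reaped without relying on the ISR.)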
2703 */ 2704 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2705 { 2706 int i, sl, outstanding; 2707 u32 reset_index; 2708 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2709 unsigned long flags; 2710 struct list_head clist_local; 2711 struct megasas_cmd *reset_cmd; 2712 u32 fw_state; 2713 2714 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2715 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2716 __func__, __LINE__); 2717 return FAILED; 2718 } 2719 2720 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2721 2722 INIT_LIST_HEAD(&clist_local); 2723 spin_lock_irqsave(&instance->hba_lock, flags); 2724 list_splice_init(&instance->internal_reset_pending_q, 2725 &clist_local); 2726 spin_unlock_irqrestore(&instance->hba_lock, flags); 2727 2728 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2729 for (i = 0; i < wait_time; i++) { 2730 msleep(1000); 2731 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2732 break; 2733 } 2734 2735 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2736 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2737 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2738 return FAILED; 2739 } 2740 2741 reset_index = 0; 2742 while (!list_empty(&clist_local)) { 2743 reset_cmd = list_entry((&clist_local)->next, 2744 struct megasas_cmd, list); 2745 list_del_init(&reset_cmd->list); 2746 if (reset_cmd->scmd) { 2747 reset_cmd->scmd->result = DID_REQUEUE << 16; 2748 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2749 reset_index, reset_cmd, 2750 reset_cmd->scmd->cmnd[0]); 2751 2752 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2753 megasas_return_cmd(instance, reset_cmd); 2754 } else if (reset_cmd->sync_cmd) { 2755 dev_notice(&instance->pdev->dev, "%p synch cmds" 2756 "reset queue\n", 2757 reset_cmd); 2758 2759 reset_cmd->cmd_status_drv = DCMD_INIT; 2760 instance->instancet->fire_cmd(instance, 2761 reset_cmd->frame_phys_addr, 2762 0, instance->reg_set); 2763 } else { 2764 dev_notice(&instance->pdev->dev, "%p unexpected" 2765 "cmds lst\n", 2766 reset_cmd); 2767 } 2768 reset_index++; 2769 } 2770 2771 return SUCCESS; 2772 } 2773 2774 for (i = 0; i < resetwaittime; i++) { 2775 outstanding = atomic_read(&instance->fw_outstanding); 2776 2777 if (!outstanding) 2778 break; 2779 2780 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2781 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2782 "commands to complete\n",i,outstanding); 2783 /* 2784 * Call cmd completion routine. Cmd to be 2785 * be completed directly without depending on isr. 2786 */ 2787 megasas_complete_cmd_dpc((unsigned long)instance); 2788 } 2789 2790 msleep(1000); 2791 } 2792 2793 i = 0; 2794 outstanding = atomic_read(&instance->fw_outstanding); 2795 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2796 2797 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2798 goto no_outstanding; 2799 2800 if (instance->disableOnlineCtrlReset) 2801 goto kill_hba_and_failed; 2802 do { 2803 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2804 dev_info(&instance->pdev->dev, 2805 "%s:%d waiting_for_outstanding: before issue OCR. 
FW state = 0x%x, outstanding 0x%x\n", 2806 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2807 if (i == 3) 2808 goto kill_hba_and_failed; 2809 megasas_do_ocr(instance); 2810 2811 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2812 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2813 __func__, __LINE__); 2814 return FAILED; 2815 } 2816 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2817 __func__, __LINE__); 2818 2819 for (sl = 0; sl < 10; sl++) 2820 msleep(500); 2821 2822 outstanding = atomic_read(&instance->fw_outstanding); 2823 2824 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2825 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2826 goto no_outstanding; 2827 } 2828 i++; 2829 } while (i <= 3); 2830 2831 no_outstanding: 2832 2833 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2834 __func__, __LINE__); 2835 return SUCCESS; 2836 2837 kill_hba_and_failed: 2838 2839 /* Reset not supported, kill adapter */ 2840 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2841 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2842 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2843 atomic_read(&instance->fw_outstanding)); 2844 megasas_dump_pending_frames(instance); 2845 megaraid_sas_kill_hba(instance); 2846 2847 return FAILED; 2848 } 2849 2850 /** 2851 * megasas_generic_reset - Generic reset routine 2852 * @scmd: Mid-layer SCSI command 2853 * 2854 * This routine implements a generic reset handler for device, bus and host 2855 * reset requests. Device, bus and host specific reset handlers can use this 2856 * function after they do their specific tasks. 2857 */ 2858 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2859 { 2860 int ret_val; 2861 struct megasas_instance *instance; 2862 2863 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2864 2865 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2866 scmd->cmnd[0], scmd->retries); 2867 2868 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2869 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2870 return FAILED; 2871 } 2872 2873 ret_val = megasas_wait_for_outstanding(instance); 2874 if (ret_val == SUCCESS) 2875 dev_notice(&instance->pdev->dev, "reset successful\n"); 2876 else 2877 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2878 2879 return ret_val; 2880 } 2881 2882 /** 2883 * megasas_reset_timer - quiesce the adapter if required 2884 * @scmd: scsi cmnd 2885 * 2886 * Sets the FW busy flag and reduces the host->can_queue if the 2887 * cmd has not been completed within the timeout period. 
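* (The block layer timer is re-armed via BLK_EH_RESET_TIMER until the
* command has been outstanding for roughly twice scmd_timeout, after which
* BLK_EH_DONE hands the command over to the SCSI error handler.)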
2888 */
2889 static enum
2890 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2891 {
2892 struct megasas_instance *instance;
2893 unsigned long flags;
2894
2895 if (time_after(jiffies, scmd->jiffies_at_alloc +
2896 (scmd_timeout * 2) * HZ)) {
2897 return BLK_EH_DONE;
2898 }
2899
2900 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2901 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2902 /* FW is busy, throttle IO */
2903 spin_lock_irqsave(instance->host->host_lock, flags);
2904
2905 instance->host->can_queue = instance->throttlequeuedepth;
2906 instance->last_time = jiffies;
2907 instance->flag |= MEGASAS_FW_BUSY;
2908
2909 spin_unlock_irqrestore(instance->host->host_lock, flags);
2910 }
2911 return BLK_EH_RESET_TIMER;
2912 }
2913
2914 /**
2915 * megasas_dump - This function will print hexdump of provided buffer.
2916 * @buf: Buffer to be dumped
2917 * @sz: Size in bytes
2918 * @format: Different formats of dumping e.g. format=n will
2919 * cause only 'n' 32 bit words to be dumped in a single
2920 * line.
2921 */
2922 inline void
2923 megasas_dump(void *buf, int sz, int format)
2924 {
2925 int i;
2926 __le32 *buf_loc = (__le32 *)buf;
2927
2928 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2929 if ((i % format) == 0) {
2930 if (i != 0)
2931 printk(KERN_CONT "\n");
2932 printk(KERN_CONT "%08x: ", (i * 4));
2933 }
2934 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2935 }
2936 printk(KERN_CONT "\n");
2937 }
2938
2939 /**
2940 * megasas_dump_reg_set - This function will print hexdump of register set
2941 * @reg_set: Register set to be dumped
2942 */
2943 inline void
2944 megasas_dump_reg_set(void __iomem *reg_set)
2945 {
2946 unsigned int i, sz = 256;
2947 u32 __iomem *reg = (u32 __iomem *)reg_set;
2948
2949 for (i = 0; i < (sz / sizeof(u32)); i++)
2950 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2951 }
2952
2953 /**
2954 * megasas_dump_fusion_io - This function will print key details
2955 * of SCSI IO
2956 * @scmd: SCSI command pointer of SCSI IO
2957 */
2958 void
2959 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2960 {
2961 struct megasas_cmd_fusion *cmd;
2962 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2963 struct megasas_instance *instance;
2964
2965 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2966 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2967
2968 scmd_printk(KERN_INFO, scmd,
2969 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
2970 scmd, scmd->retries, scmd->allowed);
2971 scsi_print_command(scmd);
2972
2973 if (cmd) {
2974 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2975 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2976 scmd_printk(KERN_INFO, scmd,
2977 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
2978 req_desc->SCSIIO.RequestFlags,
2979 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2980 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2981
2982 printk(KERN_INFO "IO request frame:\n");
2983 megasas_dump(cmd->io_request,
2984 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2985 printk(KERN_INFO "Chain frame:\n");
2986 megasas_dump(cmd->sg_frame,
2987 instance->max_chain_frame_sz, 8);
2988 }
2989
2990 }
2991
2992 /*
2993 * megasas_dump_sys_regs - This function will dump system registers through
2994 * sysfs.
2995 * @reg_set: Pointer to System register set.
2996 * @buf: Buffer to which output is to be written.
2997 * @return: Number of bytes written to buffer.
2998 */
2999 static inline ssize_t
3000 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3001 {
3002 unsigned int i, sz = 256;
3003 int bytes_wrote = 0;
3004 char *loc = (char *)buf;
3005 u32 __iomem *reg = (u32 __iomem *)reg_set;
3006
3007 for (i = 0; i < sz / sizeof(u32); i++) {
3008 bytes_wrote += scnprintf(loc + bytes_wrote,
3009 PAGE_SIZE - bytes_wrote,
3010 "%08x: %08x\n", (i * 4),
3011 readl(&reg[i]));
3012 }
3013 return bytes_wrote;
3014 }
3015
3016 /**
3017 * megasas_reset_bus_host - Bus & host reset handler entry point
3018 * @scmd: Mid-layer SCSI command
3019 */
3020 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3021 {
3022 int ret;
3023 struct megasas_instance *instance;
3024
3025 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3026
3027 scmd_printk(KERN_INFO, scmd,
3028 "OCR is requested due to IO timeout!!\n");
3029
3030 scmd_printk(KERN_INFO, scmd,
3031 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3032 scmd->device->host->shost_state,
3033 scsi_host_busy(scmd->device->host),
3034 atomic_read(&instance->fw_outstanding));
3035 /*
3036 * First wait for all commands to complete
3037 */
3038 if (instance->adapter_type == MFI_SERIES) {
3039 ret = megasas_generic_reset(scmd);
3040 } else {
3041 megasas_dump_fusion_io(scmd);
3042 ret = megasas_reset_fusion(scmd->device->host,
3043 SCSIIO_TIMEOUT_OCR);
3044 }
3045
3046 return ret;
3047 }
3048
3049 /**
3050 * megasas_task_abort - Issues task abort request to firmware
3051 * (supported only for fusion adapters)
3052 * @scmd: SCSI command pointer
3053 */
3054 static int megasas_task_abort(struct scsi_cmnd *scmd)
3055 {
3056 int ret;
3057 struct megasas_instance *instance;
3058
3059 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3060
3061 if (instance->adapter_type != MFI_SERIES)
3062 ret = megasas_task_abort_fusion(scmd);
3063 else {
3064 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3065 ret = FAILED;
3066 }
3067
3068 return ret;
3069 }
3070
3071 /**
3072 * megasas_reset_target: Issues target reset request to firmware
3073 * (supported only for fusion adapters)
3074 * @scmd: SCSI command pointer
3075 */
3076 static int megasas_reset_target(struct scsi_cmnd *scmd)
3077 {
3078 int ret;
3079 struct megasas_instance *instance;
3080
3081 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3082
3083 if (instance->adapter_type != MFI_SERIES)
3084 ret = megasas_reset_target_fusion(scmd);
3085 else {
3086 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3087 ret = FAILED;
3088 }
3089
3090 return ret;
3091 }
3092
3093 /**
3094 * megasas_bios_param - Returns disk geometry for a disk
3095 * @sdev: device handle
3096 * @bdev: block device
3097 * @capacity: drive capacity
3098 * @geom: geometry parameters
3099 */
3100 static int
3101 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3102 sector_t capacity, int geom[])
3103 {
3104 int heads;
3105 int sectors;
3106 sector_t cylinders;
3107 unsigned long tmp;
3108
3109 /* Default heads (64) & sectors (32) */
3110 heads = 64;
3111 sectors = 32;
3112
3113 tmp = heads * sectors;
3114 cylinders = capacity;
3115
3116 sector_div(cylinders, tmp);
3117
3118 /*
3119 * Handle extended translation size for logical drives > 1Gb
3120 */
3121
3122 if (capacity >= 0x200000) {
3123 heads = 255;
3124 sectors = 63;
3125 tmp = heads*sectors;
3126 cylinders = capacity;
3127 sector_div(cylinders, tmp);
3128 }
3129
3130 geom[0] = heads;
3131 geom[1] = sectors;
3132
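/* cylinders now holds capacity / (heads * sectors); for example, a
 * 4294967296-sector (2 TiB) disk reports 255 heads, 63 sectors and
 * 4294967296 / 16065 = 267349 cylinders. */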
geom[2] = cylinders; 3133 3134 return 0; 3135 } 3136 3137 static int megasas_map_queues(struct Scsi_Host *shost) 3138 { 3139 struct megasas_instance *instance; 3140 int qoff = 0, offset; 3141 struct blk_mq_queue_map *map; 3142 3143 instance = (struct megasas_instance *)shost->hostdata; 3144 3145 if (shost->nr_hw_queues == 1) 3146 return 0; 3147 3148 offset = instance->low_latency_index_start; 3149 3150 /* Setup Default hctx */ 3151 map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3152 map->nr_queues = instance->msix_vectors - offset; 3153 map->queue_offset = 0; 3154 blk_mq_pci_map_queues(map, instance->pdev, offset); 3155 qoff += map->nr_queues; 3156 offset += map->nr_queues; 3157 3158 /* Setup Poll hctx */ 3159 map = &shost->tag_set.map[HCTX_TYPE_POLL]; 3160 map->nr_queues = instance->iopoll_q_count; 3161 if (map->nr_queues) { 3162 /* 3163 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3164 * affinity), so use the regular blk-mq cpu mapping 3165 */ 3166 map->queue_offset = qoff; 3167 blk_mq_map_queues(map); 3168 } 3169 3170 return 0; 3171 } 3172 3173 static void megasas_aen_polling(struct work_struct *work); 3174 3175 /** 3176 * megasas_service_aen - Processes an event notification 3177 * @instance: Adapter soft state 3178 * @cmd: AEN command completed by the ISR 3179 * 3180 * For AEN, driver sends a command down to FW that is held by the FW till an 3181 * event occurs. When an event of interest occurs, FW completes the command 3182 * that it was previously holding. 3183 * 3184 * This routines sends SIGIO signal to processes that have registered with the 3185 * driver for AEN. 3186 */ 3187 static void 3188 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3189 { 3190 unsigned long flags; 3191 3192 /* 3193 * Don't signal app if it is just an aborted previously registered aen 3194 */ 3195 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3196 spin_lock_irqsave(&poll_aen_lock, flags); 3197 megasas_poll_wait_aen = 1; 3198 spin_unlock_irqrestore(&poll_aen_lock, flags); 3199 wake_up(&megasas_poll_wait); 3200 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3201 } 3202 else 3203 cmd->abort_aen = 0; 3204 3205 instance->aen_cmd = NULL; 3206 3207 megasas_return_cmd(instance, cmd); 3208 3209 if ((instance->unload == 0) && 3210 ((instance->issuepend_done == 1))) { 3211 struct megasas_aen_event *ev; 3212 3213 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3214 if (!ev) { 3215 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3216 } else { 3217 ev->instance = instance; 3218 instance->ev = ev; 3219 INIT_DELAYED_WORK(&ev->hotplug_work, 3220 megasas_aen_polling); 3221 schedule_delayed_work(&ev->hotplug_work, 0); 3222 } 3223 } 3224 } 3225 3226 static ssize_t 3227 fw_crash_buffer_store(struct device *cdev, 3228 struct device_attribute *attr, const char *buf, size_t count) 3229 { 3230 struct Scsi_Host *shost = class_to_shost(cdev); 3231 struct megasas_instance *instance = 3232 (struct megasas_instance *) shost->hostdata; 3233 int val = 0; 3234 unsigned long flags; 3235 3236 if (kstrtoint(buf, 0, &val) != 0) 3237 return -EINVAL; 3238 3239 spin_lock_irqsave(&instance->crashdump_lock, flags); 3240 instance->fw_crash_buffer_offset = val; 3241 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3242 return strlen(buf); 3243 } 3244 3245 static ssize_t 3246 fw_crash_buffer_show(struct device *cdev, 3247 struct device_attribute *attr, char *buf) 3248 { 3249 struct Scsi_Host *shost = class_to_shost(cdev); 3250 struct megasas_instance *instance = 3251 (struct 
megasas_instance *) shost->hostdata; 3252 u32 size; 3253 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3254 unsigned long chunk_left_bytes; 3255 unsigned long src_addr; 3256 unsigned long flags; 3257 u32 buff_offset; 3258 3259 spin_lock_irqsave(&instance->crashdump_lock, flags); 3260 buff_offset = instance->fw_crash_buffer_offset; 3261 if (!instance->crash_dump_buf && 3262 !((instance->fw_crash_state == AVAILABLE) || 3263 (instance->fw_crash_state == COPYING))) { 3264 dev_err(&instance->pdev->dev, 3265 "Firmware crash dump is not available\n"); 3266 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3267 return -EINVAL; 3268 } 3269 3270 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3271 dev_err(&instance->pdev->dev, 3272 "Firmware crash dump offset is out of range\n"); 3273 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3274 return 0; 3275 } 3276 3277 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3278 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3279 size = (size > chunk_left_bytes) ? chunk_left_bytes : size; 3280 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3281 3282 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3283 (buff_offset % dmachunk); 3284 memcpy(buf, (void *)src_addr, size); 3285 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3286 3287 return size; 3288 } 3289 3290 static ssize_t 3291 fw_crash_buffer_size_show(struct device *cdev, 3292 struct device_attribute *attr, char *buf) 3293 { 3294 struct Scsi_Host *shost = class_to_shost(cdev); 3295 struct megasas_instance *instance = 3296 (struct megasas_instance *) shost->hostdata; 3297 3298 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3299 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3300 } 3301 3302 static ssize_t 3303 fw_crash_state_store(struct device *cdev, 3304 struct device_attribute *attr, const char *buf, size_t count) 3305 { 3306 struct Scsi_Host *shost = class_to_shost(cdev); 3307 struct megasas_instance *instance = 3308 (struct megasas_instance *) shost->hostdata; 3309 int val = 0; 3310 unsigned long flags; 3311 3312 if (kstrtoint(buf, 0, &val) != 0) 3313 return -EINVAL; 3314 3315 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3316 dev_err(&instance->pdev->dev, "application updates invalid " 3317 "firmware crash state\n"); 3318 return -EINVAL; 3319 } 3320 3321 instance->fw_crash_state = val; 3322 3323 if ((val == COPIED) || (val == COPY_ERROR)) { 3324 spin_lock_irqsave(&instance->crashdump_lock, flags); 3325 megasas_free_host_crash_buffer(instance); 3326 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3327 if (val == COPY_ERROR) 3328 dev_info(&instance->pdev->dev, "application failed to " 3329 "copy Firmware crash dump\n"); 3330 else 3331 dev_info(&instance->pdev->dev, "Firmware crash dump " 3332 "copied successfully\n"); 3333 } 3334 return strlen(buf); 3335 } 3336 3337 static ssize_t 3338 fw_crash_state_show(struct device *cdev, 3339 struct device_attribute *attr, char *buf) 3340 { 3341 struct Scsi_Host *shost = class_to_shost(cdev); 3342 struct megasas_instance *instance = 3343 (struct megasas_instance *) shost->hostdata; 3344 3345 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3346 } 3347 3348 static ssize_t 3349 page_size_show(struct device *cdev, 3350 struct device_attribute *attr, char *buf) 3351 { 3352 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3353 } 3354 3355 static ssize_t 3356 ldio_outstanding_show(struct device *cdev, 
struct device_attribute *attr, 3357 char *buf) 3358 { 3359 struct Scsi_Host *shost = class_to_shost(cdev); 3360 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3361 3362 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3363 } 3364 3365 static ssize_t 3366 fw_cmds_outstanding_show(struct device *cdev, 3367 struct device_attribute *attr, char *buf) 3368 { 3369 struct Scsi_Host *shost = class_to_shost(cdev); 3370 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3371 3372 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3373 } 3374 3375 static ssize_t 3376 enable_sdev_max_qd_show(struct device *cdev, 3377 struct device_attribute *attr, char *buf) 3378 { 3379 struct Scsi_Host *shost = class_to_shost(cdev); 3380 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3381 3382 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); 3383 } 3384 3385 static ssize_t 3386 enable_sdev_max_qd_store(struct device *cdev, 3387 struct device_attribute *attr, const char *buf, size_t count) 3388 { 3389 struct Scsi_Host *shost = class_to_shost(cdev); 3390 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3391 u32 val = 0; 3392 bool is_target_prop; 3393 int ret_target_prop = DCMD_FAILED; 3394 struct scsi_device *sdev; 3395 3396 if (kstrtou32(buf, 0, &val) != 0) { 3397 pr_err("megasas: could not set enable_sdev_max_qd\n"); 3398 return -EINVAL; 3399 } 3400 3401 mutex_lock(&instance->reset_mutex); 3402 if (val) 3403 instance->enable_sdev_max_qd = true; 3404 else 3405 instance->enable_sdev_max_qd = false; 3406 3407 shost_for_each_device(sdev, shost) { 3408 ret_target_prop = megasas_get_target_prop(instance, sdev); 3409 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; 3410 megasas_set_fw_assisted_qd(sdev, is_target_prop); 3411 } 3412 mutex_unlock(&instance->reset_mutex); 3413 3414 return strlen(buf); 3415 } 3416 3417 static ssize_t 3418 dump_system_regs_show(struct device *cdev, 3419 struct device_attribute *attr, char *buf) 3420 { 3421 struct Scsi_Host *shost = class_to_shost(cdev); 3422 struct megasas_instance *instance = 3423 (struct megasas_instance *)shost->hostdata; 3424 3425 return megasas_dump_sys_regs(instance->reg_set, buf); 3426 } 3427 3428 static ssize_t 3429 raid_map_id_show(struct device *cdev, struct device_attribute *attr, 3430 char *buf) 3431 { 3432 struct Scsi_Host *shost = class_to_shost(cdev); 3433 struct megasas_instance *instance = 3434 (struct megasas_instance *)shost->hostdata; 3435 3436 return snprintf(buf, PAGE_SIZE, "%ld\n", 3437 (unsigned long)instance->map_id); 3438 } 3439 3440 static DEVICE_ATTR_RW(fw_crash_buffer); 3441 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3442 static DEVICE_ATTR_RW(fw_crash_state); 3443 static DEVICE_ATTR_RO(page_size); 3444 static DEVICE_ATTR_RO(ldio_outstanding); 3445 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3446 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3447 static DEVICE_ATTR_RO(dump_system_regs); 3448 static DEVICE_ATTR_RO(raid_map_id); 3449 3450 static struct device_attribute *megaraid_host_attrs[] = { 3451 &dev_attr_fw_crash_buffer_size, 3452 &dev_attr_fw_crash_buffer, 3453 &dev_attr_fw_crash_state, 3454 &dev_attr_page_size, 3455 &dev_attr_ldio_outstanding, 3456 &dev_attr_fw_cmds_outstanding, 3457 &dev_attr_enable_sdev_max_qd, 3458 &dev_attr_dump_system_regs, 3459 &dev_attr_raid_map_id, 3460 NULL, 3461 }; 3462 3463 /* 3464 * Scsi host template for megaraid_sas driver 3465 */ 3466 static struct scsi_host_template megasas_template = { 3467 3468 .module = THIS_MODULE, 3469 .name = "Avago SAS based MegaRAID driver", 3470 .proc_name = "megaraid_sas", 3471 .slave_configure = megasas_slave_configure, 3472 .slave_alloc = megasas_slave_alloc, 3473 .slave_destroy = megasas_slave_destroy, 3474 .queuecommand = megasas_queue_command, 3475 .eh_target_reset_handler = megasas_reset_target, 3476 .eh_abort_handler = megasas_task_abort, 3477 .eh_host_reset_handler = megasas_reset_bus_host, 3478 .eh_timed_out = megasas_reset_timer, 3479 .shost_attrs = megaraid_host_attrs, 3480 .bios_param = megasas_bios_param, 3481 .map_queues = megasas_map_queues, 3482 .mq_poll = megasas_blk_mq_poll, 3483 .change_queue_depth = scsi_change_queue_depth, 3484 .max_segment_size = 0xffffffff, 3485 }; 3486 3487 /** 3488 * megasas_complete_int_cmd - Completes an internal command 3489 * @instance: Adapter soft state 3490 * @cmd: Command to be completed 3491 * 3492 * The megasas_issue_blocked_cmd() function waits for a command to complete 3493 * after it issues a command. This function wakes up that waiting routine by 3494 * calling wake_up() on the wait queue. 3495 */ 3496 static void 3497 megasas_complete_int_cmd(struct megasas_instance *instance, 3498 struct megasas_cmd *cmd) 3499 { 3500 if (cmd->cmd_status_drv == DCMD_INIT) 3501 cmd->cmd_status_drv = 3502 (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 3503 DCMD_SUCCESS : DCMD_FAILED; 3504 3505 wake_up(&instance->int_cmd_wait_q); 3506 } 3507 3508 /** 3509 * megasas_complete_abort - Completes aborting a command 3510 * @instance: Adapter soft state 3511 * @cmd: Cmd that was issued to abort another cmd 3512 * 3513 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3514 * after it issues an abort on a previously issued command. 
This function 3515 * wakes up all functions waiting on the same wait queue. 3516 */ 3517 static void 3518 megasas_complete_abort(struct megasas_instance *instance, 3519 struct megasas_cmd *cmd) 3520 { 3521 if (cmd->sync_cmd) { 3522 cmd->sync_cmd = 0; 3523 cmd->cmd_status_drv = DCMD_SUCCESS; 3524 wake_up(&instance->abort_cmd_wait_q); 3525 } 3526 } 3527 3528 /** 3529 * megasas_complete_cmd - Completes a command 3530 * @instance: Adapter soft state 3531 * @cmd: Command to be completed 3532 * @alt_status: If non-zero, use this value as status to 3533 * SCSI mid-layer instead of the value returned 3534 * by the FW. This should be used if caller wants 3535 * an alternate status (as in the case of aborted 3536 * commands) 3537 */ 3538 void 3539 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3540 u8 alt_status) 3541 { 3542 int exception = 0; 3543 struct megasas_header *hdr = &cmd->frame->hdr; 3544 unsigned long flags; 3545 struct fusion_context *fusion = instance->ctrl_context; 3546 u32 opcode, status; 3547 3548 /* flag for the retry reset */ 3549 cmd->retry_for_fw_reset = 0; 3550 3551 if (cmd->scmd) 3552 cmd->scmd->SCp.ptr = NULL; 3553 3554 switch (hdr->cmd) { 3555 case MFI_CMD_INVALID: 3556 /* Some older 1068 controller FW may keep a pended 3557 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3558 when booting the kdump kernel. Ignore this command to 3559 prevent a kernel panic on shutdown of the kdump kernel. */ 3560 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3561 "completed\n"); 3562 dev_warn(&instance->pdev->dev, "If you have a controller " 3563 "other than PERC5, please upgrade your firmware\n"); 3564 break; 3565 case MFI_CMD_PD_SCSI_IO: 3566 case MFI_CMD_LD_SCSI_IO: 3567 3568 /* 3569 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3570 * issued either through an IO path or an IOCTL path. If it 3571 * was via IOCTL, we will send it to internal completion. 
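* (IOCTL-originated frames carry cmd->sync_cmd, so megasas_complete_int_cmd()
* below wakes the blocked issuer instead of completing a midlayer scsi_cmnd.)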
3572 */ 3573 if (cmd->sync_cmd) { 3574 cmd->sync_cmd = 0; 3575 megasas_complete_int_cmd(instance, cmd); 3576 break; 3577 } 3578 fallthrough; 3579 3580 case MFI_CMD_LD_READ: 3581 case MFI_CMD_LD_WRITE: 3582 3583 if (alt_status) { 3584 cmd->scmd->result = alt_status << 16; 3585 exception = 1; 3586 } 3587 3588 if (exception) { 3589 3590 atomic_dec(&instance->fw_outstanding); 3591 3592 scsi_dma_unmap(cmd->scmd); 3593 cmd->scmd->scsi_done(cmd->scmd); 3594 megasas_return_cmd(instance, cmd); 3595 3596 break; 3597 } 3598 3599 switch (hdr->cmd_status) { 3600 3601 case MFI_STAT_OK: 3602 cmd->scmd->result = DID_OK << 16; 3603 break; 3604 3605 case MFI_STAT_SCSI_IO_FAILED: 3606 case MFI_STAT_LD_INIT_IN_PROGRESS: 3607 cmd->scmd->result = 3608 (DID_ERROR << 16) | hdr->scsi_status; 3609 break; 3610 3611 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3612 3613 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3614 3615 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3616 memset(cmd->scmd->sense_buffer, 0, 3617 SCSI_SENSE_BUFFERSIZE); 3618 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3619 hdr->sense_len); 3620 3621 cmd->scmd->result |= DRIVER_SENSE << 24; 3622 } 3623 3624 break; 3625 3626 case MFI_STAT_LD_OFFLINE: 3627 case MFI_STAT_DEVICE_NOT_FOUND: 3628 cmd->scmd->result = DID_BAD_TARGET << 16; 3629 break; 3630 3631 default: 3632 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3633 hdr->cmd_status); 3634 cmd->scmd->result = DID_ERROR << 16; 3635 break; 3636 } 3637 3638 atomic_dec(&instance->fw_outstanding); 3639 3640 scsi_dma_unmap(cmd->scmd); 3641 cmd->scmd->scsi_done(cmd->scmd); 3642 megasas_return_cmd(instance, cmd); 3643 3644 break; 3645 3646 case MFI_CMD_SMP: 3647 case MFI_CMD_STP: 3648 case MFI_CMD_NVME: 3649 case MFI_CMD_TOOLBOX: 3650 megasas_complete_int_cmd(instance, cmd); 3651 break; 3652 3653 case MFI_CMD_DCMD: 3654 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3655 /* Check for LD map update */ 3656 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3657 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3658 fusion->fast_path_io = 0; 3659 spin_lock_irqsave(instance->host->host_lock, flags); 3660 status = cmd->frame->hdr.cmd_status; 3661 instance->map_update_cmd = NULL; 3662 if (status != MFI_STAT_OK) { 3663 if (status != MFI_STAT_NOT_FOUND) 3664 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3665 cmd->frame->hdr.cmd_status); 3666 else { 3667 megasas_return_cmd(instance, cmd); 3668 spin_unlock_irqrestore( 3669 instance->host->host_lock, 3670 flags); 3671 break; 3672 } 3673 } 3674 3675 megasas_return_cmd(instance, cmd); 3676 3677 /* 3678 * Set fast path IO to ZERO. 3679 * Validate Map will set proper value. 3680 * Meanwhile all IOs will go as LD IO. 
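* (MR_ValidateMapInfo() below bumps map_id and re-enables fast_path_io only
* when the newly fetched map validates.)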
3681 */ 3682 if (status == MFI_STAT_OK && 3683 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3684 instance->map_id++; 3685 fusion->fast_path_io = 1; 3686 } else { 3687 fusion->fast_path_io = 0; 3688 } 3689 3690 megasas_sync_map_info(instance); 3691 spin_unlock_irqrestore(instance->host->host_lock, 3692 flags); 3693 break; 3694 } 3695 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3696 opcode == MR_DCMD_CTRL_EVENT_GET) { 3697 spin_lock_irqsave(&poll_aen_lock, flags); 3698 megasas_poll_wait_aen = 0; 3699 spin_unlock_irqrestore(&poll_aen_lock, flags); 3700 } 3701 3702 /* FW has an updated PD sequence */ 3703 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3704 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3705 3706 spin_lock_irqsave(instance->host->host_lock, flags); 3707 status = cmd->frame->hdr.cmd_status; 3708 instance->jbod_seq_cmd = NULL; 3709 megasas_return_cmd(instance, cmd); 3710 3711 if (status == MFI_STAT_OK) { 3712 instance->pd_seq_map_id++; 3713 /* Re-register a pd sync seq num cmd */ 3714 if (megasas_sync_pd_seq_num(instance, true)) 3715 instance->use_seqnum_jbod_fp = false; 3716 } else 3717 instance->use_seqnum_jbod_fp = false; 3718 3719 spin_unlock_irqrestore(instance->host->host_lock, flags); 3720 break; 3721 } 3722 3723 /* 3724 * See if we got an event notification 3725 */ 3726 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3727 megasas_service_aen(instance, cmd); 3728 else 3729 megasas_complete_int_cmd(instance, cmd); 3730 3731 break; 3732 3733 case MFI_CMD_ABORT: 3734 /* 3735 * The cmd issued to abort another cmd has returned 3736 */ 3737 megasas_complete_abort(instance, cmd); 3738 break; 3739 3740 default: 3741 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3742 hdr->cmd); 3743 megasas_complete_int_cmd(instance, cmd); 3744 break; 3745 } 3746 } 3747 3748 /** 3749 * megasas_issue_pending_cmds_again - issue all pending cmds 3750 * in FW again because of the fw reset 3751 * @instance: Adapter soft state 3752 */ 3753 static inline void 3754 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3755 { 3756 struct megasas_cmd *cmd; 3757 struct list_head clist_local; 3758 union megasas_evt_class_locale class_locale; 3759 unsigned long flags; 3760 u32 seq_num; 3761 3762 INIT_LIST_HEAD(&clist_local); 3763 spin_lock_irqsave(&instance->hba_lock, flags); 3764 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3765 spin_unlock_irqrestore(&instance->hba_lock, flags); 3766 3767 while (!list_empty(&clist_local)) { 3768 cmd = list_entry((&clist_local)->next, 3769 struct megasas_cmd, list); 3770 list_del_init(&cmd->list); 3771 3772 if (cmd->sync_cmd || cmd->scmd) { 3773 dev_notice(&instance->pdev->dev, "command %p, %p:%d " 3774 "detected to be pending while HBA reset\n", 3775 cmd, cmd->scmd, cmd->sync_cmd); 3776 3777 cmd->retry_for_fw_reset++; 3778 3779 if (cmd->retry_for_fw_reset == 3) { 3780 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d " 3781 "was tried multiple times during reset. "
3782 "Shutting down the HBA\n", 3783 cmd, cmd->scmd, cmd->sync_cmd); 3784 instance->instancet->disable_intr(instance); 3785 atomic_set(&instance->fw_reset_no_pci_access, 1); 3786 megaraid_sas_kill_hba(instance); 3787 return; 3788 } 3789 } 3790 3791 if (cmd->sync_cmd == 1) { 3792 if (cmd->scmd) { 3793 dev_notice(&instance->pdev->dev, "unexpected " 3794 "cmd attached to internal command!\n"); 3795 } 3796 dev_notice(&instance->pdev->dev, "%p synchronous cmd " 3797 "on the internal reset queue, " 3798 "issue it again.\n", cmd); 3799 cmd->cmd_status_drv = DCMD_INIT; 3800 instance->instancet->fire_cmd(instance, 3801 cmd->frame_phys_addr, 3802 0, instance->reg_set); 3803 } else if (cmd->scmd) { 3804 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] " 3805 "detected on the internal queue, issue again.\n", 3806 cmd, cmd->scmd->cmnd[0]); 3807 3808 atomic_inc(&instance->fw_outstanding); 3809 instance->instancet->fire_cmd(instance, 3810 cmd->frame_phys_addr, 3811 cmd->frame_count-1, instance->reg_set); 3812 } else { 3813 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the " 3814 "internal reset defer list while re-issue!!\n", 3815 cmd); 3816 } 3817 } 3818 3819 if (instance->aen_cmd) { 3820 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3821 megasas_return_cmd(instance, instance->aen_cmd); 3822 3823 instance->aen_cmd = NULL; 3824 } 3825 3826 /* 3827 * Initiate AEN (Asynchronous Event Notification) 3828 */ 3829 seq_num = instance->last_seq_num; 3830 class_locale.members.reserved = 0; 3831 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3832 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3833 3834 megasas_register_aen(instance, seq_num, class_locale.word); 3835 } 3836 3837 /* 3838 * Move the internal reset pending commands to a deferred queue. 3839 * 3840 * We move the commands pending at internal reset time to a 3841 * pending queue. This queue would be flushed after successful 3842 * completion of the internal reset sequence. If the internal reset 3843 * did not complete in time, the kernel reset handler would flush 3844 these commands.
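 * The deferred queue is drained by megasas_issue_pending_cmds_again()
 * above, which re-fires each command; a command still pending after
 * three reset attempts causes the HBA to be shut down.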
3845 */ 3846 static void 3847 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3848 { 3849 struct megasas_cmd *cmd; 3850 int i; 3851 u16 max_cmd = instance->max_fw_cmds; 3852 u32 defer_index; 3853 unsigned long flags; 3854 3855 defer_index = 0; 3856 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3857 for (i = 0; i < max_cmd; i++) { 3858 cmd = instance->cmd_list[i]; 3859 if (cmd->sync_cmd == 1 || cmd->scmd) { 3860 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p " 3861 "on the defer queue as internal\n", 3862 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3863 3864 if (!list_empty(&cmd->list)) { 3865 dev_notice(&instance->pdev->dev, "ERROR while" 3866 " moving this cmd:%p, %d %p, it was " 3867 "discovered on some list?\n", 3868 cmd, cmd->sync_cmd, cmd->scmd); 3869 3870 list_del_init(&cmd->list); 3871 } 3872 defer_index++; 3873 list_add_tail(&cmd->list, 3874 &instance->internal_reset_pending_q); 3875 } 3876 } 3877 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3878 } 3879 3880 3881 static void 3882 process_fw_state_change_wq(struct work_struct *work) 3883 { 3884 struct megasas_instance *instance = 3885 container_of(work, struct megasas_instance, work_init); 3886 u32 wait; 3887 unsigned long flags; 3888 3889 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3890 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3891 atomic_read(&instance->adprecovery)); 3892 return ; 3893 } 3894 3895 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3896 dev_notice(&instance->pdev->dev, "FW detected to be in fault " 3897 "state, restarting it...\n"); 3898 3899 instance->instancet->disable_intr(instance); 3900 atomic_set(&instance->fw_outstanding, 0); 3901 3902 atomic_set(&instance->fw_reset_no_pci_access, 1); 3903 instance->instancet->adp_reset(instance, instance->reg_set); 3904 atomic_set(&instance->fw_reset_no_pci_access, 0); 3905 3906 dev_notice(&instance->pdev->dev, "FW restarted successfully, " 3907 "initiating next stage...\n"); 3908 3909 dev_notice(&instance->pdev->dev, "HBA recovery state machine, " 3910 "state 2 starting...\n"); 3911 3912 /* wait about 30 seconds before starting the second init */ 3913 for (wait = 0; wait < 30; wait++) { 3914 msleep(1000); 3915 } 3916 3917 if (megasas_transition_to_ready(instance, 1)) { 3918 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3919 3920 atomic_set(&instance->fw_reset_no_pci_access, 1); 3921 megaraid_sas_kill_hba(instance); 3922 return ; 3923 } 3924 3925 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3926 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3927 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3928 ) { 3929 *instance->consumer = *instance->producer; 3930 } else { 3931 *instance->consumer = 0; 3932 *instance->producer = 0; 3933 } 3934 3935 megasas_issue_init_mfi(instance); 3936 3937 spin_lock_irqsave(&instance->hba_lock, flags); 3938 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3939 spin_unlock_irqrestore(&instance->hba_lock, flags); 3940 instance->instancet->enable_intr(instance); 3941 3942 megasas_issue_pending_cmds_again(instance); 3943 instance->issuepend_done = 1; 3944 } 3945 } 3946 3947 /** 3948 * megasas_deplete_reply_queue - Processes all completed commands 3949 * @instance: Adapter soft state 3950 * @alt_status: Alternate status to be returned to 3951 * SCSI mid-layer instead of the status 3952 * returned by the FW 3953 * Note: this must be called with hba lock held 3954 */ 3955 static
int 3956 megasas_deplete_reply_queue(struct megasas_instance *instance, 3957 u8 alt_status) 3958 { 3959 u32 mfiStatus; 3960 u32 fw_state; 3961 3962 if ((mfiStatus = instance->instancet->check_reset(instance, 3963 instance->reg_set)) == 1) { 3964 return IRQ_HANDLED; 3965 } 3966 3967 mfiStatus = instance->instancet->clear_intr(instance); 3968 if (mfiStatus == 0) { 3969 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3970 if (!instance->msix_vectors) 3971 return IRQ_NONE; 3972 } 3973 3974 instance->mfiStatus = mfiStatus; 3975 3976 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3977 fw_state = instance->instancet->read_fw_status_reg( 3978 instance) & MFI_STATE_MASK; 3979 3980 if (fw_state != MFI_STATE_FAULT) { 3981 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3982 fw_state); 3983 } 3984 3985 if ((fw_state == MFI_STATE_FAULT) && 3986 (instance->disableOnlineCtrlReset == 0)) { 3987 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3988 3989 if ((instance->pdev->device == 3990 PCI_DEVICE_ID_LSI_SAS1064R) || 3991 (instance->pdev->device == 3992 PCI_DEVICE_ID_DELL_PERC5) || 3993 (instance->pdev->device == 3994 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3995 3996 *instance->consumer = 3997 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3998 } 3999 4000 4001 instance->instancet->disable_intr(instance); 4002 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4003 instance->issuepend_done = 0; 4004 4005 atomic_set(&instance->fw_outstanding, 0); 4006 megasas_internal_reset_defer_cmds(instance); 4007 4008 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 4009 fw_state, atomic_read(&instance->adprecovery)); 4010 4011 schedule_work(&instance->work_init); 4012 return IRQ_HANDLED; 4013 4014 } else { 4015 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 4016 fw_state, instance->disableOnlineCtrlReset); 4017 } 4018 } 4019 4020 tasklet_schedule(&instance->isr_tasklet); 4021 return IRQ_HANDLED; 4022 } 4023 4024 /** 4025 * megasas_isr - isr entry point 4026 * @irq: IRQ number 4027 * @devp: IRQ context address 4028 */ 4029 static irqreturn_t megasas_isr(int irq, void *devp) 4030 { 4031 struct megasas_irq_context *irq_context = devp; 4032 struct megasas_instance *instance = irq_context->instance; 4033 unsigned long flags; 4034 irqreturn_t rc; 4035 4036 if (atomic_read(&instance->fw_reset_no_pci_access)) 4037 return IRQ_HANDLED; 4038 4039 spin_lock_irqsave(&instance->hba_lock, flags); 4040 rc = megasas_deplete_reply_queue(instance, DID_OK); 4041 spin_unlock_irqrestore(&instance->hba_lock, flags); 4042 4043 return rc; 4044 } 4045 4046 /** 4047 * megasas_transition_to_ready - Move the FW to READY state 4048 * @instance: Adapter soft state 4049 * @ocr: Adapter reset state 4050 * 4051 * During the initialization, FW passes can potentially be in any one of 4052 * several possible states. If the FW in operational, waiting-for-handshake 4053 * states, driver must take steps to bring it to ready state. Otherwise, it 4054 * has to wait for the ready state. 
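 * For each intermediate state the routine below nudges the firmware
 * where needed (doorbell writes for the handshake, boot-message-pending
 * and operational states) and then polls the status register for up to
 * max_wait seconds; if the absolute state never changes it gives up
 * with -ENODEV.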
4055 */ 4056 int 4057 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 4058 { 4059 int i; 4060 u8 max_wait; 4061 u32 fw_state; 4062 u32 abs_state, curr_abs_state; 4063 4064 abs_state = instance->instancet->read_fw_status_reg(instance); 4065 fw_state = abs_state & MFI_STATE_MASK; 4066 4067 if (fw_state != MFI_STATE_READY) 4068 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 4069 " state\n"); 4070 4071 while (fw_state != MFI_STATE_READY) { 4072 4073 switch (fw_state) { 4074 4075 case MFI_STATE_FAULT: 4076 dev_printk(KERN_ERR, &instance->pdev->dev, 4077 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 4078 abs_state & MFI_STATE_FAULT_CODE, 4079 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4080 if (ocr) { 4081 max_wait = MEGASAS_RESET_WAIT_TIME; 4082 break; 4083 } else { 4084 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4085 megasas_dump_reg_set(instance->reg_set); 4086 return -ENODEV; 4087 } 4088 4089 case MFI_STATE_WAIT_HANDSHAKE: 4090 /* 4091 * Set the CLR bit in inbound doorbell 4092 */ 4093 if ((instance->pdev->device == 4094 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4095 (instance->pdev->device == 4096 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4097 (instance->adapter_type != MFI_SERIES)) 4098 writel( 4099 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4100 &instance->reg_set->doorbell); 4101 else 4102 writel( 4103 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4104 &instance->reg_set->inbound_doorbell); 4105 4106 max_wait = MEGASAS_RESET_WAIT_TIME; 4107 break; 4108 4109 case MFI_STATE_BOOT_MESSAGE_PENDING: 4110 if ((instance->pdev->device == 4111 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4112 (instance->pdev->device == 4113 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4114 (instance->adapter_type != MFI_SERIES)) 4115 writel(MFI_INIT_HOTPLUG, 4116 &instance->reg_set->doorbell); 4117 else 4118 writel(MFI_INIT_HOTPLUG, 4119 &instance->reg_set->inbound_doorbell); 4120 4121 max_wait = MEGASAS_RESET_WAIT_TIME; 4122 break; 4123 4124 case MFI_STATE_OPERATIONAL: 4125 /* 4126 * Bring it to READY state; assuming max wait 10 secs 4127 */ 4128 instance->instancet->disable_intr(instance); 4129 if ((instance->pdev->device == 4130 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4131 (instance->pdev->device == 4132 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4133 (instance->adapter_type != MFI_SERIES)) { 4134 writel(MFI_RESET_FLAGS, 4135 &instance->reg_set->doorbell); 4136 4137 if (instance->adapter_type != MFI_SERIES) { 4138 for (i = 0; i < (10 * 1000); i += 20) { 4139 if (megasas_readl( 4140 instance, 4141 &instance-> 4142 reg_set-> 4143 doorbell) & 1) 4144 msleep(20); 4145 else 4146 break; 4147 } 4148 } 4149 } else 4150 writel(MFI_RESET_FLAGS, 4151 &instance->reg_set->inbound_doorbell); 4152 4153 max_wait = MEGASAS_RESET_WAIT_TIME; 4154 break; 4155 4156 case MFI_STATE_UNDEFINED: 4157 /* 4158 * This state should not last for more than 2 seconds 4159 */ 4160 max_wait = MEGASAS_RESET_WAIT_TIME; 4161 break; 4162 4163 case MFI_STATE_BB_INIT: 4164 max_wait = MEGASAS_RESET_WAIT_TIME; 4165 break; 4166 4167 case MFI_STATE_FW_INIT: 4168 max_wait = MEGASAS_RESET_WAIT_TIME; 4169 break; 4170 4171 case MFI_STATE_FW_INIT_2: 4172 max_wait = MEGASAS_RESET_WAIT_TIME; 4173 break; 4174 4175 case MFI_STATE_DEVICE_SCAN: 4176 max_wait = MEGASAS_RESET_WAIT_TIME; 4177 break; 4178 4179 case MFI_STATE_FLUSH_CACHE: 4180 max_wait = MEGASAS_RESET_WAIT_TIME; 4181 break; 4182 4183 default: 4184 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4185 fw_state); 4186 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4187 megasas_dump_reg_set(instance->reg_set); 4188 return -ENODEV; 4189 } 4190 4191 /* 4192 * The cur_state should not last for more than max_wait secs 4193 */ 4194 for (i = 0; i < max_wait * 50; i++) { 4195 curr_abs_state = instance->instancet-> 4196 read_fw_status_reg(instance); 4197 4198 if (abs_state == curr_abs_state) { 4199 msleep(20); 4200 } else 4201 break; 4202 } 4203 4204 /* 4205 * Return error if fw_state hasn't changed after max_wait 4206 */ 4207 if (curr_abs_state == abs_state) { 4208 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4209 "in %d secs\n", fw_state, max_wait); 4210 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4211 megasas_dump_reg_set(instance->reg_set); 4212 return -ENODEV; 4213 } 4214 4215 abs_state = curr_abs_state; 4216 fw_state = curr_abs_state & MFI_STATE_MASK; 4217 } 4218 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4219 4220 return 0; 4221 } 4222 4223 /** 4224 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4225 * @instance: Adapter soft state 4226 */ 4227 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4228 { 4229 int i; 4230 u16 max_cmd = instance->max_mfi_cmds; 4231 struct megasas_cmd *cmd; 4232 4233 if (!instance->frame_dma_pool) 4234 return; 4235 4236 /* 4237 * Return all frames to pool 4238 */ 4239 for (i = 0; i < max_cmd; i++) { 4240 4241 cmd = instance->cmd_list[i]; 4242 4243 if (cmd->frame) 4244 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4245 cmd->frame_phys_addr); 4246 4247 if (cmd->sense) 4248 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4249 cmd->sense_phys_addr); 4250 } 4251 4252 /* 4253 * Now destroy the pool itself 4254 */ 4255 dma_pool_destroy(instance->frame_dma_pool); 4256 dma_pool_destroy(instance->sense_dma_pool); 4257 4258 instance->frame_dma_pool = NULL; 4259 instance->sense_dma_pool = NULL; 4260 } 4261 4262 /** 4263 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4264 * @instance: Adapter soft state 4265 * 4266 * Each command packet has an embedded DMA memory buffer that is used for 4267 * filling MFI frame and the SG list that immediately follows the frame. This 4268 * function creates those DMA memory buffers for each command packet by using 4269 * PCI pool facility. 4270 */ 4271 static int megasas_create_frame_pool(struct megasas_instance *instance) 4272 { 4273 int i; 4274 u16 max_cmd; 4275 u32 frame_count; 4276 struct megasas_cmd *cmd; 4277 4278 max_cmd = instance->max_mfi_cmds; 4279 4280 /* 4281 * For MFI controllers. 4282 * max_num_sge = 60 4283 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4284 * Total 960 byte (15 MFI frame of 64 byte) 4285 * 4286 * Fusion adapter require only 3 extra frame. 4287 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4288 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4289 * Total 192 byte (3 MFI frame of 64 byte) 4290 */ 4291 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4292 (15 + 1) : (3 + 1); 4293 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4294 /* 4295 * Use DMA pool facility provided by PCI layer 4296 */ 4297 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4298 &instance->pdev->dev, 4299 instance->mfi_frame_size, 256, 0); 4300 4301 if (!instance->frame_dma_pool) { 4302 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4303 return -ENOMEM; 4304 } 4305 4306 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4307 &instance->pdev->dev, 128, 4308 4, 0); 4309 4310 if (!instance->sense_dma_pool) { 4311 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4312 4313 dma_pool_destroy(instance->frame_dma_pool); 4314 instance->frame_dma_pool = NULL; 4315 4316 return -ENOMEM; 4317 } 4318 4319 /* 4320 * Allocate and attach a frame to each of the commands in cmd_list. 4321 * By making cmd->index as the context instead of the &cmd, we can 4322 * always use 32bit context regardless of the architecture 4323 */ 4324 for (i = 0; i < max_cmd; i++) { 4325 4326 cmd = instance->cmd_list[i]; 4327 4328 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4329 GFP_KERNEL, &cmd->frame_phys_addr); 4330 4331 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4332 GFP_KERNEL, &cmd->sense_phys_addr); 4333 4334 /* 4335 * megasas_teardown_frame_pool() takes care of freeing 4336 * whatever has been allocated 4337 */ 4338 if (!cmd->frame || !cmd->sense) { 4339 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4340 megasas_teardown_frame_pool(instance); 4341 return -ENOMEM; 4342 } 4343 4344 cmd->frame->io.context = cpu_to_le32(cmd->index); 4345 cmd->frame->io.pad_0 = 0; 4346 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4347 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4348 } 4349 4350 return 0; 4351 } 4352 4353 /** 4354 * megasas_free_cmds - Free all the cmds in the free cmd pool 4355 * @instance: Adapter soft state 4356 */ 4357 void megasas_free_cmds(struct megasas_instance *instance) 4358 { 4359 int i; 4360 4361 /* First free the MFI frame pool */ 4362 megasas_teardown_frame_pool(instance); 4363 4364 /* Free all the commands in the cmd_list */ 4365 for (i = 0; i < instance->max_mfi_cmds; i++) 4366 4367 kfree(instance->cmd_list[i]); 4368 4369 /* Free the cmd_list buffer itself */ 4370 kfree(instance->cmd_list); 4371 instance->cmd_list = NULL; 4372 4373 INIT_LIST_HEAD(&instance->cmd_pool); 4374 } 4375 4376 /** 4377 * megasas_alloc_cmds - Allocates the command packets 4378 * @instance: Adapter soft state 4379 * 4380 * Each command that is issued to the FW, whether IO commands from the OS or 4381 * internal commands like IOCTLs, are wrapped in local data structure called 4382 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4383 * the FW. 4384 * 4385 * Each frame has a 32-bit field called context (tag). This context is used 4386 * to get back the megasas_cmd from the frame when a frame gets completed in 4387 * the ISR. Typically the address of the megasas_cmd itself would be used as 4388 * the context. But we wanted to keep the differences between 32 and 64 bit 4389 * systems to the mininum. We always use 32 bit integers for the context. In 4390 * this driver, the 32 bit values are the indices into an array cmd_list. 4391 * This array is used only to look up the megasas_cmd given the context. The 4392 * free commands themselves are maintained in a linked list called cmd_pool. 
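 * For example, when a frame completes, the completion path takes the
 * 32-bit context C from the frame and recovers the command simply as
 * instance->cmd_list[C].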
4393 */ 4394 int megasas_alloc_cmds(struct megasas_instance *instance) 4395 { 4396 int i; 4397 int j; 4398 u16 max_cmd; 4399 struct megasas_cmd *cmd; 4400 4401 max_cmd = instance->max_mfi_cmds; 4402 4403 /* 4404 * instance->cmd_list is an array of struct megasas_cmd pointers. 4405 * Allocate the dynamic array first and then allocate individual 4406 * commands. 4407 */ 4408 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4409 4410 if (!instance->cmd_list) { 4411 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4412 return -ENOMEM; 4413 } 4414 4415 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd); 4416 4417 for (i = 0; i < max_cmd; i++) { 4418 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4419 GFP_KERNEL); 4420 4421 if (!instance->cmd_list[i]) { 4422 4423 for (j = 0; j < i; j++) 4424 kfree(instance->cmd_list[j]); 4425 4426 kfree(instance->cmd_list); 4427 instance->cmd_list = NULL; 4428 4429 return -ENOMEM; 4430 } 4431 } 4432 4433 for (i = 0; i < max_cmd; i++) { 4434 cmd = instance->cmd_list[i]; 4435 memset(cmd, 0, sizeof(struct megasas_cmd)); 4436 cmd->index = i; 4437 cmd->scmd = NULL; 4438 cmd->instance = instance; 4439 4440 list_add_tail(&cmd->list, &instance->cmd_pool); 4441 } 4442 4443 /* 4444 * Create a frame pool and assign one frame to each cmd 4445 */ 4446 if (megasas_create_frame_pool(instance)) { 4447 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4448 megasas_free_cmds(instance); 4449 return -ENOMEM; 4450 } 4451 4452 return 0; 4453 } 4454 4455 /* 4456 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4457 * @instance: Adapter soft state 4458 * 4459 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4460 * or FW is not under OCR. 
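 * Concretely: MFI-series adapters get KILL_ADAPTER (no OCR support),
 * a driver unload or an OCR-not-possible window gets IGNORE_TIMEOUT,
 * and everything else (a Fusion adapter in normal operation) gets
 * INITIATE_OCR.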
4461 */ 4462 inline int 4463 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4464 4465 if (instance->adapter_type == MFI_SERIES) 4466 return KILL_ADAPTER; 4467 else if (instance->unload || 4468 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, 4469 &instance->reset_flags)) 4470 return IGNORE_TIMEOUT; 4471 else 4472 return INITIATE_OCR; 4473 } 4474 4475 static void 4476 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4477 { 4478 int ret; 4479 struct megasas_cmd *cmd; 4480 struct megasas_dcmd_frame *dcmd; 4481 4482 struct MR_PRIV_DEVICE *mr_device_priv_data; 4483 u16 device_id = 0; 4484 4485 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4486 cmd = megasas_get_cmd(instance); 4487 4488 if (!cmd) { 4489 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4490 return; 4491 } 4492 4493 dcmd = &cmd->frame->dcmd; 4494 4495 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4496 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4497 4498 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4499 dcmd->cmd = MFI_CMD_DCMD; 4500 dcmd->cmd_status = 0xFF; 4501 dcmd->sge_count = 1; 4502 dcmd->flags = MFI_FRAME_DIR_READ; 4503 dcmd->timeout = 0; 4504 dcmd->pad_0 = 0; 4505 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4506 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4507 4508 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4509 sizeof(struct MR_PD_INFO)); 4510 4511 if ((instance->adapter_type != MFI_SERIES) && 4512 !instance->mask_interrupts) 4513 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4514 else 4515 ret = megasas_issue_polled(instance, cmd); 4516 4517 switch (ret) { 4518 case DCMD_SUCCESS: 4519 mr_device_priv_data = sdev->hostdata; 4520 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4521 mr_device_priv_data->interface_type = 4522 instance->pd_info->state.ddf.pdType.intf; 4523 break; 4524 4525 case DCMD_TIMEOUT: 4526 4527 switch (dcmd_timeout_ocr_possible(instance)) { 4528 case INITIATE_OCR: 4529 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4530 mutex_unlock(&instance->reset_mutex); 4531 megasas_reset_fusion(instance->host, 4532 MFI_IO_TIMEOUT_OCR); 4533 mutex_lock(&instance->reset_mutex); 4534 break; 4535 case KILL_ADAPTER: 4536 megaraid_sas_kill_hba(instance); 4537 break; 4538 case IGNORE_TIMEOUT: 4539 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4540 __func__, __LINE__); 4541 break; 4542 } 4543 4544 break; 4545 } 4546 4547 if (ret != DCMD_TIMEOUT) 4548 megasas_return_cmd(instance, cmd); 4549 4550 return; 4551 } 4552 /* 4553 * megasas_get_pd_list_info - Returns FW's pd_list structure 4554 * @instance: Adapter soft state 4555 * @pd_list: pd_list structure 4556 * 4557 * Issues an internal command (DCMD) to get the FW's controller PD 4558 * list structure. This information is mainly used to find out SYSTEM 4559 * supported by the FW. 
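 * On success the returned MR_PD_ADDRESS entries are copied into
 * instance->local_pd_list, indexed by target id, and then mirrored
 * into instance->pd_list.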
4560 */ 4561 static int 4562 megasas_get_pd_list(struct megasas_instance *instance) 4563 { 4564 int ret = 0, pd_index = 0; 4565 struct megasas_cmd *cmd; 4566 struct megasas_dcmd_frame *dcmd; 4567 struct MR_PD_LIST *ci; 4568 struct MR_PD_ADDRESS *pd_addr; 4569 4570 if (instance->pd_list_not_supported) { 4571 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4572 "not supported by firmware\n"); 4573 return ret; 4574 } 4575 4576 ci = instance->pd_list_buf; 4577 4578 cmd = megasas_get_cmd(instance); 4579 4580 if (!cmd) { 4581 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4582 return -ENOMEM; 4583 } 4584 4585 dcmd = &cmd->frame->dcmd; 4586 4587 memset(ci, 0, sizeof(*ci)); 4588 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4589 4590 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4591 dcmd->mbox.b[1] = 0; 4592 dcmd->cmd = MFI_CMD_DCMD; 4593 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4594 dcmd->sge_count = 1; 4595 dcmd->flags = MFI_FRAME_DIR_READ; 4596 dcmd->timeout = 0; 4597 dcmd->pad_0 = 0; 4598 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4599 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4600 4601 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4602 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4603 4604 if ((instance->adapter_type != MFI_SERIES) && 4605 !instance->mask_interrupts) 4606 ret = megasas_issue_blocked_cmd(instance, cmd, 4607 MFI_IO_TIMEOUT_SECS); 4608 else 4609 ret = megasas_issue_polled(instance, cmd); 4610 4611 switch (ret) { 4612 case DCMD_FAILED: 4613 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4614 "failed/not supported by firmware\n"); 4615 4616 if (instance->adapter_type != MFI_SERIES) 4617 megaraid_sas_kill_hba(instance); 4618 else 4619 instance->pd_list_not_supported = 1; 4620 break; 4621 case DCMD_TIMEOUT: 4622 4623 switch (dcmd_timeout_ocr_possible(instance)) { 4624 case INITIATE_OCR: 4625 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4626 /* 4627 * DCMD failed from AEN path. 4628 * AEN path already hold reset_mutex to avoid PCI access 4629 * while OCR is in progress. 
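 * Hence reset_mutex is dropped around megasas_reset_fusion() below and
 * re-acquired once the OCR has run.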
4630 */ 4631 mutex_unlock(&instance->reset_mutex); 4632 megasas_reset_fusion(instance->host, 4633 MFI_IO_TIMEOUT_OCR); 4634 mutex_lock(&instance->reset_mutex); 4635 break; 4636 case KILL_ADAPTER: 4637 megaraid_sas_kill_hba(instance); 4638 break; 4639 case IGNORE_TIMEOUT: 4640 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4641 __func__, __LINE__); 4642 break; 4643 } 4644 4645 break; 4646 4647 case DCMD_SUCCESS: 4648 pd_addr = ci->addr; 4649 if (megasas_dbg_lvl & LD_PD_DEBUG) 4650 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4651 __func__, le32_to_cpu(ci->count)); 4652 4653 if ((le32_to_cpu(ci->count) > 4654 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4655 break; 4656 4657 memset(instance->local_pd_list, 0, 4658 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4659 4660 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4661 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4662 le16_to_cpu(pd_addr->deviceId); 4663 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4664 pd_addr->scsiDevType; 4665 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4666 MR_PD_STATE_SYSTEM; 4667 if (megasas_dbg_lvl & LD_PD_DEBUG) 4668 dev_info(&instance->pdev->dev, 4669 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4670 pd_index, le16_to_cpu(pd_addr->deviceId), 4671 pd_addr->scsiDevType); 4672 pd_addr++; 4673 } 4674 4675 memcpy(instance->pd_list, instance->local_pd_list, 4676 sizeof(instance->pd_list)); 4677 break; 4678 4679 } 4680 4681 if (ret != DCMD_TIMEOUT) 4682 megasas_return_cmd(instance, cmd); 4683 4684 return ret; 4685 } 4686 4687 /* 4688 * megasas_get_ld_list_info - Returns FW's ld_list structure 4689 * @instance: Adapter soft state 4690 * @ld_list: ld_list structure 4691 * 4692 * Issues an internal command (DCMD) to get the FW's controller PD 4693 * list structure. This information is mainly used to find out SYSTEM 4694 * supported by the FW. 
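 * On success instance->ld_ids[] is rebuilt from the returned list:
 * every LD with a non-zero state is recorded by target id, provided the
 * reported count does not exceed fw_supported_vd_count.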
4695 */ 4696 static int 4697 megasas_get_ld_list(struct megasas_instance *instance) 4698 { 4699 int ret = 0, ld_index = 0, ids = 0; 4700 struct megasas_cmd *cmd; 4701 struct megasas_dcmd_frame *dcmd; 4702 struct MR_LD_LIST *ci; 4703 dma_addr_t ci_h = 0; 4704 u32 ld_count; 4705 4706 ci = instance->ld_list_buf; 4707 ci_h = instance->ld_list_buf_h; 4708 4709 cmd = megasas_get_cmd(instance); 4710 4711 if (!cmd) { 4712 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4713 return -ENOMEM; 4714 } 4715 4716 dcmd = &cmd->frame->dcmd; 4717 4718 memset(ci, 0, sizeof(*ci)); 4719 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4720 4721 if (instance->supportmax256vd) 4722 dcmd->mbox.b[0] = 1; 4723 dcmd->cmd = MFI_CMD_DCMD; 4724 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4725 dcmd->sge_count = 1; 4726 dcmd->flags = MFI_FRAME_DIR_READ; 4727 dcmd->timeout = 0; 4728 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4729 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4730 dcmd->pad_0 = 0; 4731 4732 megasas_set_dma_settings(instance, dcmd, ci_h, 4733 sizeof(struct MR_LD_LIST)); 4734 4735 if ((instance->adapter_type != MFI_SERIES) && 4736 !instance->mask_interrupts) 4737 ret = megasas_issue_blocked_cmd(instance, cmd, 4738 MFI_IO_TIMEOUT_SECS); 4739 else 4740 ret = megasas_issue_polled(instance, cmd); 4741 4742 ld_count = le32_to_cpu(ci->ldCount); 4743 4744 switch (ret) { 4745 case DCMD_FAILED: 4746 megaraid_sas_kill_hba(instance); 4747 break; 4748 case DCMD_TIMEOUT: 4749 4750 switch (dcmd_timeout_ocr_possible(instance)) { 4751 case INITIATE_OCR: 4752 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4753 /* 4754 * DCMD failed from AEN path. 4755 * AEN path already hold reset_mutex to avoid PCI access 4756 * while OCR is in progress. 4757 */ 4758 mutex_unlock(&instance->reset_mutex); 4759 megasas_reset_fusion(instance->host, 4760 MFI_IO_TIMEOUT_OCR); 4761 mutex_lock(&instance->reset_mutex); 4762 break; 4763 case KILL_ADAPTER: 4764 megaraid_sas_kill_hba(instance); 4765 break; 4766 case IGNORE_TIMEOUT: 4767 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4768 __func__, __LINE__); 4769 break; 4770 } 4771 4772 break; 4773 4774 case DCMD_SUCCESS: 4775 if (megasas_dbg_lvl & LD_PD_DEBUG) 4776 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4777 __func__, ld_count); 4778 4779 if (ld_count > instance->fw_supported_vd_count) 4780 break; 4781 4782 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4783 4784 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4785 if (ci->ldList[ld_index].state != 0) { 4786 ids = ci->ldList[ld_index].ref.targetId; 4787 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4788 if (megasas_dbg_lvl & LD_PD_DEBUG) 4789 dev_info(&instance->pdev->dev, 4790 "LD%d: targetID: 0x%03x\n", 4791 ld_index, ids); 4792 } 4793 } 4794 4795 break; 4796 } 4797 4798 if (ret != DCMD_TIMEOUT) 4799 megasas_return_cmd(instance, cmd); 4800 4801 return ret; 4802 } 4803 4804 /** 4805 * megasas_ld_list_query - Returns FW's ld_list structure 4806 * @instance: Adapter soft state 4807 * @query_type: ld_list structure type 4808 * 4809 * Issues an internal command (DCMD) to get the FW's controller PD 4810 * list structure. This information is mainly used to find out SYSTEM 4811 * supported by the FW. 
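 * If the firmware rejects this query (DCMD_FAILED), the driver falls
 * back to the older MR_DCMD_LD_GET_LIST via megasas_get_ld_list().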
4812 */ 4813 static int 4814 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4815 { 4816 int ret = 0, ld_index = 0, ids = 0; 4817 struct megasas_cmd *cmd; 4818 struct megasas_dcmd_frame *dcmd; 4819 struct MR_LD_TARGETID_LIST *ci; 4820 dma_addr_t ci_h = 0; 4821 u32 tgtid_count; 4822 4823 ci = instance->ld_targetid_list_buf; 4824 ci_h = instance->ld_targetid_list_buf_h; 4825 4826 cmd = megasas_get_cmd(instance); 4827 4828 if (!cmd) { 4829 dev_warn(&instance->pdev->dev, 4830 "megasas_ld_list_query: Failed to get cmd\n"); 4831 return -ENOMEM; 4832 } 4833 4834 dcmd = &cmd->frame->dcmd; 4835 4836 memset(ci, 0, sizeof(*ci)); 4837 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4838 4839 dcmd->mbox.b[0] = query_type; 4840 if (instance->supportmax256vd) 4841 dcmd->mbox.b[2] = 1; 4842 4843 dcmd->cmd = MFI_CMD_DCMD; 4844 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4845 dcmd->sge_count = 1; 4846 dcmd->flags = MFI_FRAME_DIR_READ; 4847 dcmd->timeout = 0; 4848 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4849 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4850 dcmd->pad_0 = 0; 4851 4852 megasas_set_dma_settings(instance, dcmd, ci_h, 4853 sizeof(struct MR_LD_TARGETID_LIST)); 4854 4855 if ((instance->adapter_type != MFI_SERIES) && 4856 !instance->mask_interrupts) 4857 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4858 else 4859 ret = megasas_issue_polled(instance, cmd); 4860 4861 switch (ret) { 4862 case DCMD_FAILED: 4863 dev_info(&instance->pdev->dev, 4864 "DCMD not supported by firmware - %s %d\n", 4865 __func__, __LINE__); 4866 ret = megasas_get_ld_list(instance); 4867 break; 4868 case DCMD_TIMEOUT: 4869 switch (dcmd_timeout_ocr_possible(instance)) { 4870 case INITIATE_OCR: 4871 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4872 /* 4873 * DCMD failed from AEN path. 4874 * AEN path already hold reset_mutex to avoid PCI access 4875 * while OCR is in progress. 
4876 */ 4877 mutex_unlock(&instance->reset_mutex); 4878 megasas_reset_fusion(instance->host, 4879 MFI_IO_TIMEOUT_OCR); 4880 mutex_lock(&instance->reset_mutex); 4881 break; 4882 case KILL_ADAPTER: 4883 megaraid_sas_kill_hba(instance); 4884 break; 4885 case IGNORE_TIMEOUT: 4886 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4887 __func__, __LINE__); 4888 break; 4889 } 4890 4891 break; 4892 case DCMD_SUCCESS: 4893 tgtid_count = le32_to_cpu(ci->count); 4894 4895 if (megasas_dbg_lvl & LD_PD_DEBUG) 4896 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4897 __func__, tgtid_count); 4898 4899 if ((tgtid_count > (instance->fw_supported_vd_count))) 4900 break; 4901 4902 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4903 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4904 ids = ci->targetId[ld_index]; 4905 instance->ld_ids[ids] = ci->targetId[ld_index]; 4906 if (megasas_dbg_lvl & LD_PD_DEBUG) 4907 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4908 ld_index, ci->targetId[ld_index]); 4909 } 4910 4911 break; 4912 } 4913 4914 if (ret != DCMD_TIMEOUT) 4915 megasas_return_cmd(instance, cmd); 4916 4917 return ret; 4918 } 4919 4920 /** 4921 * megasas_host_device_list_query 4922 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4923 * dcmd.mbox - reserved 4924 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4925 * Desc: This DCMD will return the combined device list 4926 * Status: MFI_STAT_OK - List returned successfully 4927 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4928 * disabled 4929 * @instance: Adapter soft state 4930 * @is_probe: Driver probe check 4931 * Return: 0 if DCMD succeeded 4932 * non-zero if failed 4933 */ 4934 static int 4935 megasas_host_device_list_query(struct megasas_instance *instance, 4936 bool is_probe) 4937 { 4938 int ret, i, target_id; 4939 struct megasas_cmd *cmd; 4940 struct megasas_dcmd_frame *dcmd; 4941 struct MR_HOST_DEVICE_LIST *ci; 4942 u32 count; 4943 dma_addr_t ci_h; 4944 4945 ci = instance->host_device_list_buf; 4946 ci_h = instance->host_device_list_buf_h; 4947 4948 cmd = megasas_get_cmd(instance); 4949 4950 if (!cmd) { 4951 dev_warn(&instance->pdev->dev, 4952 "%s: failed to get cmd\n", 4953 __func__); 4954 return -ENOMEM; 4955 } 4956 4957 dcmd = &cmd->frame->dcmd; 4958 4959 memset(ci, 0, sizeof(*ci)); 4960 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4961 4962 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 4963 dcmd->cmd = MFI_CMD_DCMD; 4964 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4965 dcmd->sge_count = 1; 4966 dcmd->flags = MFI_FRAME_DIR_READ; 4967 dcmd->timeout = 0; 4968 dcmd->pad_0 = 0; 4969 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 4970 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 4971 4972 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 4973 4974 if (!instance->mask_interrupts) { 4975 ret = megasas_issue_blocked_cmd(instance, cmd, 4976 MFI_IO_TIMEOUT_SECS); 4977 } else { 4978 ret = megasas_issue_polled(instance, cmd); 4979 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4980 } 4981 4982 switch (ret) { 4983 case DCMD_SUCCESS: 4984 /* Fill the internal pd_list and ld_ids array based on 4985 * targetIds returned by FW 4986 */ 4987 count = le32_to_cpu(ci->count); 4988 4989 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 4990 break; 4991 4992 if (megasas_dbg_lvl & LD_PD_DEBUG) 4993 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 4994 __func__, count); 4995 4996 memset(instance->local_pd_list, 0, 4997 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4998 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4999 for (i = 0; i < count; i++) { 5000 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 5001 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 5002 instance->local_pd_list[target_id].tid = target_id; 5003 instance->local_pd_list[target_id].driveType = 5004 ci->host_device_list[i].scsi_type; 5005 instance->local_pd_list[target_id].driveState = 5006 MR_PD_STATE_SYSTEM; 5007 if (megasas_dbg_lvl & LD_PD_DEBUG) 5008 dev_info(&instance->pdev->dev, 5009 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 5010 i, target_id, ci->host_device_list[i].scsi_type); 5011 } else { 5012 instance->ld_ids[target_id] = target_id; 5013 if (megasas_dbg_lvl & LD_PD_DEBUG) 5014 dev_info(&instance->pdev->dev, 5015 "Device %d: LD targetID: 0x%03x\n", 5016 i, target_id); 5017 } 5018 } 5019 5020 memcpy(instance->pd_list, instance->local_pd_list, 5021 sizeof(instance->pd_list)); 5022 break; 5023 5024 case DCMD_TIMEOUT: 5025 switch (dcmd_timeout_ocr_possible(instance)) { 5026 case INITIATE_OCR: 5027 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5028 mutex_unlock(&instance->reset_mutex); 5029 megasas_reset_fusion(instance->host, 5030 MFI_IO_TIMEOUT_OCR); 5031 mutex_lock(&instance->reset_mutex); 5032 break; 5033 case KILL_ADAPTER: 5034 megaraid_sas_kill_hba(instance); 5035 break; 5036 case IGNORE_TIMEOUT: 5037 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5038 __func__, __LINE__); 5039 break; 5040 } 5041 break; 5042 case DCMD_FAILED: 5043 dev_err(&instance->pdev->dev, 5044 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 5045 __func__); 5046 break; 5047 } 5048 5049 if (ret != DCMD_TIMEOUT) 5050 megasas_return_cmd(instance, cmd); 5051 5052 return ret; 5053 } 5054 5055 /* 5056 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 5057 * instance : Controller's instance 5058 */ 5059 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 5060 { 5061 struct fusion_context *fusion; 5062 u32 ventura_map_sz = 0; 5063 5064 fusion = instance->ctrl_context; 5065 /* For MFI based controllers return dummy success */ 5066 if (!fusion) 5067 return; 5068 5069 instance->supportmax256vd = 5070 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 5071 /* Below is additional check to address future FW enhancement */ 5072 if (instance->ctrl_info_buf->max_lds > 64) 5073 instance->supportmax256vd = 1; 5074 5075 
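	/* supportmax256vd selects the extended limits and RAID map layout used below: MAX_LOGICAL_DRIVES_EXT virtual drives and, on controllers without max_raid_mapsize, the MR_FW_RAID_MAP_EXT sized map instead of the legacy MR_FW_RAID_MAP. */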
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 5076 * MEGASAS_MAX_DEV_PER_CHANNEL; 5077 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 5078 * MEGASAS_MAX_DEV_PER_CHANNEL; 5079 if (instance->supportmax256vd) { 5080 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 5081 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5082 } else { 5083 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5084 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5085 } 5086 5087 dev_info(&instance->pdev->dev, 5088 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 5089 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 5090 instance->ctrl_info_buf->max_lds); 5091 5092 if (instance->max_raid_mapsize) { 5093 ventura_map_sz = instance->max_raid_mapsize * 5094 MR_MIN_MAP_SIZE; /* 64k */ 5095 fusion->current_map_sz = ventura_map_sz; 5096 fusion->max_map_sz = ventura_map_sz; 5097 } else { 5098 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 5099 (sizeof(struct MR_LD_SPAN_MAP) * 5100 (instance->fw_supported_vd_count - 1)); 5101 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 5102 5103 fusion->max_map_sz = 5104 max(fusion->old_map_sz, fusion->new_map_sz); 5105 5106 if (instance->supportmax256vd) 5107 fusion->current_map_sz = fusion->new_map_sz; 5108 else 5109 fusion->current_map_sz = fusion->old_map_sz; 5110 } 5111 /* irrespective of FW raid maps, driver raid map is constant */ 5112 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 5113 } 5114 5115 /* 5116 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 5117 * dcmd.hdr.length - number of bytes to read 5118 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 5119 * Desc: Fill in snapdump properties 5120 * Status: MFI_STAT_OK- Command successful 5121 */ 5122 void megasas_get_snapdump_properties(struct megasas_instance *instance) 5123 { 5124 int ret = 0; 5125 struct megasas_cmd *cmd; 5126 struct megasas_dcmd_frame *dcmd; 5127 struct MR_SNAPDUMP_PROPERTIES *ci; 5128 dma_addr_t ci_h = 0; 5129 5130 ci = instance->snapdump_prop; 5131 ci_h = instance->snapdump_prop_h; 5132 5133 if (!ci) 5134 return; 5135 5136 cmd = megasas_get_cmd(instance); 5137 5138 if (!cmd) { 5139 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 5140 return; 5141 } 5142 5143 dcmd = &cmd->frame->dcmd; 5144 5145 memset(ci, 0, sizeof(*ci)); 5146 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5147 5148 dcmd->cmd = MFI_CMD_DCMD; 5149 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5150 dcmd->sge_count = 1; 5151 dcmd->flags = MFI_FRAME_DIR_READ; 5152 dcmd->timeout = 0; 5153 dcmd->pad_0 = 0; 5154 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5155 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5156 5157 megasas_set_dma_settings(instance, dcmd, ci_h, 5158 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5159 5160 if (!instance->mask_interrupts) { 5161 ret = megasas_issue_blocked_cmd(instance, cmd, 5162 MFI_IO_TIMEOUT_SECS); 5163 } else { 5164 ret = megasas_issue_polled(instance, cmd); 5165 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5166 } 5167 5168 switch (ret) { 5169 case DCMD_SUCCESS: 5170 instance->snapdump_wait_time = 5171 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5172 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5173 break; 5174 5175 case DCMD_TIMEOUT: 5176 switch (dcmd_timeout_ocr_possible(instance)) { 5177 case INITIATE_OCR: 5178 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5179 mutex_unlock(&instance->reset_mutex); 5180 megasas_reset_fusion(instance->host, 5181 MFI_IO_TIMEOUT_OCR); 5182 
mutex_lock(&instance->reset_mutex); 5183 break; 5184 case KILL_ADAPTER: 5185 megaraid_sas_kill_hba(instance); 5186 break; 5187 case IGNORE_TIMEOUT: 5188 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5189 __func__, __LINE__); 5190 break; 5191 } 5192 } 5193 5194 if (ret != DCMD_TIMEOUT) 5195 megasas_return_cmd(instance, cmd); 5196 } 5197 5198 /** 5199 * megasas_get_ctrl_info - Returns FW's controller structure 5200 * @instance: Adapter soft state 5201 * 5202 * Issues an internal command (DCMD) to get the FW's controller structure. 5203 * This information is mainly used to find out the maximum IO transfer per 5204 * command supported by the FW. 5205 */ 5206 int 5207 megasas_get_ctrl_info(struct megasas_instance *instance) 5208 { 5209 int ret = 0; 5210 struct megasas_cmd *cmd; 5211 struct megasas_dcmd_frame *dcmd; 5212 struct megasas_ctrl_info *ci; 5213 dma_addr_t ci_h = 0; 5214 5215 ci = instance->ctrl_info_buf; 5216 ci_h = instance->ctrl_info_buf_h; 5217 5218 cmd = megasas_get_cmd(instance); 5219 5220 if (!cmd) { 5221 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5222 return -ENOMEM; 5223 } 5224 5225 dcmd = &cmd->frame->dcmd; 5226 5227 memset(ci, 0, sizeof(*ci)); 5228 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5229 5230 dcmd->cmd = MFI_CMD_DCMD; 5231 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5232 dcmd->sge_count = 1; 5233 dcmd->flags = MFI_FRAME_DIR_READ; 5234 dcmd->timeout = 0; 5235 dcmd->pad_0 = 0; 5236 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5237 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5238 dcmd->mbox.b[0] = 1; 5239 5240 megasas_set_dma_settings(instance, dcmd, ci_h, 5241 sizeof(struct megasas_ctrl_info)); 5242 5243 if ((instance->adapter_type != MFI_SERIES) && 5244 !instance->mask_interrupts) { 5245 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5246 } else { 5247 ret = megasas_issue_polled(instance, cmd); 5248 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5249 } 5250 5251 switch (ret) { 5252 case DCMD_SUCCESS: 5253 /* Save required controller information in 5254 * CPU endianness format. 5255 */ 5256 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5257 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5258 le32_to_cpus((u32 *)&ci->adapterOperations2); 5259 le32_to_cpus((u32 *)&ci->adapterOperations3); 5260 le16_to_cpus((u16 *)&ci->adapter_operations4); 5261 le32_to_cpus((u32 *)&ci->adapter_operations5); 5262 5263 /* Update the latest Ext VD info. 5264 * From Init path, store current firmware details. 5265 * From OCR path, detect any firmware properties changes. 5266 * in case of Firmware upgrade without system reboot. 5267 */ 5268 megasas_update_ext_vd_details(instance); 5269 instance->support_seqnum_jbod_fp = 5270 ci->adapterOperations3.useSeqNumJbodFP; 5271 instance->support_morethan256jbod = 5272 ci->adapter_operations4.support_pd_map_target_id; 5273 instance->support_nvme_passthru = 5274 ci->adapter_operations4.support_nvme_passthru; 5275 instance->support_pci_lane_margining = 5276 ci->adapter_operations5.support_pci_lane_margining; 5277 instance->task_abort_tmo = ci->TaskAbortTO; 5278 instance->max_reset_tmo = ci->MaxResetTO; 5279 5280 /*Check whether controller is iMR or MR */ 5281 instance->is_imr = (ci->memory_size ? 0 : 1); 5282 5283 instance->snapdump_wait_time = 5284 (ci->properties.on_off_properties2.enable_snap_dump ? 
5285 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5286 5287 instance->enable_fw_dev_list = 5288 ci->properties.on_off_properties2.enable_fw_dev_list; 5289 5290 dev_info(&instance->pdev->dev, 5291 "controller type\t: %s(%dMB)\n", 5292 instance->is_imr ? "iMR" : "MR", 5293 le16_to_cpu(ci->memory_size)); 5294 5295 instance->disableOnlineCtrlReset = 5296 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5297 instance->secure_jbod_support = 5298 ci->adapterOperations3.supportSecurityonJBOD; 5299 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5300 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5301 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5302 instance->secure_jbod_support ? "Yes" : "No"); 5303 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5304 instance->support_nvme_passthru ? "Yes" : "No"); 5305 dev_info(&instance->pdev->dev, 5306 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5307 instance->task_abort_tmo, instance->max_reset_tmo); 5308 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5309 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5310 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5311 instance->support_pci_lane_margining ? "Yes" : "No"); 5312 5313 break; 5314 5315 case DCMD_TIMEOUT: 5316 switch (dcmd_timeout_ocr_possible(instance)) { 5317 case INITIATE_OCR: 5318 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5319 mutex_unlock(&instance->reset_mutex); 5320 megasas_reset_fusion(instance->host, 5321 MFI_IO_TIMEOUT_OCR); 5322 mutex_lock(&instance->reset_mutex); 5323 break; 5324 case KILL_ADAPTER: 5325 megaraid_sas_kill_hba(instance); 5326 break; 5327 case IGNORE_TIMEOUT: 5328 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5329 __func__, __LINE__); 5330 break; 5331 } 5332 break; 5333 case DCMD_FAILED: 5334 megaraid_sas_kill_hba(instance); 5335 break; 5336 5337 } 5338 5339 if (ret != DCMD_TIMEOUT) 5340 megasas_return_cmd(instance, cmd); 5341 5342 return ret; 5343 } 5344 5345 /* 5346 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5347 * to firmware 5348 * 5349 * @instance: Adapter soft state 5350 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5351 MR_CRASH_BUF_TURN_OFF = 0 5352 MR_CRASH_BUF_TURN_ON = 1 5353 * @return 0 on success non-zero on failure. 5354 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5355 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5356 * that driver supports crash dump feature. This DCMD will be sent only if 5357 * crash dump feature is supported by the FW. 
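 * The requested state is carried in mbox byte 0 and the frame's SGE
 * points at the driver's crash dump DMA buffer (crash_dump_h,
 * CRASH_DMA_BUF_SIZE bytes), as set up below.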
5358 * 5359 */ 5360 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5361 u8 crash_buf_state) 5362 { 5363 int ret = 0; 5364 struct megasas_cmd *cmd; 5365 struct megasas_dcmd_frame *dcmd; 5366 5367 cmd = megasas_get_cmd(instance); 5368 5369 if (!cmd) { 5370 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5371 return -ENOMEM; 5372 } 5373 5374 5375 dcmd = &cmd->frame->dcmd; 5376 5377 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5378 dcmd->mbox.b[0] = crash_buf_state; 5379 dcmd->cmd = MFI_CMD_DCMD; 5380 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5381 dcmd->sge_count = 1; 5382 dcmd->flags = MFI_FRAME_DIR_NONE; 5383 dcmd->timeout = 0; 5384 dcmd->pad_0 = 0; 5385 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5386 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5387 5388 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5389 CRASH_DMA_BUF_SIZE); 5390 5391 if ((instance->adapter_type != MFI_SERIES) && 5392 !instance->mask_interrupts) 5393 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5394 else 5395 ret = megasas_issue_polled(instance, cmd); 5396 5397 if (ret == DCMD_TIMEOUT) { 5398 switch (dcmd_timeout_ocr_possible(instance)) { 5399 case INITIATE_OCR: 5400 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5401 megasas_reset_fusion(instance->host, 5402 MFI_IO_TIMEOUT_OCR); 5403 break; 5404 case KILL_ADAPTER: 5405 megaraid_sas_kill_hba(instance); 5406 break; 5407 case IGNORE_TIMEOUT: 5408 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5409 __func__, __LINE__); 5410 break; 5411 } 5412 } else 5413 megasas_return_cmd(instance, cmd); 5414 5415 return ret; 5416 } 5417 5418 /** 5419 * megasas_issue_init_mfi - Initializes the FW 5420 * @instance: Adapter soft state 5421 * 5422 * Issues the INIT MFI cmd 5423 */ 5424 static int 5425 megasas_issue_init_mfi(struct megasas_instance *instance) 5426 { 5427 __le32 context; 5428 struct megasas_cmd *cmd; 5429 struct megasas_init_frame *init_frame; 5430 struct megasas_init_queue_info *initq_info; 5431 dma_addr_t init_frame_h; 5432 dma_addr_t initq_info_h; 5433 5434 /* 5435 * Prepare a init frame. Note the init frame points to queue info 5436 * structure. Each frame has SGL allocated after first 64 bytes. For 5437 * this frame - since we don't need any SGL - we use SGL's space as 5438 * queue info structure 5439 * 5440 * We will not get a NULL command below. We just created the pool. 
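 * Concretely, initq_info sits at frame + 64 and its bus address is
 * frame_phys_addr + 64; these are the values programmed into the init
 * frame's queue_info_new_phys_addr fields below.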
5441 */ 5442 cmd = megasas_get_cmd(instance); 5443 5444 init_frame = (struct megasas_init_frame *)cmd->frame; 5445 initq_info = (struct megasas_init_queue_info *) 5446 ((unsigned long)init_frame + 64); 5447 5448 init_frame_h = cmd->frame_phys_addr; 5449 initq_info_h = init_frame_h + 64; 5450 5451 context = init_frame->context; 5452 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5453 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5454 init_frame->context = context; 5455 5456 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5457 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5458 5459 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5460 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5461 5462 init_frame->cmd = MFI_CMD_INIT; 5463 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5464 init_frame->queue_info_new_phys_addr_lo = 5465 cpu_to_le32(lower_32_bits(initq_info_h)); 5466 init_frame->queue_info_new_phys_addr_hi = 5467 cpu_to_le32(upper_32_bits(initq_info_h)); 5468 5469 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5470 5471 /* 5472 * disable the intr before firing the init frame to FW 5473 */ 5474 instance->instancet->disable_intr(instance); 5475 5476 /* 5477 * Issue the init frame in polled mode 5478 */ 5479 5480 if (megasas_issue_polled(instance, cmd)) { 5481 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5482 megasas_return_cmd(instance, cmd); 5483 goto fail_fw_init; 5484 } 5485 5486 megasas_return_cmd(instance, cmd); 5487 5488 return 0; 5489 5490 fail_fw_init: 5491 return -EINVAL; 5492 } 5493 5494 static u32 5495 megasas_init_adapter_mfi(struct megasas_instance *instance) 5496 { 5497 u32 context_sz; 5498 u32 reply_q_sz; 5499 5500 /* 5501 * Get various operational parameters from status register 5502 */ 5503 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5504 /* 5505 * Reduce the max supported cmds by 1. This is to ensure that the 5506 * reply_q_sz (1 more than the max cmd that driver may send) 5507 * does not exceed max cmds that the FW can support 5508 */ 5509 instance->max_fw_cmds = instance->max_fw_cmds-1; 5510 instance->max_mfi_cmds = instance->max_fw_cmds; 5511 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5512 0x10; 5513 /* 5514 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5515 * are reserved for IOCTL + driver's internal DCMDs. 5516 */ 5517 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5518 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5519 instance->max_scsi_cmds = (instance->max_fw_cmds - 5520 MEGASAS_SKINNY_INT_CMDS); 5521 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5522 } else { 5523 instance->max_scsi_cmds = (instance->max_fw_cmds - 5524 MEGASAS_INT_CMDS); 5525 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5526 } 5527 5528 instance->cur_can_queue = instance->max_scsi_cmds; 5529 /* 5530 * Create a pool of commands 5531 */ 5532 if (megasas_alloc_cmds(instance)) 5533 goto fail_alloc_cmds; 5534 5535 /* 5536 * Allocate memory for reply queue. Length of reply queue should 5537 * be _one_ more than the maximum commands handled by the firmware. 5538 * 5539 * Note: When FW completes commands, it places corresponding contex 5540 * values in this circular reply queue. This circular queue is a fairly 5541 * typical producer-consumer queue. 
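 * (The extra slot is the usual ring-buffer margin so that a completely
 * full queue is never confused with an empty one.)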
FW is the producer (of completed 5542 * commands) and the driver is the consumer. 5543 */ 5544 context_sz = sizeof(u32); 5545 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5546 5547 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5548 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5549 5550 if (!instance->reply_queue) { 5551 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5552 goto fail_reply_queue; 5553 } 5554 5555 if (megasas_issue_init_mfi(instance)) 5556 goto fail_fw_init; 5557 5558 if (megasas_get_ctrl_info(instance)) { 5559 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5560 "Fail from %s %d\n", instance->unique_id, 5561 __func__, __LINE__); 5562 goto fail_fw_init; 5563 } 5564 5565 instance->fw_support_ieee = 0; 5566 instance->fw_support_ieee = 5567 (instance->instancet->read_fw_status_reg(instance) & 5568 0x04000000); 5569 5570 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5571 instance->fw_support_ieee); 5572 5573 if (instance->fw_support_ieee) 5574 instance->flag_ieee = 1; 5575 5576 return 0; 5577 5578 fail_fw_init: 5579 5580 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5581 instance->reply_queue, instance->reply_queue_h); 5582 fail_reply_queue: 5583 megasas_free_cmds(instance); 5584 5585 fail_alloc_cmds: 5586 return 1; 5587 } 5588 5589 static 5590 void megasas_setup_irq_poll(struct megasas_instance *instance) 5591 { 5592 struct megasas_irq_context *irq_ctx; 5593 u32 count, i; 5594 5595 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5596 5597 /* Initialize IRQ poll */ 5598 for (i = 0; i < count; i++) { 5599 irq_ctx = &instance->irq_context[i]; 5600 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5601 irq_ctx->irq_poll_scheduled = false; 5602 irq_poll_init(&irq_ctx->irqpoll, 5603 instance->threshold_reply_count, 5604 megasas_irqpoll); 5605 } 5606 } 5607 5608 /* 5609 * megasas_setup_irqs_ioapic - register legacy interrupts. 5610 * @instance: Adapter soft state 5611 * 5612 * Do not enable interrupt, only setup ISRs. 5613 * 5614 * Return 0 on success. 5615 */ 5616 static int 5617 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5618 { 5619 struct pci_dev *pdev; 5620 5621 pdev = instance->pdev; 5622 instance->irq_context[0].instance = instance; 5623 instance->irq_context[0].MSIxIndex = 0; 5624 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", 5625 "megasas", instance->host->host_no); 5626 if (request_irq(pci_irq_vector(pdev, 0), 5627 instance->instancet->service_isr, IRQF_SHARED, 5628 instance->irq_context->name, &instance->irq_context[0])) { 5629 dev_err(&instance->pdev->dev, 5630 "Failed to register IRQ from %s %d\n", 5631 __func__, __LINE__); 5632 return -1; 5633 } 5634 instance->perf_mode = MR_LATENCY_PERF_MODE; 5635 instance->low_latency_index_start = 0; 5636 return 0; 5637 } 5638 5639 /** 5640 * megasas_setup_irqs_msix - register MSI-x interrupts. 5641 * @instance: Adapter soft state 5642 * @is_probe: Driver probe check 5643 * 5644 * Do not enable interrupt, only setup ISRs. 5645 * 5646 * Return 0 on success. 
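 *
 * Each vector is registered under a name of the form "megasas%u-msix%u"
 * (host number, then vector index), so vector 3 of Scsi_Host 0 appears
 * in /proc/interrupts as "megasas0-msix3", for example. If any
 * request_irq() call fails, the vectors registered so far are freed
 * again and, when called at probe time, the driver falls back to a
 * single legacy/IO-APIC interrupt via megasas_setup_irqs_ioapic().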
5647 */ 5648 static int 5649 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5650 { 5651 int i, j; 5652 struct pci_dev *pdev; 5653 5654 pdev = instance->pdev; 5655 5656 /* Try MSI-x */ 5657 for (i = 0; i < instance->msix_vectors; i++) { 5658 instance->irq_context[i].instance = instance; 5659 instance->irq_context[i].MSIxIndex = i; 5660 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", 5661 "megasas", instance->host->host_no, i); 5662 if (request_irq(pci_irq_vector(pdev, i), 5663 instance->instancet->service_isr, 0, instance->irq_context[i].name, 5664 &instance->irq_context[i])) { 5665 dev_err(&instance->pdev->dev, 5666 "Failed to register IRQ for vector %d.\n", i); 5667 for (j = 0; j < i; j++) { 5668 if (j < instance->low_latency_index_start) 5669 irq_set_affinity_hint( 5670 pci_irq_vector(pdev, j), NULL); 5671 free_irq(pci_irq_vector(pdev, j), 5672 &instance->irq_context[j]); 5673 } 5674 /* Retry irq register for IO_APIC*/ 5675 instance->msix_vectors = 0; 5676 instance->msix_load_balance = false; 5677 if (is_probe) { 5678 pci_free_irq_vectors(instance->pdev); 5679 return megasas_setup_irqs_ioapic(instance); 5680 } else { 5681 return -1; 5682 } 5683 } 5684 } 5685 5686 return 0; 5687 } 5688 5689 /* 5690 * megasas_destroy_irqs- unregister interrupts. 5691 * @instance: Adapter soft state 5692 * return: void 5693 */ 5694 static void 5695 megasas_destroy_irqs(struct megasas_instance *instance) { 5696 5697 int i; 5698 int count; 5699 struct megasas_irq_context *irq_ctx; 5700 5701 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5702 if (instance->adapter_type != MFI_SERIES) { 5703 for (i = 0; i < count; i++) { 5704 irq_ctx = &instance->irq_context[i]; 5705 irq_poll_disable(&irq_ctx->irqpoll); 5706 } 5707 } 5708 5709 if (instance->msix_vectors) 5710 for (i = 0; i < instance->msix_vectors; i++) { 5711 if (i < instance->low_latency_index_start) 5712 irq_set_affinity_hint( 5713 pci_irq_vector(instance->pdev, i), NULL); 5714 free_irq(pci_irq_vector(instance->pdev, i), 5715 &instance->irq_context[i]); 5716 } 5717 else 5718 free_irq(pci_irq_vector(instance->pdev, 0), 5719 &instance->irq_context[0]); 5720 } 5721 5722 /** 5723 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5724 * @instance: Adapter soft state 5725 * 5726 * Return 0 on success. 
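 *
 * A sizing sketch for the JBOD sequence maps allocated below (the
 * "- 1" assumes, as is usual for such firmware structures, that
 * MR_PD_CFG_SEQ_NUM_SYNC already embeds one MR_PD_CFG_SEQ entry):
 *
 *   pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 *       sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1);
 *
 * JBOD_MAPS_COUNT such buffers are kept in coherent DMA memory and
 * megasas_sync_pd_seq_num() is then run against firmware before
 * use_seqnum_jbod_fp is finally turned on.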
5727 */ 5728 void 5729 megasas_setup_jbod_map(struct megasas_instance *instance) 5730 { 5731 int i; 5732 struct fusion_context *fusion = instance->ctrl_context; 5733 u32 pd_seq_map_sz; 5734 5735 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5736 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5737 5738 instance->use_seqnum_jbod_fp = 5739 instance->support_seqnum_jbod_fp; 5740 if (reset_devices || !fusion || 5741 !instance->support_seqnum_jbod_fp) { 5742 dev_info(&instance->pdev->dev, 5743 "JBOD sequence map is disabled %s %d\n", 5744 __func__, __LINE__); 5745 instance->use_seqnum_jbod_fp = false; 5746 return; 5747 } 5748 5749 if (fusion->pd_seq_sync[0]) 5750 goto skip_alloc; 5751 5752 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5753 fusion->pd_seq_sync[i] = dma_alloc_coherent 5754 (&instance->pdev->dev, pd_seq_map_sz, 5755 &fusion->pd_seq_phys[i], GFP_KERNEL); 5756 if (!fusion->pd_seq_sync[i]) { 5757 dev_err(&instance->pdev->dev, 5758 "Failed to allocate memory from %s %d\n", 5759 __func__, __LINE__); 5760 if (i == 1) { 5761 dma_free_coherent(&instance->pdev->dev, 5762 pd_seq_map_sz, fusion->pd_seq_sync[0], 5763 fusion->pd_seq_phys[0]); 5764 fusion->pd_seq_sync[0] = NULL; 5765 } 5766 instance->use_seqnum_jbod_fp = false; 5767 return; 5768 } 5769 } 5770 5771 skip_alloc: 5772 if (!megasas_sync_pd_seq_num(instance, false) && 5773 !megasas_sync_pd_seq_num(instance, true)) 5774 instance->use_seqnum_jbod_fp = true; 5775 else 5776 instance->use_seqnum_jbod_fp = false; 5777 } 5778 5779 static void megasas_setup_reply_map(struct megasas_instance *instance) 5780 { 5781 const struct cpumask *mask; 5782 unsigned int queue, cpu, low_latency_index_start; 5783 5784 low_latency_index_start = instance->low_latency_index_start; 5785 5786 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5787 mask = pci_irq_get_affinity(instance->pdev, queue); 5788 if (!mask) 5789 goto fallback; 5790 5791 for_each_cpu(cpu, mask) 5792 instance->reply_map[cpu] = queue; 5793 } 5794 return; 5795 5796 fallback: 5797 queue = low_latency_index_start; 5798 for_each_possible_cpu(cpu) { 5799 instance->reply_map[cpu] = queue; 5800 if (queue == (instance->msix_vectors - 1)) 5801 queue = low_latency_index_start; 5802 else 5803 queue++; 5804 } 5805 } 5806 5807 /** 5808 * megasas_get_device_list - Get the PD and LD device list from FW. 5809 * @instance: Adapter soft state 5810 * @return: Success or failure 5811 * 5812 * Issue DCMDs to Firmware to get the PD and LD list. 5813 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5814 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
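 *
 * In outline, the code below does:
 *
 *   if (instance->enable_fw_dev_list)
 *           megasas_host_device_list_query()  - one HOST_DEVICE_LIST DCMD
 *   else
 *           megasas_get_pd_list()             - physical devices, then
 *           megasas_ld_list_query(MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
 *
 * and any failure is propagated as FAILED so the caller can stop
 * initialization.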
5815 */ 5816 static 5817 int megasas_get_device_list(struct megasas_instance *instance) 5818 { 5819 memset(instance->pd_list, 0, 5820 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5821 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5822 5823 if (instance->enable_fw_dev_list) { 5824 if (megasas_host_device_list_query(instance, true)) 5825 return FAILED; 5826 } else { 5827 if (megasas_get_pd_list(instance) < 0) { 5828 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5829 return FAILED; 5830 } 5831 5832 if (megasas_ld_list_query(instance, 5833 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5834 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5835 return FAILED; 5836 } 5837 } 5838 5839 return SUCCESS; 5840 } 5841 5842 /** 5843 * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues 5844 * @instance: Adapter soft state 5845 * return: void 5846 */ 5847 static inline void 5848 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance) 5849 { 5850 int i; 5851 int local_numa_node; 5852 5853 if (instance->perf_mode == MR_BALANCED_PERF_MODE) { 5854 local_numa_node = dev_to_node(&instance->pdev->dev); 5855 5856 for (i = 0; i < instance->low_latency_index_start; i++) 5857 irq_set_affinity_hint(pci_irq_vector(instance->pdev, i), 5858 cpumask_of_node(local_numa_node)); 5859 } 5860 } 5861 5862 static int 5863 __megasas_alloc_irq_vectors(struct megasas_instance *instance) 5864 { 5865 int i, irq_flags; 5866 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; 5867 struct irq_affinity *descp = &desc; 5868 5869 irq_flags = PCI_IRQ_MSIX; 5870 5871 if (instance->smp_affinity_enable) 5872 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 5873 else 5874 descp = NULL; 5875 5876 /* Do not allocate msix vectors for poll_queues. 5877 * msix_vectors is always within a range of FW supported reply queue. 5878 */ 5879 i = pci_alloc_irq_vectors_affinity(instance->pdev, 5880 instance->low_latency_index_start, 5881 instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); 5882 5883 return i; 5884 } 5885 5886 /** 5887 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors 5888 * @instance: Adapter soft state 5889 * return: void 5890 */ 5891 static void 5892 megasas_alloc_irq_vectors(struct megasas_instance *instance) 5893 { 5894 int i; 5895 unsigned int num_msix_req; 5896 5897 instance->iopoll_q_count = 0; 5898 if ((instance->adapter_type != MFI_SERIES) && 5899 poll_queues) { 5900 5901 instance->perf_mode = MR_LATENCY_PERF_MODE; 5902 instance->low_latency_index_start = 1; 5903 5904 /* reserve for default and non-mananged pre-vector. 
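 * (Illustrative numbers only: with poll_queues=4, 16 online CPUs and a
 * firmware that exposes far more reply queues, iopoll_q_count becomes 4,
 * msix_vectors is clamped to 16 + 1 = 17 below, and the allocation that
 * follows then requests 17 - 4 = 13 interrupt-driven vectors, leaving 4
 * reply queues for io_uring polling.)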
*/ 5905 if (instance->msix_vectors > (poll_queues + 2)) 5906 instance->iopoll_q_count = poll_queues; 5907 else 5908 instance->iopoll_q_count = 0; 5909 5910 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5911 instance->msix_vectors = min(num_msix_req, 5912 instance->msix_vectors); 5913 5914 } 5915 5916 i = __megasas_alloc_irq_vectors(instance); 5917 5918 if (((instance->perf_mode == MR_BALANCED_PERF_MODE) 5919 || instance->iopoll_q_count) && 5920 (i != (instance->msix_vectors - instance->iopoll_q_count))) { 5921 if (instance->msix_vectors) 5922 pci_free_irq_vectors(instance->pdev); 5923 /* Disable Balanced IOPS mode and try realloc vectors */ 5924 instance->perf_mode = MR_LATENCY_PERF_MODE; 5925 instance->low_latency_index_start = 1; 5926 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5927 5928 instance->msix_vectors = min(num_msix_req, 5929 instance->msix_vectors); 5930 5931 instance->iopoll_q_count = 0; 5932 i = __megasas_alloc_irq_vectors(instance); 5933 5934 } 5935 5936 dev_info(&instance->pdev->dev, 5937 "requested/available msix %d/%d poll_queue %d\n", 5938 instance->msix_vectors - instance->iopoll_q_count, 5939 i, instance->iopoll_q_count); 5940 5941 if (i > 0) 5942 instance->msix_vectors = i; 5943 else 5944 instance->msix_vectors = 0; 5945 5946 if (instance->smp_affinity_enable) 5947 megasas_set_high_iops_queue_affinity_hint(instance); 5948 } 5949 5950 /** 5951 * megasas_init_fw - Initializes the FW 5952 * @instance: Adapter soft state 5953 * 5954 * This is the main function for initializing firmware 5955 */ 5956 5957 static int megasas_init_fw(struct megasas_instance *instance) 5958 { 5959 u32 max_sectors_1; 5960 u32 max_sectors_2, tmp_sectors, msix_enable; 5961 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 5962 resource_size_t base_addr; 5963 void *base_addr_phys; 5964 struct megasas_ctrl_info *ctrl_info = NULL; 5965 unsigned long bar_list; 5966 int i, j, loop; 5967 struct IOV_111 *iovPtr; 5968 struct fusion_context *fusion; 5969 bool intr_coalescing; 5970 unsigned int num_msix_req; 5971 u16 lnksta, speed; 5972 5973 fusion = instance->ctrl_context; 5974 5975 /* Find first memory bar */ 5976 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5977 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5978 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5979 "megasas: LSI")) { 5980 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5981 return -EBUSY; 5982 } 5983 5984 base_addr = pci_resource_start(instance->pdev, instance->bar); 5985 instance->reg_set = ioremap(base_addr, 8192); 5986 5987 if (!instance->reg_set) { 5988 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5989 goto fail_ioremap; 5990 } 5991 5992 base_addr_phys = &base_addr; 5993 dev_printk(KERN_DEBUG, &instance->pdev->dev, 5994 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 5995 instance->bar, base_addr_phys, instance->reg_set); 5996 5997 if (instance->adapter_type != MFI_SERIES) 5998 instance->instancet = &megasas_instance_template_fusion; 5999 else { 6000 switch (instance->pdev->device) { 6001 case PCI_DEVICE_ID_LSI_SAS1078R: 6002 case PCI_DEVICE_ID_LSI_SAS1078DE: 6003 instance->instancet = &megasas_instance_template_ppc; 6004 break; 6005 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 6006 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 6007 instance->instancet = &megasas_instance_template_gen2; 6008 break; 6009 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 6010 case 
PCI_DEVICE_ID_LSI_SAS0071SKINNY: 6011 instance->instancet = &megasas_instance_template_skinny; 6012 break; 6013 case PCI_DEVICE_ID_LSI_SAS1064R: 6014 case PCI_DEVICE_ID_DELL_PERC5: 6015 default: 6016 instance->instancet = &megasas_instance_template_xscale; 6017 instance->pd_list_not_supported = 1; 6018 break; 6019 } 6020 } 6021 6022 if (megasas_transition_to_ready(instance, 0)) { 6023 dev_info(&instance->pdev->dev, 6024 "Failed to transition controller to ready from %s!\n", 6025 __func__); 6026 if (instance->adapter_type != MFI_SERIES) { 6027 status_reg = instance->instancet->read_fw_status_reg( 6028 instance); 6029 if (status_reg & MFI_RESET_ADAPTER) { 6030 if (megasas_adp_reset_wait_for_ready 6031 (instance, true, 0) == FAILED) 6032 goto fail_ready_state; 6033 } else { 6034 goto fail_ready_state; 6035 } 6036 } else { 6037 atomic_set(&instance->fw_reset_no_pci_access, 1); 6038 instance->instancet->adp_reset 6039 (instance, instance->reg_set); 6040 atomic_set(&instance->fw_reset_no_pci_access, 0); 6041 6042 /*waiting for about 30 second before retry*/ 6043 ssleep(30); 6044 6045 if (megasas_transition_to_ready(instance, 0)) 6046 goto fail_ready_state; 6047 } 6048 6049 dev_info(&instance->pdev->dev, 6050 "FW restarted successfully from %s!\n", 6051 __func__); 6052 } 6053 6054 megasas_init_ctrl_params(instance); 6055 6056 if (megasas_set_dma_mask(instance)) 6057 goto fail_ready_state; 6058 6059 if (megasas_alloc_ctrl_mem(instance)) 6060 goto fail_alloc_dma_buf; 6061 6062 if (megasas_alloc_ctrl_dma_buffers(instance)) 6063 goto fail_alloc_dma_buf; 6064 6065 fusion = instance->ctrl_context; 6066 6067 if (instance->adapter_type >= VENTURA_SERIES) { 6068 scratch_pad_2 = 6069 megasas_readl(instance, 6070 &instance->reg_set->outbound_scratch_pad_2); 6071 instance->max_raid_mapsize = ((scratch_pad_2 >> 6072 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 6073 MR_MAX_RAID_MAP_SIZE_MASK); 6074 } 6075 6076 instance->enable_sdev_max_qd = enable_sdev_max_qd; 6077 6078 switch (instance->adapter_type) { 6079 case VENTURA_SERIES: 6080 fusion->pcie_bw_limitation = true; 6081 break; 6082 case AERO_SERIES: 6083 fusion->r56_div_offload = true; 6084 break; 6085 default: 6086 break; 6087 } 6088 6089 /* Check if MSI-X is supported while in ready state */ 6090 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 6091 0x4000000) >> 0x1a; 6092 if (msix_enable && !msix_disable) { 6093 6094 scratch_pad_1 = megasas_readl 6095 (instance, &instance->reg_set->outbound_scratch_pad_1); 6096 /* Check max MSI-X vectors */ 6097 if (fusion) { 6098 if (instance->adapter_type == THUNDERBOLT_SERIES) { 6099 /* Thunderbolt Series*/ 6100 instance->msix_vectors = (scratch_pad_1 6101 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 6102 } else { 6103 instance->msix_vectors = ((scratch_pad_1 6104 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 6105 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 6106 6107 /* 6108 * For Invader series, > 8 MSI-x vectors 6109 * supported by FW/HW implies combined 6110 * reply queue mode is enabled. 6111 * For Ventura series, > 16 MSI-x vectors 6112 * supported by FW/HW implies combined 6113 * reply queue mode is enabled. 6114 */ 6115 switch (instance->adapter_type) { 6116 case INVADER_SERIES: 6117 if (instance->msix_vectors > 8) 6118 instance->msix_combined = true; 6119 break; 6120 case AERO_SERIES: 6121 case VENTURA_SERIES: 6122 if (instance->msix_vectors > 16) 6123 instance->msix_combined = true; 6124 break; 6125 } 6126 6127 if (rdpq_enable) 6128 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
6129 1 : 0;
6130
6131 if (instance->adapter_type >= INVADER_SERIES &&
6132 !instance->msix_combined) {
6133 instance->msix_load_balance = true;
6134 instance->smp_affinity_enable = false;
6135 }
6136
6137 /* Save 1-15 reply post index address to local memory
6138 * Index 0 is already saved from reg offset
6139 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6140 */
6141 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6142 instance->reply_post_host_index_addr[loop] =
6143 (u32 __iomem *)
6144 ((u8 __iomem *)instance->reg_set +
6145 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6146 + (loop * 0x10));
6147 }
6148 }
6149
6150 dev_info(&instance->pdev->dev,
6151 "firmware supports msix\t: (%d)",
6152 instance->msix_vectors);
6153 if (msix_vectors)
6154 instance->msix_vectors = min(msix_vectors,
6155 instance->msix_vectors);
6156 } else /* MFI adapters */
6157 instance->msix_vectors = 1;
6158
6159
6160 /*
6161 * For Aero (if some conditions are met), driver will configure a
6162 * few additional reply queues with interrupt coalescing enabled.
6163 * These queues with interrupt coalescing enabled are called
6164 * High IOPS queues, and the rest of the reply queues (based on the
6165 * number of logical CPUs) are termed Low latency queues.
6166 *
6167 * Total Number of reply queues = High IOPS queues + low latency queues
6168 *
6169 * For the rest of the fusion adapters, 1 additional reply queue will be
6170 * reserved for management commands, and the rest of the reply queues
6171 * (based on the number of logical CPUs) will be used for IOs and
6172 * referenced as IO queues.
6173 * Total Number of reply queues = 1 + IO queues
6174 *
6175 * MFI adapters support only a single MSI-X vector, so a single reply queue
6176 * is used for both IO and management commands.
6177 */
6178
6179 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6180 true : false;
6181 if (intr_coalescing &&
6182 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6183 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6184 instance->perf_mode = MR_BALANCED_PERF_MODE;
6185 else
6186 instance->perf_mode = MR_LATENCY_PERF_MODE;
6187
6188
6189 if (instance->adapter_type == AERO_SERIES) {
6190 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6191 speed = lnksta & PCI_EXP_LNKSTA_CLS;
6192
6193 /*
6194 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6195 * in latency perf mode and enable R1 PCI bandwidth algorithm
6196 */
6197 if (speed < 0x4) {
6198 instance->perf_mode = MR_LATENCY_PERF_MODE;
6199 fusion->pcie_bw_limitation = true;
6200 }
6201
6202 /*
6203 * Performance mode settings provided through the module parameter perf_mode will
6204 * take effect only for:
6205 * 1. The Aero family of adapters.
6206 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6207 */
6208 if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6209 (perf_mode <= MR_LATENCY_PERF_MODE))
6210 instance->perf_mode = perf_mode;
6211 /*
6212 * If intr coalescing is not supported by the controller FW, then IOPS
6213 * and Balanced modes are not feasible.
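 *
 * A sketch of the practical effect of the mode chosen here, as
 * implemented by the code that follows:
 *
 *   balanced       - low_latency_index_start = MR_HIGH_IOPS_QUEUE_COUNT,
 *                    i.e. the first few vectors stay coalesced as the
 *                    high IOPS queues
 *   iops / latency - low_latency_index_start = 1, with vector 0 kept as
 *                    the single pre-vector reserved ahead of the
 *                    per-CPU IO queues
 *
 * In either case msix_vectors is then clamped to
 * num_online_cpus() + low_latency_index_start before allocation.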
6214 */ 6215 if (!intr_coalescing) 6216 instance->perf_mode = MR_LATENCY_PERF_MODE; 6217 6218 } 6219 6220 if (instance->perf_mode == MR_BALANCED_PERF_MODE) 6221 instance->low_latency_index_start = 6222 MR_HIGH_IOPS_QUEUE_COUNT; 6223 else 6224 instance->low_latency_index_start = 1; 6225 6226 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 6227 6228 instance->msix_vectors = min(num_msix_req, 6229 instance->msix_vectors); 6230 6231 megasas_alloc_irq_vectors(instance); 6232 if (!instance->msix_vectors) 6233 instance->msix_load_balance = false; 6234 } 6235 /* 6236 * MSI-X host index 0 is common for all adapter. 6237 * It is used for all MPT based Adapters. 6238 */ 6239 if (instance->msix_combined) { 6240 instance->reply_post_host_index_addr[0] = 6241 (u32 *)((u8 *)instance->reg_set + 6242 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 6243 } else { 6244 instance->reply_post_host_index_addr[0] = 6245 (u32 *)((u8 *)instance->reg_set + 6246 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 6247 } 6248 6249 if (!instance->msix_vectors) { 6250 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 6251 if (i < 0) 6252 goto fail_init_adapter; 6253 } 6254 6255 megasas_setup_reply_map(instance); 6256 6257 dev_info(&instance->pdev->dev, 6258 "current msix/online cpus\t: (%d/%d)\n", 6259 instance->msix_vectors, (unsigned int)num_online_cpus()); 6260 dev_info(&instance->pdev->dev, 6261 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 6262 6263 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6264 (unsigned long)instance); 6265 6266 /* 6267 * Below are default value for legacy Firmware. 6268 * non-fusion based controllers 6269 */ 6270 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6271 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6272 /* Get operational params, sge flags, send init cmd to controller */ 6273 if (instance->instancet->init_adapter(instance)) 6274 goto fail_init_adapter; 6275 6276 if (instance->adapter_type >= VENTURA_SERIES) { 6277 scratch_pad_3 = 6278 megasas_readl(instance, 6279 &instance->reg_set->outbound_scratch_pad_3); 6280 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6281 MR_DEFAULT_NVME_PAGE_SHIFT) 6282 instance->nvme_page_size = 6283 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6284 6285 dev_info(&instance->pdev->dev, 6286 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6287 } 6288 6289 if (instance->msix_vectors ? 
6290 megasas_setup_irqs_msix(instance, 1) : 6291 megasas_setup_irqs_ioapic(instance)) 6292 goto fail_init_adapter; 6293 6294 if (instance->adapter_type != MFI_SERIES) 6295 megasas_setup_irq_poll(instance); 6296 6297 instance->instancet->enable_intr(instance); 6298 6299 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6300 6301 megasas_setup_jbod_map(instance); 6302 6303 if (megasas_get_device_list(instance) != SUCCESS) { 6304 dev_err(&instance->pdev->dev, 6305 "%s: megasas_get_device_list failed\n", 6306 __func__); 6307 goto fail_get_ld_pd_list; 6308 } 6309 6310 /* stream detection initialization */ 6311 if (instance->adapter_type >= VENTURA_SERIES) { 6312 fusion->stream_detect_by_ld = 6313 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6314 sizeof(struct LD_STREAM_DETECT *), 6315 GFP_KERNEL); 6316 if (!fusion->stream_detect_by_ld) { 6317 dev_err(&instance->pdev->dev, 6318 "unable to allocate stream detection for pool of LDs\n"); 6319 goto fail_get_ld_pd_list; 6320 } 6321 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6322 fusion->stream_detect_by_ld[i] = 6323 kzalloc(sizeof(struct LD_STREAM_DETECT), 6324 GFP_KERNEL); 6325 if (!fusion->stream_detect_by_ld[i]) { 6326 dev_err(&instance->pdev->dev, 6327 "unable to allocate stream detect by LD\n "); 6328 for (j = 0; j < i; ++j) 6329 kfree(fusion->stream_detect_by_ld[j]); 6330 kfree(fusion->stream_detect_by_ld); 6331 fusion->stream_detect_by_ld = NULL; 6332 goto fail_get_ld_pd_list; 6333 } 6334 fusion->stream_detect_by_ld[i]->mru_bit_map 6335 = MR_STREAM_BITMAP; 6336 } 6337 } 6338 6339 /* 6340 * Compute the max allowed sectors per IO: The controller info has two 6341 * limits on max sectors. Driver should use the minimum of these two. 6342 * 6343 * 1 << stripe_sz_ops.min = max sectors per strip 6344 * 6345 * Note that older firmwares ( < FW ver 30) didn't report information 6346 * to calculate max_sectors_1. So the number ended up as zero always. 6347 */ 6348 tmp_sectors = 0; 6349 ctrl_info = instance->ctrl_info_buf; 6350 6351 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6352 le16_to_cpu(ctrl_info->max_strips_per_io); 6353 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6354 6355 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6356 6357 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6358 instance->passive = ctrl_info->cluster.passive; 6359 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6360 instance->UnevenSpanSupport = 6361 ctrl_info->adapterOperations2.supportUnevenSpans; 6362 if (instance->UnevenSpanSupport) { 6363 struct fusion_context *fusion = instance->ctrl_context; 6364 if (MR_ValidateMapInfo(instance, instance->map_id)) 6365 fusion->fast_path_io = 1; 6366 else 6367 fusion->fast_path_io = 0; 6368 6369 } 6370 if (ctrl_info->host_interface.SRIOV) { 6371 instance->requestorId = ctrl_info->iov.requestorId; 6372 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6373 if (!ctrl_info->adapterOperations2.activePassive) 6374 instance->PlasmaFW111 = 1; 6375 6376 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6377 instance->PlasmaFW111 ? 
"1.11" : "new"); 6378 6379 if (instance->PlasmaFW111) { 6380 iovPtr = (struct IOV_111 *) 6381 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6382 instance->requestorId = iovPtr->requestorId; 6383 } 6384 } 6385 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6386 instance->requestorId); 6387 } 6388 6389 instance->crash_dump_fw_support = 6390 ctrl_info->adapterOperations3.supportCrashDump; 6391 instance->crash_dump_drv_support = 6392 (instance->crash_dump_fw_support && 6393 instance->crash_dump_buf); 6394 if (instance->crash_dump_drv_support) 6395 megasas_set_crash_dump_params(instance, 6396 MR_CRASH_BUF_TURN_OFF); 6397 6398 else { 6399 if (instance->crash_dump_buf) 6400 dma_free_coherent(&instance->pdev->dev, 6401 CRASH_DMA_BUF_SIZE, 6402 instance->crash_dump_buf, 6403 instance->crash_dump_h); 6404 instance->crash_dump_buf = NULL; 6405 } 6406 6407 if (instance->snapdump_wait_time) { 6408 megasas_get_snapdump_properties(instance); 6409 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6410 instance->snapdump_wait_time); 6411 } 6412 6413 dev_info(&instance->pdev->dev, 6414 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6415 le16_to_cpu(ctrl_info->pci.vendor_id), 6416 le16_to_cpu(ctrl_info->pci.device_id), 6417 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6418 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6419 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6420 instance->UnevenSpanSupport ? "yes" : "no"); 6421 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6422 instance->crash_dump_drv_support ? "yes" : "no"); 6423 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6424 instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); 6425 6426 instance->max_sectors_per_req = instance->max_num_sge * 6427 SGE_BUFFER_SIZE / 512; 6428 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6429 instance->max_sectors_per_req = tmp_sectors; 6430 6431 /* Check for valid throttlequeuedepth module parameter */ 6432 if (throttlequeuedepth && 6433 throttlequeuedepth <= instance->max_scsi_cmds) 6434 instance->throttlequeuedepth = throttlequeuedepth; 6435 else 6436 instance->throttlequeuedepth = 6437 MEGASAS_THROTTLE_QUEUE_DEPTH; 6438 6439 if ((resetwaittime < 1) || 6440 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6441 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6442 6443 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6444 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6445 6446 /* Launch SR-IOV heartbeat timer */ 6447 if (instance->requestorId) { 6448 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6449 megasas_start_timer(instance); 6450 } else { 6451 instance->skip_heartbeat_timer_del = 1; 6452 goto fail_get_ld_pd_list; 6453 } 6454 } 6455 6456 /* 6457 * Create and start watchdog thread which will monitor 6458 * controller state every 1 sec and trigger OCR when 6459 * it enters fault state 6460 */ 6461 if (instance->adapter_type != MFI_SERIES) 6462 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6463 goto fail_start_watchdog; 6464 6465 return 0; 6466 6467 fail_start_watchdog: 6468 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6469 del_timer_sync(&instance->sriov_heartbeat_timer); 6470 fail_get_ld_pd_list: 6471 instance->instancet->disable_intr(instance); 6472 megasas_destroy_irqs(instance); 6473 fail_init_adapter: 6474 if (instance->msix_vectors) 6475 pci_free_irq_vectors(instance->pdev); 6476 instance->msix_vectors = 0; 6477 fail_alloc_dma_buf: 6478 megasas_free_ctrl_dma_buffers(instance); 
6479 megasas_free_ctrl_mem(instance); 6480 fail_ready_state: 6481 iounmap(instance->reg_set); 6482 6483 fail_ioremap: 6484 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6485 6486 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6487 __func__, __LINE__); 6488 return -EINVAL; 6489 } 6490 6491 /** 6492 * megasas_release_mfi - Reverses the FW initialization 6493 * @instance: Adapter soft state 6494 */ 6495 static void megasas_release_mfi(struct megasas_instance *instance) 6496 { 6497 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6498 6499 if (instance->reply_queue) 6500 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6501 instance->reply_queue, instance->reply_queue_h); 6502 6503 megasas_free_cmds(instance); 6504 6505 iounmap(instance->reg_set); 6506 6507 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6508 } 6509 6510 /** 6511 * megasas_get_seq_num - Gets latest event sequence numbers 6512 * @instance: Adapter soft state 6513 * @eli: FW event log sequence numbers information 6514 * 6515 * FW maintains a log of all events in a non-volatile area. Upper layers would 6516 * usually find out the latest sequence number of the events, the seq number at 6517 * the boot etc. They would "read" all the events below the latest seq number 6518 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6519 * number), they would subsribe to AEN (asynchronous event notification) and 6520 * wait for the events to happen. 6521 */ 6522 static int 6523 megasas_get_seq_num(struct megasas_instance *instance, 6524 struct megasas_evt_log_info *eli) 6525 { 6526 struct megasas_cmd *cmd; 6527 struct megasas_dcmd_frame *dcmd; 6528 struct megasas_evt_log_info *el_info; 6529 dma_addr_t el_info_h = 0; 6530 int ret; 6531 6532 cmd = megasas_get_cmd(instance); 6533 6534 if (!cmd) { 6535 return -ENOMEM; 6536 } 6537 6538 dcmd = &cmd->frame->dcmd; 6539 el_info = dma_alloc_coherent(&instance->pdev->dev, 6540 sizeof(struct megasas_evt_log_info), 6541 &el_info_h, GFP_KERNEL); 6542 if (!el_info) { 6543 megasas_return_cmd(instance, cmd); 6544 return -ENOMEM; 6545 } 6546 6547 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6548 6549 dcmd->cmd = MFI_CMD_DCMD; 6550 dcmd->cmd_status = 0x0; 6551 dcmd->sge_count = 1; 6552 dcmd->flags = MFI_FRAME_DIR_READ; 6553 dcmd->timeout = 0; 6554 dcmd->pad_0 = 0; 6555 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6556 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6557 6558 megasas_set_dma_settings(instance, dcmd, el_info_h, 6559 sizeof(struct megasas_evt_log_info)); 6560 6561 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6562 if (ret != DCMD_SUCCESS) { 6563 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6564 __func__, __LINE__); 6565 goto dcmd_failed; 6566 } 6567 6568 /* 6569 * Copy the data back into callers buffer 6570 */ 6571 eli->newest_seq_num = el_info->newest_seq_num; 6572 eli->oldest_seq_num = el_info->oldest_seq_num; 6573 eli->clear_seq_num = el_info->clear_seq_num; 6574 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6575 eli->boot_seq_num = el_info->boot_seq_num; 6576 6577 dcmd_failed: 6578 dma_free_coherent(&instance->pdev->dev, 6579 sizeof(struct megasas_evt_log_info), 6580 el_info, el_info_h); 6581 6582 megasas_return_cmd(instance, cmd); 6583 6584 return ret; 6585 } 6586 6587 /** 6588 * megasas_register_aen - Registers for asynchronous event notification 6589 * @instance: Adapter soft state 6590 * @seq_num: The starting sequence number 6591 * 
@class_locale_word: Class of the event
6592 *
6593 * This function subscribes for AEN for events beyond the @seq_num. It requests
6594 * to be notified if and only if the event is of type @class_locale
6595 */
6596 static int
6597 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6598 u32 class_locale_word)
6599 {
6600 int ret_val;
6601 struct megasas_cmd *cmd;
6602 struct megasas_dcmd_frame *dcmd;
6603 union megasas_evt_class_locale curr_aen;
6604 union megasas_evt_class_locale prev_aen;
6605
6606 /*
6607 * If there is an AEN pending already (aen_cmd), check if the
6608 * class_locale of that pending AEN is inclusive of the new
6609 * AEN request we currently have. If it is, then we don't have
6610 * to do anything. In other words, whichever events the current
6611 * AEN request is subscribing to have already been subscribed
6612 * to.
6613 *
6614 * If the old_cmd is _not_ inclusive, then we have to abort
6615 * that command, form a class_locale that is a superset of both
6616 * old and current, and re-issue it to the FW
6617 */
6618
6619 curr_aen.word = class_locale_word;
6620
6621 if (instance->aen_cmd) {
6622
6623 prev_aen.word =
6624 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6625
6626 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6627 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6628 dev_info(&instance->pdev->dev,
6629 "%s %d out of range class %d sent by application\n",
6630 __func__, __LINE__, curr_aen.members.class);
6631 return 0;
6632 }
6633
6634 /*
6635 * A class whose enum value is smaller is inclusive of all
6636 * higher values. If a PROGRESS (= -1) was previously
6637 * registered, then new registration requests for higher
6638 * classes need not be sent to FW. They are automatically
6639 * included.
6640 *
6641 * Locale numbers don't have such a hierarchy. They are bitmap
6642 * values
6643 */
6644 if ((prev_aen.members.class <= curr_aen.members.class) &&
6645 !((prev_aen.members.locale & curr_aen.members.locale) ^
6646 curr_aen.members.locale)) {
6647 /*
6648 * Previously issued event registration includes
6649 * the current request. Nothing to do.
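 *
 * For example, if the pending AEN was registered with class
 * PROGRESS (= -1) and locale 0x0001, a new request for a numerically
 * higher class (CRITICAL, say) with the same locale is already
 * covered: PROGRESS compares lower-or-equal, and no bit of the new
 * locale is missing from the old one, so the function simply
 * returns 0 without touching the firmware.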
6650 */
6651 return 0;
6652 } else {
6653 curr_aen.members.locale |= prev_aen.members.locale;
6654
6655 if (prev_aen.members.class < curr_aen.members.class)
6656 curr_aen.members.class = prev_aen.members.class;
6657
6658 instance->aen_cmd->abort_aen = 1;
6659 ret_val = megasas_issue_blocked_abort_cmd(instance,
6660 instance->
6661 aen_cmd, 30);
6662
6663 if (ret_val) {
6664 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6665 "previous AEN command\n");
6666 return ret_val;
6667 }
6668 }
6669 }
6670
6671 cmd = megasas_get_cmd(instance);
6672
6673 if (!cmd)
6674 return -ENOMEM;
6675
6676 dcmd = &cmd->frame->dcmd;
6677
6678 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6679
6680 /*
6681 * Prepare DCMD for AEN registration
6682 */
6683 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6684
6685 dcmd->cmd = MFI_CMD_DCMD;
6686 dcmd->cmd_status = 0x0;
6687 dcmd->sge_count = 1;
6688 dcmd->flags = MFI_FRAME_DIR_READ;
6689 dcmd->timeout = 0;
6690 dcmd->pad_0 = 0;
6691 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6692 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6693 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6694 instance->last_seq_num = seq_num;
6695 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6696
6697 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6698 sizeof(struct megasas_evt_detail));
6699
6700 if (instance->aen_cmd != NULL) {
6701 megasas_return_cmd(instance, cmd);
6702 return 0;
6703 }
6704
6705 /*
6706 * Store reference to the cmd used to register for AEN. When an
6707 * application wants us to register for AEN, we have to abort this
6708 * cmd and re-register with a new EVENT LOCALE supplied by that app
6709 */
6710 instance->aen_cmd = cmd;
6711
6712 /*
6713 * Issue the AEN registration frame
6714 */
6715 instance->instancet->issue_dcmd(instance, cmd);
6716
6717 return 0;
6718 }
6719
6720 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6721 *
6722 * This DCMD will fetch a few properties of the LD/system PD defined
6723 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth and MDTS value.
6724 *
6725 * This DCMD is sent by the driver whenever a new target is added to the OS.
6726 *
6727 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6728 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6729 * 0 = system PD, 1 = LD.
6730 * dcmd.mbox.s[1] - TargetID for LD/system PD.
6731 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6732 *
6733 * @instance: Adapter soft state
6734 * @sdev: OS provided scsi device
6735 *
6736 * Returns 0 on success, non-zero on failure.
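 *
 * As a worked example: for a device that MEGASAS_IS_LOGICAL() reports
 * as a logical drive, say sdev->channel = 3 and sdev->id = 5, the code
 * below sets dcmd.mbox.b[0] = 1 (LD) and computes
 * targetId = ((3 % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 5,
 * i.e. MEGASAS_MAX_DEV_PER_CHANNEL + 5, before firing the DCMD either
 * blocked (fusion adapters with interrupts enabled) or polled
 * (MFI adapters or masked interrupts).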
6737 */ 6738 int 6739 megasas_get_target_prop(struct megasas_instance *instance, 6740 struct scsi_device *sdev) 6741 { 6742 int ret; 6743 struct megasas_cmd *cmd; 6744 struct megasas_dcmd_frame *dcmd; 6745 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6746 sdev->id; 6747 6748 cmd = megasas_get_cmd(instance); 6749 6750 if (!cmd) { 6751 dev_err(&instance->pdev->dev, 6752 "Failed to get cmd %s\n", __func__); 6753 return -ENOMEM; 6754 } 6755 6756 dcmd = &cmd->frame->dcmd; 6757 6758 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6759 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6760 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6761 6762 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6763 dcmd->cmd = MFI_CMD_DCMD; 6764 dcmd->cmd_status = 0xFF; 6765 dcmd->sge_count = 1; 6766 dcmd->flags = MFI_FRAME_DIR_READ; 6767 dcmd->timeout = 0; 6768 dcmd->pad_0 = 0; 6769 dcmd->data_xfer_len = 6770 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6771 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6772 6773 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6774 sizeof(struct MR_TARGET_PROPERTIES)); 6775 6776 if ((instance->adapter_type != MFI_SERIES) && 6777 !instance->mask_interrupts) 6778 ret = megasas_issue_blocked_cmd(instance, 6779 cmd, MFI_IO_TIMEOUT_SECS); 6780 else 6781 ret = megasas_issue_polled(instance, cmd); 6782 6783 switch (ret) { 6784 case DCMD_TIMEOUT: 6785 switch (dcmd_timeout_ocr_possible(instance)) { 6786 case INITIATE_OCR: 6787 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6788 mutex_unlock(&instance->reset_mutex); 6789 megasas_reset_fusion(instance->host, 6790 MFI_IO_TIMEOUT_OCR); 6791 mutex_lock(&instance->reset_mutex); 6792 break; 6793 case KILL_ADAPTER: 6794 megaraid_sas_kill_hba(instance); 6795 break; 6796 case IGNORE_TIMEOUT: 6797 dev_info(&instance->pdev->dev, 6798 "Ignore DCMD timeout: %s %d\n", 6799 __func__, __LINE__); 6800 break; 6801 } 6802 break; 6803 6804 default: 6805 megasas_return_cmd(instance, cmd); 6806 } 6807 if (ret != DCMD_SUCCESS) 6808 dev_err(&instance->pdev->dev, 6809 "return from %s %d return value %d\n", 6810 __func__, __LINE__, ret); 6811 6812 return ret; 6813 } 6814 6815 /** 6816 * megasas_start_aen - Subscribes to AEN during driver load time 6817 * @instance: Adapter soft state 6818 */ 6819 static int megasas_start_aen(struct megasas_instance *instance) 6820 { 6821 struct megasas_evt_log_info eli; 6822 union megasas_evt_class_locale class_locale; 6823 6824 /* 6825 * Get the latest sequence number from FW 6826 */ 6827 memset(&eli, 0, sizeof(eli)); 6828 6829 if (megasas_get_seq_num(instance, &eli)) 6830 return -1; 6831 6832 /* 6833 * Register AEN with FW for latest sequence number plus 1 6834 */ 6835 class_locale.members.reserved = 0; 6836 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6837 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6838 6839 return megasas_register_aen(instance, 6840 le32_to_cpu(eli.newest_seq_num) + 1, 6841 class_locale.word); 6842 } 6843 6844 /** 6845 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6846 * @instance: Adapter soft state 6847 */ 6848 static int megasas_io_attach(struct megasas_instance *instance) 6849 { 6850 struct Scsi_Host *host = instance->host; 6851 6852 /* 6853 * Export parameters required by SCSI mid-layer 6854 */ 6855 host->unique_id = instance->unique_id; 6856 host->can_queue = instance->max_scsi_cmds; 6857 host->this_id = instance->init_id; 6858 host->sg_tablesize = instance->max_num_sge; 6859 6860 if (instance->fw_support_ieee) 6861 instance->max_sectors_per_req 
= MEGASAS_MAX_SECTORS_IEEE;
6862
6863 /*
6864 * Check if the module parameter value for max_sectors can be used
6865 */
6866 if (max_sectors && max_sectors < instance->max_sectors_per_req)
6867 instance->max_sectors_per_req = max_sectors;
6868 else {
6869 if (max_sectors) {
6870 if (((instance->pdev->device ==
6871 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6872 (instance->pdev->device ==
6873 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6874 (max_sectors <= MEGASAS_MAX_SECTORS)) {
6875 instance->max_sectors_per_req = max_sectors;
6876 } else {
6877 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
6878 "and <= %d (or < 1MB for GEN2 controller)\n",
6879 instance->max_sectors_per_req);
6880 }
6881 }
6882 }
6883
6884 host->max_sectors = instance->max_sectors_per_req;
6885 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6886 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6887 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6888 host->max_lun = MEGASAS_MAX_LUN;
6889 host->max_cmd_len = 16;
6890
6891 /* Use shared host tagset only for fusion adapters
6892 * if there are managed interrupts (smp affinity enabled case).
6893 * A single msix_vector is used in kdump, so the shared host tagset is also disabled there.
6894 */
6895
6896 host->host_tagset = 0;
6897 host->nr_hw_queues = 1;
6898
6899 if ((instance->adapter_type != MFI_SERIES) &&
6900 (instance->msix_vectors > instance->low_latency_index_start) &&
6901 host_tagset_enable &&
6902 instance->smp_affinity_enable) {
6903 host->host_tagset = 1;
6904 host->nr_hw_queues = instance->msix_vectors -
6905 instance->low_latency_index_start + instance->iopoll_q_count;
6906 if (instance->iopoll_q_count)
6907 host->nr_maps = 3;
6908 } else {
6909 instance->iopoll_q_count = 0;
6910 }
6911
6912 dev_info(&instance->pdev->dev,
6913 "Max firmware commands: %d shared with default "
6914 "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
6915 host->nr_hw_queues - instance->iopoll_q_count,
6916 instance->iopoll_q_count);
6917 /*
6918 * Notify the mid-layer about the new controller
6919 */
6920 if (scsi_add_host(host, &instance->pdev->dev)) {
6921 dev_err(&instance->pdev->dev,
6922 "Failed to add host from %s %d\n",
6923 __func__, __LINE__);
6924 return -ENODEV;
6925 }
6926
6927 return 0;
6928 }
6929
6930 /**
6931 * megasas_set_dma_mask - Set DMA mask for supported controllers
6932 *
6933 * @instance: Adapter soft state
6934 * Description:
6935 *
6936 * For Ventura, driver/FW will operate in 63bit DMA addresses.
6937 *
6938 * For Invader -
6939 * By default, driver/FW will operate in 32bit DMA addresses
6940 * for consistent DMA mapping but if 32 bit consistent
6941 * DMA mask fails, driver will try with 63 bit consistent
6942 * mask, provided the FW is truly 63 bit DMA capable
6943 *
6944 * For older controllers (Thunderbolt and MFI based adapters) -
6945 * driver/FW will operate in 32 bit consistent DMA addresses.
6946 */
6947 static int
6948 megasas_set_dma_mask(struct megasas_instance *instance)
6949 {
6950 u64 consistent_mask;
6951 struct pci_dev *pdev;
6952 u32 scratch_pad_1;
6953
6954 pdev = instance->pdev;
6955 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6956 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 6957 6958 if (IS_DMA64) { 6959 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 6960 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6961 goto fail_set_dma_mask; 6962 6963 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 6964 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 6965 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 6966 /* 6967 * If 32 bit DMA mask fails, then try for 64 bit mask 6968 * for FW capable of handling 64 bit DMA. 6969 */ 6970 scratch_pad_1 = megasas_readl 6971 (instance, &instance->reg_set->outbound_scratch_pad_1); 6972 6973 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6974 goto fail_set_dma_mask; 6975 else if (dma_set_mask_and_coherent(&pdev->dev, 6976 DMA_BIT_MASK(63))) 6977 goto fail_set_dma_mask; 6978 } 6979 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6980 goto fail_set_dma_mask; 6981 6982 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 6983 instance->consistent_mask_64bit = false; 6984 else 6985 instance->consistent_mask_64bit = true; 6986 6987 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6988 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 6989 (instance->consistent_mask_64bit ? "63" : "32")); 6990 6991 return 0; 6992 6993 fail_set_dma_mask: 6994 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 6995 return -1; 6996 6997 } 6998 6999 /* 7000 * megasas_set_adapter_type - Set adapter type. 7001 * Supported controllers can be divided in 7002 * different categories- 7003 * enum MR_ADAPTER_TYPE { 7004 * MFI_SERIES = 1, 7005 * THUNDERBOLT_SERIES = 2, 7006 * INVADER_SERIES = 3, 7007 * VENTURA_SERIES = 4, 7008 * AERO_SERIES = 5, 7009 * }; 7010 * @instance: Adapter soft state 7011 * return: void 7012 */ 7013 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 7014 { 7015 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 7016 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 7017 instance->adapter_type = MFI_SERIES; 7018 } else { 7019 switch (instance->pdev->device) { 7020 case PCI_DEVICE_ID_LSI_AERO_10E1: 7021 case PCI_DEVICE_ID_LSI_AERO_10E2: 7022 case PCI_DEVICE_ID_LSI_AERO_10E5: 7023 case PCI_DEVICE_ID_LSI_AERO_10E6: 7024 instance->adapter_type = AERO_SERIES; 7025 break; 7026 case PCI_DEVICE_ID_LSI_VENTURA: 7027 case PCI_DEVICE_ID_LSI_CRUSADER: 7028 case PCI_DEVICE_ID_LSI_HARPOON: 7029 case PCI_DEVICE_ID_LSI_TOMCAT: 7030 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 7031 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 7032 instance->adapter_type = VENTURA_SERIES; 7033 break; 7034 case PCI_DEVICE_ID_LSI_FUSION: 7035 case PCI_DEVICE_ID_LSI_PLASMA: 7036 instance->adapter_type = THUNDERBOLT_SERIES; 7037 break; 7038 case PCI_DEVICE_ID_LSI_INVADER: 7039 case PCI_DEVICE_ID_LSI_INTRUDER: 7040 case PCI_DEVICE_ID_LSI_INTRUDER_24: 7041 case PCI_DEVICE_ID_LSI_CUTLASS_52: 7042 case PCI_DEVICE_ID_LSI_CUTLASS_53: 7043 case PCI_DEVICE_ID_LSI_FURY: 7044 instance->adapter_type = INVADER_SERIES; 7045 break; 7046 default: /* For all other supported controllers */ 7047 instance->adapter_type = MFI_SERIES; 7048 break; 7049 } 7050 } 7051 } 7052 7053 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 7054 { 7055 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 7056 sizeof(u32), &instance->producer_h, GFP_KERNEL); 7057 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 7058 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 7059 7060 if (!instance->producer || !instance->consumer) 
{ 7061 dev_err(&instance->pdev->dev, 7062 "Failed to allocate memory for producer, consumer\n"); 7063 return -1; 7064 } 7065 7066 *instance->producer = 0; 7067 *instance->consumer = 0; 7068 return 0; 7069 } 7070 7071 /** 7072 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 7073 * structures which are not common across MFI 7074 * adapters and fusion adapters. 7075 * For MFI based adapters, allocate producer and 7076 * consumer buffers. For fusion adapters, allocate 7077 * memory for fusion context. 7078 * @instance: Adapter soft state 7079 * return: 0 for SUCCESS 7080 */ 7081 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 7082 { 7083 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 7084 GFP_KERNEL); 7085 if (!instance->reply_map) 7086 return -ENOMEM; 7087 7088 switch (instance->adapter_type) { 7089 case MFI_SERIES: 7090 if (megasas_alloc_mfi_ctrl_mem(instance)) 7091 goto fail; 7092 break; 7093 case AERO_SERIES: 7094 case VENTURA_SERIES: 7095 case THUNDERBOLT_SERIES: 7096 case INVADER_SERIES: 7097 if (megasas_alloc_fusion_context(instance)) 7098 goto fail; 7099 break; 7100 } 7101 7102 return 0; 7103 fail: 7104 kfree(instance->reply_map); 7105 instance->reply_map = NULL; 7106 return -ENOMEM; 7107 } 7108 7109 /* 7110 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 7111 * producer, consumer buffers for MFI adapters 7112 * 7113 * @instance - Adapter soft instance 7114 * 7115 */ 7116 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 7117 { 7118 kfree(instance->reply_map); 7119 if (instance->adapter_type == MFI_SERIES) { 7120 if (instance->producer) 7121 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7122 instance->producer, 7123 instance->producer_h); 7124 if (instance->consumer) 7125 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7126 instance->consumer, 7127 instance->consumer_h); 7128 } else { 7129 megasas_free_fusion_context(instance); 7130 } 7131 } 7132 7133 /** 7134 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 7135 * driver load time 7136 * 7137 * @instance: Adapter soft instance 7138 * 7139 * @return: O for SUCCESS 7140 */ 7141 static inline 7142 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 7143 { 7144 struct pci_dev *pdev = instance->pdev; 7145 struct fusion_context *fusion = instance->ctrl_context; 7146 7147 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 7148 sizeof(struct megasas_evt_detail), 7149 &instance->evt_detail_h, GFP_KERNEL); 7150 7151 if (!instance->evt_detail) { 7152 dev_err(&instance->pdev->dev, 7153 "Failed to allocate event detail buffer\n"); 7154 return -ENOMEM; 7155 } 7156 7157 if (fusion) { 7158 fusion->ioc_init_request = 7159 dma_alloc_coherent(&pdev->dev, 7160 sizeof(struct MPI2_IOC_INIT_REQUEST), 7161 &fusion->ioc_init_request_phys, 7162 GFP_KERNEL); 7163 7164 if (!fusion->ioc_init_request) { 7165 dev_err(&pdev->dev, 7166 "Failed to allocate PD list buffer\n"); 7167 return -ENOMEM; 7168 } 7169 7170 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 7171 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7172 &instance->snapdump_prop_h, GFP_KERNEL); 7173 7174 if (!instance->snapdump_prop) 7175 dev_err(&pdev->dev, 7176 "Failed to allocate snapdump properties buffer\n"); 7177 7178 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 7179 HOST_DEVICE_LIST_SZ, 7180 &instance->host_device_list_buf_h, 7181 GFP_KERNEL); 7182 7183 if (!instance->host_device_list_buf) { 7184 
dev_err(&pdev->dev, 7185 "Failed to allocate targetid list buffer\n"); 7186 return -ENOMEM; 7187 } 7188 7189 } 7190 7191 instance->pd_list_buf = 7192 dma_alloc_coherent(&pdev->dev, 7193 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7194 &instance->pd_list_buf_h, GFP_KERNEL); 7195 7196 if (!instance->pd_list_buf) { 7197 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 7198 return -ENOMEM; 7199 } 7200 7201 instance->ctrl_info_buf = 7202 dma_alloc_coherent(&pdev->dev, 7203 sizeof(struct megasas_ctrl_info), 7204 &instance->ctrl_info_buf_h, GFP_KERNEL); 7205 7206 if (!instance->ctrl_info_buf) { 7207 dev_err(&pdev->dev, 7208 "Failed to allocate controller info buffer\n"); 7209 return -ENOMEM; 7210 } 7211 7212 instance->ld_list_buf = 7213 dma_alloc_coherent(&pdev->dev, 7214 sizeof(struct MR_LD_LIST), 7215 &instance->ld_list_buf_h, GFP_KERNEL); 7216 7217 if (!instance->ld_list_buf) { 7218 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7219 return -ENOMEM; 7220 } 7221 7222 instance->ld_targetid_list_buf = 7223 dma_alloc_coherent(&pdev->dev, 7224 sizeof(struct MR_LD_TARGETID_LIST), 7225 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7226 7227 if (!instance->ld_targetid_list_buf) { 7228 dev_err(&pdev->dev, 7229 "Failed to allocate LD targetid list buffer\n"); 7230 return -ENOMEM; 7231 } 7232 7233 if (!reset_devices) { 7234 instance->system_info_buf = 7235 dma_alloc_coherent(&pdev->dev, 7236 sizeof(struct MR_DRV_SYSTEM_INFO), 7237 &instance->system_info_h, GFP_KERNEL); 7238 instance->pd_info = 7239 dma_alloc_coherent(&pdev->dev, 7240 sizeof(struct MR_PD_INFO), 7241 &instance->pd_info_h, GFP_KERNEL); 7242 instance->tgt_prop = 7243 dma_alloc_coherent(&pdev->dev, 7244 sizeof(struct MR_TARGET_PROPERTIES), 7245 &instance->tgt_prop_h, GFP_KERNEL); 7246 instance->crash_dump_buf = 7247 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7248 &instance->crash_dump_h, GFP_KERNEL); 7249 7250 if (!instance->system_info_buf) 7251 dev_err(&instance->pdev->dev, 7252 "Failed to allocate system info buffer\n"); 7253 7254 if (!instance->pd_info) 7255 dev_err(&instance->pdev->dev, 7256 "Failed to allocate pd_info buffer\n"); 7257 7258 if (!instance->tgt_prop) 7259 dev_err(&instance->pdev->dev, 7260 "Failed to allocate tgt_prop buffer\n"); 7261 7262 if (!instance->crash_dump_buf) 7263 dev_err(&instance->pdev->dev, 7264 "Failed to allocate crash dump buffer\n"); 7265 } 7266 7267 return 0; 7268 } 7269 7270 /* 7271 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7272 * during driver load time 7273 * 7274 * @instance- Adapter soft instance 7275 * 7276 */ 7277 static inline 7278 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7279 { 7280 struct pci_dev *pdev = instance->pdev; 7281 struct fusion_context *fusion = instance->ctrl_context; 7282 7283 if (instance->evt_detail) 7284 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7285 instance->evt_detail, 7286 instance->evt_detail_h); 7287 7288 if (fusion && fusion->ioc_init_request) 7289 dma_free_coherent(&pdev->dev, 7290 sizeof(struct MPI2_IOC_INIT_REQUEST), 7291 fusion->ioc_init_request, 7292 fusion->ioc_init_request_phys); 7293 7294 if (instance->pd_list_buf) 7295 dma_free_coherent(&pdev->dev, 7296 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7297 instance->pd_list_buf, 7298 instance->pd_list_buf_h); 7299 7300 if (instance->ld_list_buf) 7301 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7302 instance->ld_list_buf, 7303 instance->ld_list_buf_h); 7304 7305 if 
(instance->ld_targetid_list_buf) 7306 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7307 instance->ld_targetid_list_buf, 7308 instance->ld_targetid_list_buf_h); 7309 7310 if (instance->ctrl_info_buf) 7311 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7312 instance->ctrl_info_buf, 7313 instance->ctrl_info_buf_h); 7314 7315 if (instance->system_info_buf) 7316 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7317 instance->system_info_buf, 7318 instance->system_info_h); 7319 7320 if (instance->pd_info) 7321 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7322 instance->pd_info, instance->pd_info_h); 7323 7324 if (instance->tgt_prop) 7325 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7326 instance->tgt_prop, instance->tgt_prop_h); 7327 7328 if (instance->crash_dump_buf) 7329 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7330 instance->crash_dump_buf, 7331 instance->crash_dump_h); 7332 7333 if (instance->snapdump_prop) 7334 dma_free_coherent(&pdev->dev, 7335 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7336 instance->snapdump_prop, 7337 instance->snapdump_prop_h); 7338 7339 if (instance->host_device_list_buf) 7340 dma_free_coherent(&pdev->dev, 7341 HOST_DEVICE_LIST_SZ, 7342 instance->host_device_list_buf, 7343 instance->host_device_list_buf_h); 7344 7345 } 7346 7347 /* 7348 * megasas_init_ctrl_params - Initialize controller's instance 7349 * parameters before FW init 7350 * @instance - Adapter soft instance 7351 * @return - void 7352 */ 7353 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7354 { 7355 instance->fw_crash_state = UNAVAILABLE; 7356 7357 megasas_poll_wait_aen = 0; 7358 instance->issuepend_done = 1; 7359 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7360 7361 /* 7362 * Initialize locks and queues 7363 */ 7364 INIT_LIST_HEAD(&instance->cmd_pool); 7365 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7366 7367 atomic_set(&instance->fw_outstanding, 0); 7368 atomic64_set(&instance->total_io_count, 0); 7369 7370 init_waitqueue_head(&instance->int_cmd_wait_q); 7371 init_waitqueue_head(&instance->abort_cmd_wait_q); 7372 7373 spin_lock_init(&instance->crashdump_lock); 7374 spin_lock_init(&instance->mfi_pool_lock); 7375 spin_lock_init(&instance->hba_lock); 7376 spin_lock_init(&instance->stream_lock); 7377 spin_lock_init(&instance->completion_lock); 7378 7379 mutex_init(&instance->reset_mutex); 7380 7381 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7382 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7383 instance->flag_ieee = 1; 7384 7385 megasas_dbg_lvl = 0; 7386 instance->flag = 0; 7387 instance->unload = 1; 7388 instance->last_time = 0; 7389 instance->disableOnlineCtrlReset = 1; 7390 instance->UnevenSpanSupport = 0; 7391 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7392 instance->msix_load_balance = false; 7393 7394 if (instance->adapter_type != MFI_SERIES) 7395 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7396 else 7397 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7398 } 7399 7400 /** 7401 * megasas_probe_one - PCI hotplug entry point 7402 * @pdev: PCI device structure 7403 * @id: PCI ids of supported hotplugged adapter 7404 */ 7405 static int megasas_probe_one(struct pci_dev *pdev, 7406 const struct pci_device_id *id) 7407 { 7408 int rval, pos; 7409 struct Scsi_Host *host; 7410 struct megasas_instance *instance; 7411 u16 control = 0; 7412 7413 switch (pdev->device) { 7414 case PCI_DEVICE_ID_LSI_AERO_10E0: 7415 case PCI_DEVICE_ID_LSI_AERO_10E3: 7416 case PCI_DEVICE_ID_LSI_AERO_10E4: 7417 case PCI_DEVICE_ID_LSI_AERO_10E7: 7418 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7419 return 1; 7420 case PCI_DEVICE_ID_LSI_AERO_10E1: 7421 case PCI_DEVICE_ID_LSI_AERO_10E5: 7422 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7423 break; 7424 } 7425 7426 /* Reset MSI-X in the kdump kernel */ 7427 if (reset_devices) { 7428 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7429 if (pos) { 7430 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7431 &control); 7432 if (control & PCI_MSIX_FLAGS_ENABLE) { 7433 dev_info(&pdev->dev, "resetting MSI-X\n"); 7434 pci_write_config_word(pdev, 7435 pos + PCI_MSIX_FLAGS, 7436 control & 7437 ~PCI_MSIX_FLAGS_ENABLE); 7438 } 7439 } 7440 } 7441 7442 /* 7443 * PCI prepping: enable device set bus mastering and dma mask 7444 */ 7445 rval = pci_enable_device_mem(pdev); 7446 7447 if (rval) { 7448 return rval; 7449 } 7450 7451 pci_set_master(pdev); 7452 7453 host = scsi_host_alloc(&megasas_template, 7454 sizeof(struct megasas_instance)); 7455 7456 if (!host) { 7457 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7458 goto fail_alloc_instance; 7459 } 7460 7461 instance = (struct megasas_instance *)host->hostdata; 7462 memset(instance, 0, sizeof(*instance)); 7463 atomic_set(&instance->fw_reset_no_pci_access, 0); 7464 7465 /* 7466 * Initialize PCI related and misc parameters 7467 */ 7468 instance->pdev = pdev; 7469 instance->host = host; 7470 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 7471 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7472 7473 megasas_set_adapter_type(instance); 7474 7475 /* 7476 * Initialize MFI Firmware 7477 */ 7478 if (megasas_init_fw(instance)) 7479 goto fail_init_mfi; 7480 7481 if (instance->requestorId) { 7482 if (instance->PlasmaFW111) { 7483 instance->vf_affiliation_111 = 7484 dma_alloc_coherent(&pdev->dev, 7485 sizeof(struct MR_LD_VF_AFFILIATION_111), 7486 &instance->vf_affiliation_111_h, 7487 GFP_KERNEL); 7488 if (!instance->vf_affiliation_111) 7489 dev_warn(&pdev->dev, "Can't allocate " 7490 "memory for VF affiliation buffer\n"); 7491 } else { 7492 instance->vf_affiliation = 7493 dma_alloc_coherent(&pdev->dev, 7494 (MAX_LOGICAL_DRIVES + 1) * 7495 sizeof(struct MR_LD_VF_AFFILIATION), 7496 &instance->vf_affiliation_h, 7497 GFP_KERNEL); 7498 if (!instance->vf_affiliation) 7499 dev_warn(&pdev->dev, "Can't allocate " 7500 "memory for VF affiliation buffer\n"); 7501 } 7502 } 7503 7504 /* 7505 * Store instance in PCI softstate 7506 */ 7507 pci_set_drvdata(pdev, instance); 7508 7509 /* 7510 * Add this controller to megasas_mgmt_info structure so that it 7511 * can be exported to management applications 7512 */ 7513 megasas_mgmt_info.count++; 7514 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7515 
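	/*
	 * max_index only ever grows here; megasas_detach_one() clears the
	 * slot for a removed adapter but deliberately does not decrement
	 * max_index, so the array may become sparse and instance lookups by
	 * host number have to tolerate NULL entries.
	 */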
megasas_mgmt_info.max_index++; 7516 7517 /* 7518 * Register with SCSI mid-layer 7519 */ 7520 if (megasas_io_attach(instance)) 7521 goto fail_io_attach; 7522 7523 instance->unload = 0; 7524 /* 7525 * Trigger SCSI to scan our drives 7526 */ 7527 if (!instance->enable_fw_dev_list || 7528 (instance->host_device_list_buf->count > 0)) 7529 scsi_scan_host(host); 7530 7531 /* 7532 * Initiate AEN (Asynchronous Event Notification) 7533 */ 7534 if (megasas_start_aen(instance)) { 7535 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7536 goto fail_start_aen; 7537 } 7538 7539 megasas_setup_debugfs(instance); 7540 7541 /* Get current SR-IOV LD/VF affiliation */ 7542 if (instance->requestorId) 7543 megasas_get_ld_vf_affiliation(instance, 1); 7544 7545 return 0; 7546 7547 fail_start_aen: 7548 fail_io_attach: 7549 megasas_mgmt_info.count--; 7550 megasas_mgmt_info.max_index--; 7551 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7552 7553 instance->instancet->disable_intr(instance); 7554 megasas_destroy_irqs(instance); 7555 7556 if (instance->adapter_type != MFI_SERIES) 7557 megasas_release_fusion(instance); 7558 else 7559 megasas_release_mfi(instance); 7560 if (instance->msix_vectors) 7561 pci_free_irq_vectors(instance->pdev); 7562 fail_init_mfi: 7563 scsi_host_put(host); 7564 fail_alloc_instance: 7565 pci_disable_device(pdev); 7566 7567 return -ENODEV; 7568 } 7569 7570 /** 7571 * megasas_flush_cache - Requests FW to flush all its caches 7572 * @instance: Adapter soft state 7573 */ 7574 static void megasas_flush_cache(struct megasas_instance *instance) 7575 { 7576 struct megasas_cmd *cmd; 7577 struct megasas_dcmd_frame *dcmd; 7578 7579 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7580 return; 7581 7582 cmd = megasas_get_cmd(instance); 7583 7584 if (!cmd) 7585 return; 7586 7587 dcmd = &cmd->frame->dcmd; 7588 7589 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7590 7591 dcmd->cmd = MFI_CMD_DCMD; 7592 dcmd->cmd_status = 0x0; 7593 dcmd->sge_count = 0; 7594 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7595 dcmd->timeout = 0; 7596 dcmd->pad_0 = 0; 7597 dcmd->data_xfer_len = 0; 7598 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7599 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7600 7601 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7602 != DCMD_SUCCESS) { 7603 dev_err(&instance->pdev->dev, 7604 "return from %s %d\n", __func__, __LINE__); 7605 return; 7606 } 7607 7608 megasas_return_cmd(instance, cmd); 7609 } 7610 7611 /** 7612 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7613 * @instance: Adapter soft state 7614 * @opcode: Shutdown/Hibernate 7615 */ 7616 static void megasas_shutdown_controller(struct megasas_instance *instance, 7617 u32 opcode) 7618 { 7619 struct megasas_cmd *cmd; 7620 struct megasas_dcmd_frame *dcmd; 7621 7622 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7623 return; 7624 7625 cmd = megasas_get_cmd(instance); 7626 7627 if (!cmd) 7628 return; 7629 7630 if (instance->aen_cmd) 7631 megasas_issue_blocked_abort_cmd(instance, 7632 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7633 if (instance->map_update_cmd) 7634 megasas_issue_blocked_abort_cmd(instance, 7635 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7636 if (instance->jbod_seq_cmd) 7637 megasas_issue_blocked_abort_cmd(instance, 7638 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7639 7640 dcmd = &cmd->frame->dcmd; 7641 7642 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7643 7644 dcmd->cmd = MFI_CMD_DCMD; 7645 
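	/*
	 * Build a zero-length, directed DCMD frame (no SGEs, no data
	 * transfer); the shutdown vs. hibernate opcode supplied by the
	 * caller is filled in below.
	 */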
dcmd->cmd_status = 0x0; 7646 dcmd->sge_count = 0; 7647 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7648 dcmd->timeout = 0; 7649 dcmd->pad_0 = 0; 7650 dcmd->data_xfer_len = 0; 7651 dcmd->opcode = cpu_to_le32(opcode); 7652 7653 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7654 != DCMD_SUCCESS) { 7655 dev_err(&instance->pdev->dev, 7656 "return from %s %d\n", __func__, __LINE__); 7657 return; 7658 } 7659 7660 megasas_return_cmd(instance, cmd); 7661 } 7662 7663 /** 7664 * megasas_suspend - driver suspend entry point 7665 * @dev: Device structure 7666 */ 7667 static int __maybe_unused 7668 megasas_suspend(struct device *dev) 7669 { 7670 struct megasas_instance *instance; 7671 7672 instance = dev_get_drvdata(dev); 7673 7674 if (!instance) 7675 return 0; 7676 7677 instance->unload = 1; 7678 7679 dev_info(dev, "%s is called\n", __func__); 7680 7681 /* Shutdown SR-IOV heartbeat timer */ 7682 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7683 del_timer_sync(&instance->sriov_heartbeat_timer); 7684 7685 /* Stop the FW fault detection watchdog */ 7686 if (instance->adapter_type != MFI_SERIES) 7687 megasas_fusion_stop_watchdog(instance); 7688 7689 megasas_flush_cache(instance); 7690 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7691 7692 /* cancel the delayed work if this work still in queue */ 7693 if (instance->ev != NULL) { 7694 struct megasas_aen_event *ev = instance->ev; 7695 cancel_delayed_work_sync(&ev->hotplug_work); 7696 instance->ev = NULL; 7697 } 7698 7699 tasklet_kill(&instance->isr_tasklet); 7700 7701 pci_set_drvdata(instance->pdev, instance); 7702 instance->instancet->disable_intr(instance); 7703 7704 megasas_destroy_irqs(instance); 7705 7706 if (instance->msix_vectors) 7707 pci_free_irq_vectors(instance->pdev); 7708 7709 return 0; 7710 } 7711 7712 /** 7713 * megasas_resume- driver resume entry point 7714 * @dev: Device structure 7715 */ 7716 static int __maybe_unused 7717 megasas_resume(struct device *dev) 7718 { 7719 int rval; 7720 struct Scsi_Host *host; 7721 struct megasas_instance *instance; 7722 u32 status_reg; 7723 7724 instance = dev_get_drvdata(dev); 7725 7726 if (!instance) 7727 return 0; 7728 7729 host = instance->host; 7730 7731 dev_info(dev, "%s is called\n", __func__); 7732 7733 /* 7734 * We expect the FW state to be READY 7735 */ 7736 7737 if (megasas_transition_to_ready(instance, 0)) { 7738 dev_info(&instance->pdev->dev, 7739 "Failed to transition controller to ready from %s!\n", 7740 __func__); 7741 if (instance->adapter_type != MFI_SERIES) { 7742 status_reg = 7743 instance->instancet->read_fw_status_reg(instance); 7744 if (!(status_reg & MFI_RESET_ADAPTER) || 7745 ((megasas_adp_reset_wait_for_ready 7746 (instance, true, 0)) == FAILED)) 7747 goto fail_ready_state; 7748 } else { 7749 atomic_set(&instance->fw_reset_no_pci_access, 1); 7750 instance->instancet->adp_reset 7751 (instance, instance->reg_set); 7752 atomic_set(&instance->fw_reset_no_pci_access, 0); 7753 7754 /* waiting for about 30 seconds before retry */ 7755 ssleep(30); 7756 7757 if (megasas_transition_to_ready(instance, 0)) 7758 goto fail_ready_state; 7759 } 7760 7761 dev_info(&instance->pdev->dev, 7762 "FW restarted successfully from %s!\n", 7763 __func__); 7764 } 7765 if (megasas_set_dma_mask(instance)) 7766 goto fail_set_dma_mask; 7767 7768 /* 7769 * Initialize MFI Firmware 7770 */ 7771 7772 atomic_set(&instance->fw_outstanding, 0); 7773 atomic_set(&instance->ldio_outstanding, 0); 7774 7775 /* Now re-enable MSI-X */ 7776 if 
(instance->msix_vectors) 7777 megasas_alloc_irq_vectors(instance); 7778 7779 if (!instance->msix_vectors) { 7780 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, 7781 PCI_IRQ_LEGACY); 7782 if (rval < 0) 7783 goto fail_reenable_msix; 7784 } 7785 7786 megasas_setup_reply_map(instance); 7787 7788 if (instance->adapter_type != MFI_SERIES) { 7789 megasas_reset_reply_desc(instance); 7790 if (megasas_ioc_init_fusion(instance)) { 7791 megasas_free_cmds(instance); 7792 megasas_free_cmds_fusion(instance); 7793 goto fail_init_mfi; 7794 } 7795 if (!megasas_get_map_info(instance)) 7796 megasas_sync_map_info(instance); 7797 } else { 7798 *instance->producer = 0; 7799 *instance->consumer = 0; 7800 if (megasas_issue_init_mfi(instance)) 7801 goto fail_init_mfi; 7802 } 7803 7804 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7805 goto fail_init_mfi; 7806 7807 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7808 (unsigned long)instance); 7809 7810 if (instance->msix_vectors ? 7811 megasas_setup_irqs_msix(instance, 0) : 7812 megasas_setup_irqs_ioapic(instance)) 7813 goto fail_init_mfi; 7814 7815 if (instance->adapter_type != MFI_SERIES) 7816 megasas_setup_irq_poll(instance); 7817 7818 /* Re-launch SR-IOV heartbeat timer */ 7819 if (instance->requestorId) { 7820 if (!megasas_sriov_start_heartbeat(instance, 0)) 7821 megasas_start_timer(instance); 7822 else { 7823 instance->skip_heartbeat_timer_del = 1; 7824 goto fail_init_mfi; 7825 } 7826 } 7827 7828 instance->instancet->enable_intr(instance); 7829 megasas_setup_jbod_map(instance); 7830 instance->unload = 0; 7831 7832 /* 7833 * Initiate AEN (Asynchronous Event Notification) 7834 */ 7835 if (megasas_start_aen(instance)) 7836 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7837 7838 /* Re-launch FW fault watchdog */ 7839 if (instance->adapter_type != MFI_SERIES) 7840 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7841 goto fail_start_watchdog; 7842 7843 return 0; 7844 7845 fail_start_watchdog: 7846 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7847 del_timer_sync(&instance->sriov_heartbeat_timer); 7848 fail_init_mfi: 7849 megasas_free_ctrl_dma_buffers(instance); 7850 megasas_free_ctrl_mem(instance); 7851 scsi_host_put(host); 7852 7853 fail_reenable_msix: 7854 fail_set_dma_mask: 7855 fail_ready_state: 7856 7857 return -ENODEV; 7858 } 7859 7860 static inline int 7861 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7862 { 7863 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7864 int i; 7865 u8 adp_state; 7866 7867 for (i = 0; i < wait_time; i++) { 7868 adp_state = atomic_read(&instance->adprecovery); 7869 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7870 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7871 break; 7872 7873 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7874 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7875 7876 msleep(1000); 7877 } 7878 7879 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7880 dev_info(&instance->pdev->dev, 7881 "%s HBA failed to become operational, adp_state %d\n", 7882 __func__, adp_state); 7883 return 1; 7884 } 7885 7886 return 0; 7887 } 7888 7889 /** 7890 * megasas_detach_one - PCI hot"un"plug entry point 7891 * @pdev: PCI device structure 7892 */ 7893 static void megasas_detach_one(struct pci_dev *pdev) 7894 { 7895 int i; 7896 struct Scsi_Host *host; 7897 struct megasas_instance *instance; 7898 struct fusion_context *fusion; 7899 u32 pd_seq_map_sz; 7900 7901 instance = pci_get_drvdata(pdev); 7902 7903 if (!instance) 7904 return; 
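	/*
	 * Teardown mirrors probe in reverse: stop the SR-IOV heartbeat and
	 * FW fault watchdog, remove the SCSI host, quiesce the firmware with
	 * flush-cache/shutdown DCMDs (skipped if the adapter never becomes
	 * operational again), then release IRQs, Fusion/MFI command pools
	 * and the controller DMA buffers before disabling the PCI device.
	 */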
7905 7906 host = instance->host; 7907 fusion = instance->ctrl_context; 7908 7909 /* Shutdown SR-IOV heartbeat timer */ 7910 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7911 del_timer_sync(&instance->sriov_heartbeat_timer); 7912 7913 /* Stop the FW fault detection watchdog */ 7914 if (instance->adapter_type != MFI_SERIES) 7915 megasas_fusion_stop_watchdog(instance); 7916 7917 if (instance->fw_crash_state != UNAVAILABLE) 7918 megasas_free_host_crash_buffer(instance); 7919 scsi_remove_host(instance->host); 7920 instance->unload = 1; 7921 7922 if (megasas_wait_for_adapter_operational(instance)) 7923 goto skip_firing_dcmds; 7924 7925 megasas_flush_cache(instance); 7926 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7927 7928 skip_firing_dcmds: 7929 /* cancel the delayed work if this work still in queue*/ 7930 if (instance->ev != NULL) { 7931 struct megasas_aen_event *ev = instance->ev; 7932 cancel_delayed_work_sync(&ev->hotplug_work); 7933 instance->ev = NULL; 7934 } 7935 7936 /* cancel all wait events */ 7937 wake_up_all(&instance->int_cmd_wait_q); 7938 7939 tasklet_kill(&instance->isr_tasklet); 7940 7941 /* 7942 * Take the instance off the instance array. Note that we will not 7943 * decrement the max_index. We let this array be sparse array 7944 */ 7945 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 7946 if (megasas_mgmt_info.instance[i] == instance) { 7947 megasas_mgmt_info.count--; 7948 megasas_mgmt_info.instance[i] = NULL; 7949 7950 break; 7951 } 7952 } 7953 7954 instance->instancet->disable_intr(instance); 7955 7956 megasas_destroy_irqs(instance); 7957 7958 if (instance->msix_vectors) 7959 pci_free_irq_vectors(instance->pdev); 7960 7961 if (instance->adapter_type >= VENTURA_SERIES) { 7962 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7963 kfree(fusion->stream_detect_by_ld[i]); 7964 kfree(fusion->stream_detect_by_ld); 7965 fusion->stream_detect_by_ld = NULL; 7966 } 7967 7968 7969 if (instance->adapter_type != MFI_SERIES) { 7970 megasas_release_fusion(instance); 7971 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 7972 (sizeof(struct MR_PD_CFG_SEQ) * 7973 (MAX_PHYSICAL_DEVICES - 1)); 7974 for (i = 0; i < 2 ; i++) { 7975 if (fusion->ld_map[i]) 7976 dma_free_coherent(&instance->pdev->dev, 7977 fusion->max_map_sz, 7978 fusion->ld_map[i], 7979 fusion->ld_map_phys[i]); 7980 if (fusion->ld_drv_map[i]) { 7981 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 7982 vfree(fusion->ld_drv_map[i]); 7983 else 7984 free_pages((ulong)fusion->ld_drv_map[i], 7985 fusion->drv_map_pages); 7986 } 7987 7988 if (fusion->pd_seq_sync[i]) 7989 dma_free_coherent(&instance->pdev->dev, 7990 pd_seq_map_sz, 7991 fusion->pd_seq_sync[i], 7992 fusion->pd_seq_phys[i]); 7993 } 7994 } else { 7995 megasas_release_mfi(instance); 7996 } 7997 7998 if (instance->vf_affiliation) 7999 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 8000 sizeof(struct MR_LD_VF_AFFILIATION), 8001 instance->vf_affiliation, 8002 instance->vf_affiliation_h); 8003 8004 if (instance->vf_affiliation_111) 8005 dma_free_coherent(&pdev->dev, 8006 sizeof(struct MR_LD_VF_AFFILIATION_111), 8007 instance->vf_affiliation_111, 8008 instance->vf_affiliation_111_h); 8009 8010 if (instance->hb_host_mem) 8011 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 8012 instance->hb_host_mem, 8013 instance->hb_host_mem_h); 8014 8015 megasas_free_ctrl_dma_buffers(instance); 8016 8017 megasas_free_ctrl_mem(instance); 8018 8019 megasas_destroy_debugfs(instance); 8020 8021 scsi_host_put(host); 8022 8023 
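	/* Balances pci_enable_device_mem() done in megasas_probe_one(). */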
pci_disable_device(pdev); 8024 } 8025 8026 /** 8027 * megasas_shutdown - Shutdown entry point 8028 * @pdev: PCI device structure 8029 */ 8030 static void megasas_shutdown(struct pci_dev *pdev) 8031 { 8032 struct megasas_instance *instance = pci_get_drvdata(pdev); 8033 8034 if (!instance) 8035 return; 8036 8037 instance->unload = 1; 8038 8039 if (megasas_wait_for_adapter_operational(instance)) 8040 goto skip_firing_dcmds; 8041 8042 megasas_flush_cache(instance); 8043 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 8044 8045 skip_firing_dcmds: 8046 instance->instancet->disable_intr(instance); 8047 megasas_destroy_irqs(instance); 8048 8049 if (instance->msix_vectors) 8050 pci_free_irq_vectors(instance->pdev); 8051 } 8052 8053 /* 8054 * megasas_mgmt_open - char node "open" entry point 8055 * @inode: char node inode 8056 * @filep: char node file 8057 */ 8058 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 8059 { 8060 /* 8061 * Allow only those users with admin rights 8062 */ 8063 if (!capable(CAP_SYS_ADMIN)) 8064 return -EACCES; 8065 8066 return 0; 8067 } 8068 8069 /* 8070 * megasas_mgmt_fasync - Async notifier registration from applications 8071 * @fd: char node file descriptor number 8072 * @filep: char node file 8073 * @mode: notifier on/off 8074 * 8075 * This function adds the calling process to a driver global queue. When an 8076 * event occurs, SIGIO will be sent to all processes in this queue. 8077 */ 8078 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 8079 { 8080 int rc; 8081 8082 mutex_lock(&megasas_async_queue_mutex); 8083 8084 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 8085 8086 mutex_unlock(&megasas_async_queue_mutex); 8087 8088 if (rc >= 0) { 8089 /* For sanity check when we get ioctl */ 8090 filep->private_data = filep; 8091 return 0; 8092 } 8093 8094 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 8095 8096 return rc; 8097 } 8098 8099 /* 8100 * megasas_mgmt_poll - char node "poll" entry point 8101 * @filep: char node file 8102 * @wait: Events to poll for 8103 */ 8104 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 8105 { 8106 __poll_t mask; 8107 unsigned long flags; 8108 8109 poll_wait(file, &megasas_poll_wait, wait); 8110 spin_lock_irqsave(&poll_aen_lock, flags); 8111 if (megasas_poll_wait_aen) 8112 mask = (EPOLLIN | EPOLLRDNORM); 8113 else 8114 mask = 0; 8115 megasas_poll_wait_aen = 0; 8116 spin_unlock_irqrestore(&poll_aen_lock, flags); 8117 return mask; 8118 } 8119 8120 /* 8121 * megasas_set_crash_dump_params_ioctl: 8122 * Send CRASH_DUMP_MODE DCMD to all controllers 8123 * @cmd: MFI command frame 8124 */ 8125 8126 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 8127 { 8128 struct megasas_instance *local_instance; 8129 int i, error = 0; 8130 int crash_support; 8131 8132 crash_support = cmd->frame->dcmd.mbox.w[0]; 8133 8134 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8135 local_instance = megasas_mgmt_info.instance[i]; 8136 if (local_instance && local_instance->crash_dump_drv_support) { 8137 if ((atomic_read(&local_instance->adprecovery) == 8138 MEGASAS_HBA_OPERATIONAL) && 8139 !megasas_set_crash_dump_params(local_instance, 8140 crash_support)) { 8141 local_instance->crash_dump_app_support = 8142 crash_support; 8143 dev_info(&local_instance->pdev->dev, 8144 "Application firmware crash " 8145 "dump mode set success\n"); 8146 error = 0; 8147 } else { 8148 dev_info(&local_instance->pdev->dev, 8149 "Application firmware crash " 8150 "dump 
mode set failed\n"); 8151 error = -1; 8152 } 8153 } 8154 } 8155 return error; 8156 } 8157 8158 /** 8159 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 8160 * @instance: Adapter soft state 8161 * @user_ioc: User's ioctl packet 8162 * @ioc: ioctl packet 8163 */ 8164 static int 8165 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 8166 struct megasas_iocpacket __user * user_ioc, 8167 struct megasas_iocpacket *ioc) 8168 { 8169 struct megasas_sge64 *kern_sge64 = NULL; 8170 struct megasas_sge32 *kern_sge32 = NULL; 8171 struct megasas_cmd *cmd; 8172 void *kbuff_arr[MAX_IOCTL_SGE]; 8173 dma_addr_t buf_handle = 0; 8174 int error = 0, i; 8175 void *sense = NULL; 8176 dma_addr_t sense_handle; 8177 void *sense_ptr; 8178 u32 opcode = 0; 8179 int ret = DCMD_SUCCESS; 8180 8181 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 8182 8183 if (ioc->sge_count > MAX_IOCTL_SGE) { 8184 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 8185 ioc->sge_count, MAX_IOCTL_SGE); 8186 return -EINVAL; 8187 } 8188 8189 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 8190 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 8191 !instance->support_nvme_passthru) || 8192 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && 8193 !instance->support_pci_lane_margining)) { 8194 dev_err(&instance->pdev->dev, 8195 "Received invalid ioctl command 0x%x\n", 8196 ioc->frame.hdr.cmd); 8197 return -ENOTSUPP; 8198 } 8199 8200 cmd = megasas_get_cmd(instance); 8201 if (!cmd) { 8202 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 8203 return -ENOMEM; 8204 } 8205 8206 /* 8207 * User's IOCTL packet has 2 frames (maximum). Copy those two 8208 * frames into our cmd's frames. cmd->frame's context will get 8209 * overwritten when we copy from user's frames. So set that value 8210 * alone separately 8211 */ 8212 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 8213 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 8214 cmd->frame->hdr.pad_0 = 0; 8215 8216 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 8217 8218 if (instance->consistent_mask_64bit) 8219 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 8220 MFI_FRAME_SENSE64)); 8221 else 8222 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 8223 MFI_FRAME_SENSE64)); 8224 8225 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 8226 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 8227 8228 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 8229 mutex_lock(&instance->reset_mutex); 8230 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 8231 megasas_return_cmd(instance, cmd); 8232 mutex_unlock(&instance->reset_mutex); 8233 return -1; 8234 } 8235 mutex_unlock(&instance->reset_mutex); 8236 } 8237 8238 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 8239 error = megasas_set_crash_dump_params_ioctl(cmd); 8240 megasas_return_cmd(instance, cmd); 8241 return error; 8242 } 8243 8244 /* 8245 * The management interface between applications and the fw uses 8246 * MFI frames. E.g, RAID configuration changes, LD property changes 8247 * etc are accomplishes through different kinds of MFI frames. The 8248 * driver needs to care only about substituting user buffers with 8249 * kernel buffers in SGLs. The location of SGL is embedded in the 8250 * struct iocpacket itself. 
8251 */ 8252 if (instance->consistent_mask_64bit) 8253 kern_sge64 = (struct megasas_sge64 *) 8254 ((unsigned long)cmd->frame + ioc->sgl_off); 8255 else 8256 kern_sge32 = (struct megasas_sge32 *) 8257 ((unsigned long)cmd->frame + ioc->sgl_off); 8258 8259 /* 8260 * For each user buffer, create a mirror buffer and copy in 8261 */ 8262 for (i = 0; i < ioc->sge_count; i++) { 8263 if (!ioc->sgl[i].iov_len) 8264 continue; 8265 8266 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8267 ioc->sgl[i].iov_len, 8268 &buf_handle, GFP_KERNEL); 8269 if (!kbuff_arr[i]) { 8270 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8271 "kernel SGL buffer for IOCTL\n"); 8272 error = -ENOMEM; 8273 goto out; 8274 } 8275 8276 /* 8277 * We don't change the dma_coherent_mask, so 8278 * dma_alloc_coherent only returns 32bit addresses 8279 */ 8280 if (instance->consistent_mask_64bit) { 8281 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8282 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8283 } else { 8284 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8285 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8286 } 8287 8288 /* 8289 * We created a kernel buffer corresponding to the 8290 * user buffer. Now copy in from the user buffer 8291 */ 8292 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8293 (u32) (ioc->sgl[i].iov_len))) { 8294 error = -EFAULT; 8295 goto out; 8296 } 8297 } 8298 8299 if (ioc->sense_len) { 8300 /* make sure the pointer is part of the frame */ 8301 if (ioc->sense_off > 8302 (sizeof(union megasas_frame) - sizeof(__le64))) { 8303 error = -EINVAL; 8304 goto out; 8305 } 8306 8307 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8308 &sense_handle, GFP_KERNEL); 8309 if (!sense) { 8310 error = -ENOMEM; 8311 goto out; 8312 } 8313 8314 /* always store 64 bits regardless of addressing */ 8315 sense_ptr = (void *)cmd->frame + ioc->sense_off; 8316 put_unaligned_le64(sense_handle, sense_ptr); 8317 } 8318 8319 /* 8320 * Set the sync_cmd flag so that the ISR knows not to complete this 8321 * cmd to the SCSI mid-layer 8322 */ 8323 cmd->sync_cmd = 1; 8324 8325 ret = megasas_issue_blocked_cmd(instance, cmd, 0); 8326 switch (ret) { 8327 case DCMD_INIT: 8328 case DCMD_BUSY: 8329 cmd->sync_cmd = 0; 8330 dev_err(&instance->pdev->dev, 8331 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8332 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8333 cmd->cmd_status_drv); 8334 error = -EBUSY; 8335 goto out; 8336 } 8337 8338 cmd->sync_cmd = 0; 8339 8340 if (instance->unload == 1) { 8341 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8342 "don't submit data to application\n"); 8343 goto out; 8344 } 8345 /* 8346 * copy out the kernel buffers to user buffers 8347 */ 8348 for (i = 0; i < ioc->sge_count; i++) { 8349 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8350 ioc->sgl[i].iov_len)) { 8351 error = -EFAULT; 8352 goto out; 8353 } 8354 } 8355 8356 /* 8357 * copy out the sense 8358 */ 8359 if (ioc->sense_len) { 8360 void __user *uptr; 8361 /* 8362 * sense_ptr points to the location that has the user 8363 * sense buffer address 8364 */ 8365 sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; 8366 if (in_compat_syscall()) 8367 uptr = compat_ptr(get_unaligned((compat_uptr_t *) 8368 sense_ptr)); 8369 else 8370 uptr = get_unaligned((void __user **)sense_ptr); 8371 8372 if (copy_to_user(uptr, sense, ioc->sense_len)) { 8373 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8374 "sense data\n"); 8375 error = 
-EFAULT; 8376 goto out; 8377 } 8378 } 8379 8380 /* 8381 * copy the status codes returned by the fw 8382 */ 8383 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8384 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8385 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 8386 error = -EFAULT; 8387 } 8388 8389 out: 8390 if (sense) { 8391 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8392 sense, sense_handle); 8393 } 8394 8395 for (i = 0; i < ioc->sge_count; i++) { 8396 if (kbuff_arr[i]) { 8397 if (instance->consistent_mask_64bit) 8398 dma_free_coherent(&instance->pdev->dev, 8399 le32_to_cpu(kern_sge64[i].length), 8400 kbuff_arr[i], 8401 le64_to_cpu(kern_sge64[i].phys_addr)); 8402 else 8403 dma_free_coherent(&instance->pdev->dev, 8404 le32_to_cpu(kern_sge32[i].length), 8405 kbuff_arr[i], 8406 le32_to_cpu(kern_sge32[i].phys_addr)); 8407 kbuff_arr[i] = NULL; 8408 } 8409 } 8410 8411 megasas_return_cmd(instance, cmd); 8412 return error; 8413 } 8414 8415 static struct megasas_iocpacket * 8416 megasas_compat_iocpacket_get_user(void __user *arg) 8417 { 8418 struct megasas_iocpacket *ioc; 8419 struct compat_megasas_iocpacket __user *cioc = arg; 8420 size_t size; 8421 int err = -EFAULT; 8422 int i; 8423 8424 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 8425 if (!ioc) 8426 return ERR_PTR(-ENOMEM); 8427 size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); 8428 if (copy_from_user(ioc, arg, size)) 8429 goto out; 8430 8431 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8432 compat_uptr_t iov_base; 8433 8434 if (get_user(iov_base, &cioc->sgl[i].iov_base) || 8435 get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) 8436 goto out; 8437 8438 ioc->sgl[i].iov_base = compat_ptr(iov_base); 8439 } 8440 8441 return ioc; 8442 out: 8443 kfree(ioc); 8444 return ERR_PTR(err); 8445 } 8446 8447 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8448 { 8449 struct megasas_iocpacket __user *user_ioc = 8450 (struct megasas_iocpacket __user *)arg; 8451 struct megasas_iocpacket *ioc; 8452 struct megasas_instance *instance; 8453 int error; 8454 8455 if (in_compat_syscall()) 8456 ioc = megasas_compat_iocpacket_get_user(user_ioc); 8457 else 8458 ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); 8459 8460 if (IS_ERR(ioc)) 8461 return PTR_ERR(ioc); 8462 8463 instance = megasas_lookup_instance(ioc->host_no); 8464 if (!instance) { 8465 error = -ENODEV; 8466 goto out_kfree_ioc; 8467 } 8468 8469 /* Block ioctls in VF mode */ 8470 if (instance->requestorId && !allow_vf_ioctls) { 8471 error = -ENODEV; 8472 goto out_kfree_ioc; 8473 } 8474 8475 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8476 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8477 error = -ENODEV; 8478 goto out_kfree_ioc; 8479 } 8480 8481 if (instance->unload == 1) { 8482 error = -ENODEV; 8483 goto out_kfree_ioc; 8484 } 8485 8486 if (down_interruptible(&instance->ioctl_sem)) { 8487 error = -ERESTARTSYS; 8488 goto out_kfree_ioc; 8489 } 8490 8491 if (megasas_wait_for_adapter_operational(instance)) { 8492 error = -ENODEV; 8493 goto out_up; 8494 } 8495 8496 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8497 out_up: 8498 up(&instance->ioctl_sem); 8499 8500 out_kfree_ioc: 8501 kfree(ioc); 8502 return error; 8503 } 8504 8505 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8506 { 8507 struct megasas_instance *instance; 8508 struct megasas_aen aen; 8509 int error; 8510 8511 if (file->private_data != file) { 8512 printk(KERN_DEBUG 
"megasas: fasync_helper was not " 8513 "called first\n"); 8514 return -EINVAL; 8515 } 8516 8517 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8518 return -EFAULT; 8519 8520 instance = megasas_lookup_instance(aen.host_no); 8521 8522 if (!instance) 8523 return -ENODEV; 8524 8525 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8526 return -ENODEV; 8527 } 8528 8529 if (instance->unload == 1) { 8530 return -ENODEV; 8531 } 8532 8533 if (megasas_wait_for_adapter_operational(instance)) 8534 return -ENODEV; 8535 8536 mutex_lock(&instance->reset_mutex); 8537 error = megasas_register_aen(instance, aen.seq_num, 8538 aen.class_locale_word); 8539 mutex_unlock(&instance->reset_mutex); 8540 return error; 8541 } 8542 8543 /** 8544 * megasas_mgmt_ioctl - char node ioctl entry point 8545 * @file: char device file pointer 8546 * @cmd: ioctl command 8547 * @arg: ioctl command arguments address 8548 */ 8549 static long 8550 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8551 { 8552 switch (cmd) { 8553 case MEGASAS_IOC_FIRMWARE: 8554 return megasas_mgmt_ioctl_fw(file, arg); 8555 8556 case MEGASAS_IOC_GET_AEN: 8557 return megasas_mgmt_ioctl_aen(file, arg); 8558 } 8559 8560 return -ENOTTY; 8561 } 8562 8563 #ifdef CONFIG_COMPAT 8564 static long 8565 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8566 unsigned long arg) 8567 { 8568 switch (cmd) { 8569 case MEGASAS_IOC_FIRMWARE32: 8570 return megasas_mgmt_ioctl_fw(file, arg); 8571 case MEGASAS_IOC_GET_AEN: 8572 return megasas_mgmt_ioctl_aen(file, arg); 8573 } 8574 8575 return -ENOTTY; 8576 } 8577 #endif 8578 8579 /* 8580 * File operations structure for management interface 8581 */ 8582 static const struct file_operations megasas_mgmt_fops = { 8583 .owner = THIS_MODULE, 8584 .open = megasas_mgmt_open, 8585 .fasync = megasas_mgmt_fasync, 8586 .unlocked_ioctl = megasas_mgmt_ioctl, 8587 .poll = megasas_mgmt_poll, 8588 #ifdef CONFIG_COMPAT 8589 .compat_ioctl = megasas_mgmt_compat_ioctl, 8590 #endif 8591 .llseek = noop_llseek, 8592 }; 8593 8594 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); 8595 8596 /* 8597 * PCI hotplug support registration structure 8598 */ 8599 static struct pci_driver megasas_pci_driver = { 8600 8601 .name = "megaraid_sas", 8602 .id_table = megasas_pci_table, 8603 .probe = megasas_probe_one, 8604 .remove = megasas_detach_one, 8605 .driver.pm = &megasas_pm_ops, 8606 .shutdown = megasas_shutdown, 8607 }; 8608 8609 /* 8610 * Sysfs driver attributes 8611 */ 8612 static ssize_t version_show(struct device_driver *dd, char *buf) 8613 { 8614 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8615 MEGASAS_VERSION); 8616 } 8617 static DRIVER_ATTR_RO(version); 8618 8619 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8620 { 8621 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8622 MEGASAS_RELDATE); 8623 } 8624 static DRIVER_ATTR_RO(release_date); 8625 8626 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8627 { 8628 return sprintf(buf, "%u\n", support_poll_for_event); 8629 } 8630 static DRIVER_ATTR_RO(support_poll_for_event); 8631 8632 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8633 { 8634 return sprintf(buf, "%u\n", support_device_change); 8635 } 8636 static DRIVER_ATTR_RO(support_device_change); 8637 8638 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8639 { 8640 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8641 } 8642 8643 static 
ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8644 size_t count) 8645 { 8646 int retval = count; 8647 8648 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8649 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8650 retval = -EINVAL; 8651 } 8652 return retval; 8653 } 8654 static DRIVER_ATTR_RW(dbg_lvl); 8655 8656 static ssize_t 8657 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8658 { 8659 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8660 } 8661 8662 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8663 8664 static ssize_t 8665 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8666 { 8667 return sprintf(buf, "%u\n", support_pci_lane_margining); 8668 } 8669 8670 static DRIVER_ATTR_RO(support_pci_lane_margining); 8671 8672 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8673 { 8674 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8675 scsi_remove_device(sdev); 8676 scsi_device_put(sdev); 8677 } 8678 8679 /** 8680 * megasas_update_device_list - Update the PD and LD device list from FW 8681 * after an AEN event notification 8682 * @instance: Adapter soft state 8683 * @event_type: Indicates type of event (PD or LD event) 8684 * 8685 * @return: Success or failure 8686 * 8687 * Issue DCMDs to Firmware to update the internal device list in driver. 8688 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8689 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8690 */ 8691 static 8692 int megasas_update_device_list(struct megasas_instance *instance, 8693 int event_type) 8694 { 8695 int dcmd_ret = DCMD_SUCCESS; 8696 8697 if (instance->enable_fw_dev_list) { 8698 dcmd_ret = megasas_host_device_list_query(instance, false); 8699 if (dcmd_ret != DCMD_SUCCESS) 8700 goto out; 8701 } else { 8702 if (event_type & SCAN_PD_CHANNEL) { 8703 dcmd_ret = megasas_get_pd_list(instance); 8704 8705 if (dcmd_ret != DCMD_SUCCESS) 8706 goto out; 8707 } 8708 8709 if (event_type & SCAN_VD_CHANNEL) { 8710 if (!instance->requestorId || 8711 (instance->requestorId && 8712 megasas_get_ld_vf_affiliation(instance, 0))) { 8713 dcmd_ret = megasas_ld_list_query(instance, 8714 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8715 if (dcmd_ret != DCMD_SUCCESS) 8716 goto out; 8717 } 8718 } 8719 } 8720 8721 out: 8722 return dcmd_ret; 8723 } 8724 8725 /** 8726 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8727 * after an AEN event notification 8728 * @instance: Adapter soft state 8729 * @scan_type: Indicates type of devices (PD/LD) to add 8730 * @return void 8731 */ 8732 static 8733 void megasas_add_remove_devices(struct megasas_instance *instance, 8734 int scan_type) 8735 { 8736 int i, j; 8737 u16 pd_index = 0; 8738 u16 ld_index = 0; 8739 u16 channel = 0, id = 0; 8740 struct Scsi_Host *host; 8741 struct scsi_device *sdev1; 8742 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8743 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8744 8745 host = instance->host; 8746 8747 if (instance->enable_fw_dev_list) { 8748 targetid_list = instance->host_device_list_buf; 8749 for (i = 0; i < targetid_list->count; i++) { 8750 targetid_entry = &targetid_list->host_device_list[i]; 8751 if (targetid_entry->flags.u.bits.is_sys_pd) { 8752 channel = le16_to_cpu(targetid_entry->target_id) / 8753 MEGASAS_MAX_DEV_PER_CHANNEL; 8754 id = le16_to_cpu(targetid_entry->target_id) % 8755 MEGASAS_MAX_DEV_PER_CHANNEL; 8756 } else { 8757 channel = MEGASAS_MAX_PD_CHANNELS + 8758 (le16_to_cpu(targetid_entry->target_id) 
/ 8759 MEGASAS_MAX_DEV_PER_CHANNEL); 8760 id = le16_to_cpu(targetid_entry->target_id) % 8761 MEGASAS_MAX_DEV_PER_CHANNEL; 8762 } 8763 sdev1 = scsi_device_lookup(host, channel, id, 0); 8764 if (!sdev1) { 8765 scsi_add_device(host, channel, id, 0); 8766 } else { 8767 scsi_device_put(sdev1); 8768 } 8769 } 8770 } 8771 8772 if (scan_type & SCAN_PD_CHANNEL) { 8773 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8774 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8775 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8776 sdev1 = scsi_device_lookup(host, i, j, 0); 8777 if (instance->pd_list[pd_index].driveState == 8778 MR_PD_STATE_SYSTEM) { 8779 if (!sdev1) 8780 scsi_add_device(host, i, j, 0); 8781 else 8782 scsi_device_put(sdev1); 8783 } else { 8784 if (sdev1) 8785 megasas_remove_scsi_device(sdev1); 8786 } 8787 } 8788 } 8789 } 8790 8791 if (scan_type & SCAN_VD_CHANNEL) { 8792 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8793 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8794 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8795 sdev1 = scsi_device_lookup(host, 8796 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8797 if (instance->ld_ids[ld_index] != 0xff) { 8798 if (!sdev1) 8799 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8800 else 8801 scsi_device_put(sdev1); 8802 } else { 8803 if (sdev1) 8804 megasas_remove_scsi_device(sdev1); 8805 } 8806 } 8807 } 8808 } 8809 8810 } 8811 8812 static void 8813 megasas_aen_polling(struct work_struct *work) 8814 { 8815 struct megasas_aen_event *ev = 8816 container_of(work, struct megasas_aen_event, hotplug_work.work); 8817 struct megasas_instance *instance = ev->instance; 8818 union megasas_evt_class_locale class_locale; 8819 int event_type = 0; 8820 u32 seq_num; 8821 int error; 8822 u8 dcmd_ret = DCMD_SUCCESS; 8823 8824 if (!instance) { 8825 printk(KERN_ERR "invalid instance!\n"); 8826 kfree(ev); 8827 return; 8828 } 8829 8830 /* Don't run the event workqueue thread if OCR is running */ 8831 mutex_lock(&instance->reset_mutex); 8832 8833 instance->ev = NULL; 8834 if (instance->evt_detail) { 8835 megasas_decode_evt(instance); 8836 8837 switch (le32_to_cpu(instance->evt_detail->code)) { 8838 8839 case MR_EVT_PD_INSERTED: 8840 case MR_EVT_PD_REMOVED: 8841 event_type = SCAN_PD_CHANNEL; 8842 break; 8843 8844 case MR_EVT_LD_OFFLINE: 8845 case MR_EVT_CFG_CLEARED: 8846 case MR_EVT_LD_DELETED: 8847 case MR_EVT_LD_CREATED: 8848 event_type = SCAN_VD_CHANNEL; 8849 break; 8850 8851 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8852 case MR_EVT_FOREIGN_CFG_IMPORTED: 8853 case MR_EVT_LD_STATE_CHANGE: 8854 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8855 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8856 instance->host->host_no); 8857 break; 8858 8859 case MR_EVT_CTRL_PROP_CHANGED: 8860 dcmd_ret = megasas_get_ctrl_info(instance); 8861 if (dcmd_ret == DCMD_SUCCESS && 8862 instance->snapdump_wait_time) { 8863 megasas_get_snapdump_properties(instance); 8864 dev_info(&instance->pdev->dev, 8865 "Snap dump wait time\t: %d\n", 8866 instance->snapdump_wait_time); 8867 } 8868 break; 8869 default: 8870 event_type = 0; 8871 break; 8872 } 8873 } else { 8874 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8875 mutex_unlock(&instance->reset_mutex); 8876 kfree(ev); 8877 return; 8878 } 8879 8880 if (event_type) 8881 dcmd_ret = megasas_update_device_list(instance, event_type); 8882 8883 mutex_unlock(&instance->reset_mutex); 8884 8885 if (event_type && dcmd_ret == DCMD_SUCCESS) 8886 megasas_add_remove_devices(instance, event_type); 8887 8888 if (dcmd_ret == 
DCMD_SUCCESS) 8889 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8890 else 8891 seq_num = instance->last_seq_num; 8892 8893 /* Register AEN with FW for latest sequence number plus 1 */ 8894 class_locale.members.reserved = 0; 8895 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8896 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8897 8898 if (instance->aen_cmd != NULL) { 8899 kfree(ev); 8900 return; 8901 } 8902 8903 mutex_lock(&instance->reset_mutex); 8904 error = megasas_register_aen(instance, seq_num, 8905 class_locale.word); 8906 if (error) 8907 dev_err(&instance->pdev->dev, 8908 "register aen failed error %x\n", error); 8909 8910 mutex_unlock(&instance->reset_mutex); 8911 kfree(ev); 8912 } 8913 8914 /** 8915 * megasas_init - Driver load entry point 8916 */ 8917 static int __init megasas_init(void) 8918 { 8919 int rval; 8920 8921 /* 8922 * Booted in kdump kernel, minimize memory footprints by 8923 * disabling few features 8924 */ 8925 if (reset_devices) { 8926 msix_vectors = 1; 8927 rdpq_enable = 0; 8928 dual_qdepth_disable = 1; 8929 poll_queues = 0; 8930 } 8931 8932 /* 8933 * Announce driver version and other information 8934 */ 8935 pr_info("megasas: %s\n", MEGASAS_VERSION); 8936 8937 spin_lock_init(&poll_aen_lock); 8938 8939 support_poll_for_event = 2; 8940 support_device_change = 1; 8941 support_nvme_encapsulation = true; 8942 support_pci_lane_margining = true; 8943 8944 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 8945 8946 /* 8947 * Register character device node 8948 */ 8949 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 8950 8951 if (rval < 0) { 8952 printk(KERN_DEBUG "megasas: failed to open device node\n"); 8953 return rval; 8954 } 8955 8956 megasas_mgmt_majorno = rval; 8957 8958 megasas_init_debugfs(); 8959 8960 /* 8961 * Register ourselves as PCI hotplug module 8962 */ 8963 rval = pci_register_driver(&megasas_pci_driver); 8964 8965 if (rval) { 8966 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 8967 goto err_pcidrv; 8968 } 8969 8970 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 8971 (event_log_level > MFI_EVT_CLASS_DEAD)) { 8972 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 8973 event_log_level = MFI_EVT_CLASS_CRITICAL; 8974 } 8975 8976 rval = driver_create_file(&megasas_pci_driver.driver, 8977 &driver_attr_version); 8978 if (rval) 8979 goto err_dcf_attr_ver; 8980 8981 rval = driver_create_file(&megasas_pci_driver.driver, 8982 &driver_attr_release_date); 8983 if (rval) 8984 goto err_dcf_rel_date; 8985 8986 rval = driver_create_file(&megasas_pci_driver.driver, 8987 &driver_attr_support_poll_for_event); 8988 if (rval) 8989 goto err_dcf_support_poll_for_event; 8990 8991 rval = driver_create_file(&megasas_pci_driver.driver, 8992 &driver_attr_dbg_lvl); 8993 if (rval) 8994 goto err_dcf_dbg_lvl; 8995 rval = driver_create_file(&megasas_pci_driver.driver, 8996 &driver_attr_support_device_change); 8997 if (rval) 8998 goto err_dcf_support_device_change; 8999 9000 rval = driver_create_file(&megasas_pci_driver.driver, 9001 &driver_attr_support_nvme_encapsulation); 9002 if (rval) 9003 goto err_dcf_support_nvme_encapsulation; 9004 9005 rval = driver_create_file(&megasas_pci_driver.driver, 9006 &driver_attr_support_pci_lane_margining); 9007 if (rval) 9008 goto err_dcf_support_pci_lane_margining; 9009 9010 return rval; 9011 9012 err_dcf_support_pci_lane_margining: 9013 driver_remove_file(&megasas_pci_driver.driver, 9014 
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
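/*
 * Illustrative sketch (not part of the driver): how a management
 * application is typically expected to use the character node registered
 * above. The device node name and the 0xFFFFFFFF class/locale value are
 * assumptions made for the example; struct megasas_aen and the ioctl
 * numbers come from megaraid_sas.h. megasas_mgmt_ioctl_aen() requires
 * FASYNC to be enabled first, because megasas_mgmt_fasync() is what sets
 * file->private_data.
 *
 *	int fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 *	struct megasas_aen aen = {
 *		.host_no           = 0,          // SCSI host number of the adapter
 *		.seq_num           = 0,          // first event sequence number of interest
 *		.class_locale_word = 0xFFFFFFFF, // all classes/locales (example value)
 *	};
 *	if (ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) == 0)
 *		pause();	// wait for SIGIO, then fetch events via MEGASAS_IOC_FIRMWARE DCMDs
 */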