// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2003-2013  LSI Corporation
 *  Copyright (c) 2013-2016  Avago Technologies
 *  Copyright (c) 2016-2018  Broadcom Inc.
 *
 *  Authors: Broadcom Inc.
 *           Sreenivas Bagalkote
 *           Sumant Patro
 *           Bo Yang
 *           Adam Radford
 *           Kashyap Desai <kashyap.desai@broadcom.com>
 *           Sumit Saxena <sumit.saxena@broadcom.com>
 *
 *  Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
#include <linux/blk-mq-pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
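/*
 * Illustrative note (not part of the original source): the tunables above are
 * ordinary module parameters, so they can be supplied at load time, e.g.
 *
 *   modprobe megaraid_sas msix_vectors=16 scmd_timeout=60 rdpq_enable=0
 *
 * or persisted through a modprobe.d configuration file. The values shown are
 * hypothetical examples, not recommendations.
 */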
int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");

int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
	"It is not applicable for MFI_SERIES. &\n\t\t"
	"Driver will work in latency mode. &\n\t\t"
	"High iops queues are not allocated &\n\t\t"
	);

int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);
static void
megasas_set_ld_removed_by_fw(struct megasas_instance *instance);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
static DEFINE_SPINLOCK(poll_aen_lock);

extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to three times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 3);
		return ret_val;
	} else {
		return readl(addr);
	}
}

/**
 * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
 * @instance:			Adapter soft state
 * @dcmd:			DCMD frame inside MFI command
 * @dma_addr:			DMA address of buffer to be passed to FW
 * @dma_len:			Length of DMA buffer to be passed to FW
 * @return:			void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}

static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}

/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);

	if (megasas_dbg_lvl & LD_PD_DEBUG)
		dev_info(&instance->pdev->dev,
			 "evt_detail.args.ld.target_id/index %d/%d\n",
			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);

}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale -Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr :	Physical address of cmd
 * @frame_count :	Number of frames for the command
 * @regs :		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale -  For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/*
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_gen2 -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @instance:	Adapter soft state
 * @reg_set:	MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev,
				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
				cmd->frame->dcmd.opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
				cmd->cmd_status_drv != DCMD_INIT);

	return cmd->cmd_status_drv;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;
	u32 opcode;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			opcode = cmd_to_abort->frame->dcmd.opcode;
			dev_err(&instance->pdev->dev,
				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
				opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
				cmd->cmd_status_drv != DCMD_INIT);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return cmd->cmd_status_drv;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame - io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp->device);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);

		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);

	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
 *				and whether it's RW or non RW
 * @cmd:			SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}

/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u16 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
				" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
				instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
				le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
				le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
				"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
				instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
				pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
				le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command -	Queue entry point
 * @shost:			adapter SCSI host
 * @scmd:			SCSI command to be queued
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	u32 ld_tgt_id;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_REQUEUE << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data ||
	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (MEGASAS_IS_LOGICAL(scmd->device)) {
		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd->device) &&
	    (scmd->device->id >= instance->fw_supported_vd_count ||
instance->fw_supported_vd_count || 1845 scmd->device->lun)) { 1846 scmd->result = DID_BAD_TARGET << 16; 1847 goto out_done; 1848 } 1849 1850 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1851 MEGASAS_IS_LOGICAL(scmd->device) && 1852 (!instance->fw_sync_cache_support)) { 1853 scmd->result = DID_OK << 16; 1854 goto out_done; 1855 } 1856 1857 return instance->instancet->build_and_issue_cmd(instance, scmd); 1858 1859 out_done: 1860 scmd->scsi_done(scmd); 1861 return 0; 1862 } 1863 1864 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1865 { 1866 int i; 1867 1868 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1869 1870 if ((megasas_mgmt_info.instance[i]) && 1871 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1872 return megasas_mgmt_info.instance[i]; 1873 } 1874 1875 return NULL; 1876 } 1877 1878 /* 1879 * megasas_set_dynamic_target_properties - 1880 * Device property set by driver may not be static and it is required to be 1881 * updated after OCR 1882 * 1883 * set tm_capable. 1884 * set dma alignment (only for eedp protection enable vd). 1885 * 1886 * @sdev: OS provided scsi device 1887 * 1888 * Returns void 1889 */ 1890 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1891 bool is_target_prop) 1892 { 1893 u16 pd_index = 0, ld; 1894 u32 device_id; 1895 struct megasas_instance *instance; 1896 struct fusion_context *fusion; 1897 struct MR_PRIV_DEVICE *mr_device_priv_data; 1898 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1899 struct MR_LD_RAID *raid; 1900 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1901 1902 instance = megasas_lookup_instance(sdev->host->host_no); 1903 fusion = instance->ctrl_context; 1904 mr_device_priv_data = sdev->hostdata; 1905 1906 if (!fusion || !mr_device_priv_data) 1907 return; 1908 1909 if (MEGASAS_IS_LOGICAL(sdev)) { 1910 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1911 + sdev->id; 1912 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1913 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1914 if (ld >= instance->fw_supported_vd_count) 1915 return; 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1919 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1920 1921 mr_device_priv_data->is_tm_capable = 1922 raid->capability.tmCapable; 1923 1924 if (!raid->flags.isEPD) 1925 sdev->no_write_same = 1; 1926 1927 } else if (instance->use_seqnum_jbod_fp) { 1928 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1929 sdev->id; 1930 pd_sync = (void *)fusion->pd_seq_sync 1931 [(instance->pd_seq_map_id - 1) & 1]; 1932 mr_device_priv_data->is_tm_capable = 1933 pd_sync->seq[pd_index].capability.tmCapable; 1934 } 1935 1936 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1937 /* 1938 * If FW provides a target reset timeout value, driver will use 1939 * it. If not set, fallback to default values. 1940 */ 1941 mr_device_priv_data->target_reset_tmo = 1942 min_t(u8, instance->max_reset_tmo, 1943 instance->tgt_prop->reset_tmo); 1944 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1945 } else { 1946 mr_device_priv_data->target_reset_tmo = 1947 MEGASAS_DEFAULT_TM_TIMEOUT; 1948 mr_device_priv_data->task_abort_tmo = 1949 MEGASAS_DEFAULT_TM_TIMEOUT; 1950 } 1951 } 1952 1953 /* 1954 * megasas_set_nvme_device_properties - 1955 * set nomerges=2 1956 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1957 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 
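 * Together these keep each bio within limits (NVMe page alignment and the
 * device MDTS) that the firmware can hand to the NVMe drive without having
 * to split the IO.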
1958 * 1959 * MR firmware provides value in KB. Caller of this function converts 1960 * kb into bytes. 1961 * 1962 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1963 * MR firmware provides value 128 as (32 * 4K) = 128K. 1964 * 1965 * @sdev: scsi device 1966 * @max_io_size: maximum io transfer size 1967 * 1968 */ 1969 static inline void 1970 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1971 { 1972 struct megasas_instance *instance; 1973 u32 mr_nvme_pg_size; 1974 1975 instance = (struct megasas_instance *)sdev->host->hostdata; 1976 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1977 MR_DEFAULT_NVME_PAGE_SIZE); 1978 1979 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1980 1981 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1982 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1983 } 1984 1985 /* 1986 * megasas_set_fw_assisted_qd - 1987 * set device queue depth to can_queue 1988 * set device queue depth to fw assisted qd 1989 * 1990 * @sdev: scsi device 1991 * @is_target_prop true, if fw provided target properties. 1992 */ 1993 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, 1994 bool is_target_prop) 1995 { 1996 u8 interface_type; 1997 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1998 u32 tgt_device_qd; 1999 struct megasas_instance *instance; 2000 struct MR_PRIV_DEVICE *mr_device_priv_data; 2001 2002 instance = megasas_lookup_instance(sdev->host->host_no); 2003 mr_device_priv_data = sdev->hostdata; 2004 interface_type = mr_device_priv_data->interface_type; 2005 2006 switch (interface_type) { 2007 case SAS_PD: 2008 device_qd = MEGASAS_SAS_QD; 2009 break; 2010 case SATA_PD: 2011 device_qd = MEGASAS_SATA_QD; 2012 break; 2013 case NVME_PD: 2014 device_qd = MEGASAS_NVME_QD; 2015 break; 2016 } 2017 2018 if (is_target_prop) { 2019 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 2020 if (tgt_device_qd) 2021 device_qd = min(instance->host->can_queue, 2022 (int)tgt_device_qd); 2023 } 2024 2025 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) 2026 device_qd = instance->host->can_queue; 2027 2028 scsi_change_queue_depth(sdev, device_qd); 2029 } 2030 2031 /* 2032 * megasas_set_static_target_properties - 2033 * Device property set by driver are static and it is not required to be 2034 * updated after OCR. 2035 * 2036 * set io timeout 2037 * set device queue depth 2038 * set nvme device properties. see - megasas_set_nvme_device_properties 2039 * 2040 * @sdev: scsi device 2041 * @is_target_prop true, if fw provided target properties. 2042 */ 2043 static void megasas_set_static_target_properties(struct scsi_device *sdev, 2044 bool is_target_prop) 2045 { 2046 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 2047 struct megasas_instance *instance; 2048 2049 instance = megasas_lookup_instance(sdev->host->host_no); 2050 2051 /* 2052 * The RAID firmware may require extended timeouts. 2053 */ 2054 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 2055 2056 /* max_io_size_kb will be set to non zero for 2057 * nvme based vd and syspd. 
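 * For example (hypothetical values): max_io_size_kb = 128 from the target
 * properties is passed down as 128 << 10 = 131072 bytes, and
 * megasas_set_nvme_device_properties() then caps the queue at
 * 131072 / 512 = 256 sectors via blk_queue_max_hw_sectors().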
2058 */ 2059 if (is_target_prop) 2060 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2061 2062 if (instance->nvme_page_size && max_io_size_kb) 2063 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 2064 2065 megasas_set_fw_assisted_qd(sdev, is_target_prop); 2066 } 2067 2068 2069 static int megasas_slave_configure(struct scsi_device *sdev) 2070 { 2071 u16 pd_index = 0; 2072 struct megasas_instance *instance; 2073 int ret_target_prop = DCMD_FAILED; 2074 bool is_target_prop = false; 2075 2076 instance = megasas_lookup_instance(sdev->host->host_no); 2077 if (instance->pd_list_not_supported) { 2078 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2079 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2080 sdev->id; 2081 if (instance->pd_list[pd_index].driveState != 2082 MR_PD_STATE_SYSTEM) 2083 return -ENXIO; 2084 } 2085 } 2086 2087 mutex_lock(&instance->reset_mutex); 2088 /* Send DCMD to Firmware and cache the information */ 2089 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2090 megasas_get_pd_info(instance, sdev); 2091 2092 /* Some ventura firmware may not have instance->nvme_page_size set. 2093 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2094 */ 2095 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2096 ret_target_prop = megasas_get_target_prop(instance, sdev); 2097 2098 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2099 megasas_set_static_target_properties(sdev, is_target_prop); 2100 2101 /* This sdev property may change post OCR */ 2102 megasas_set_dynamic_target_properties(sdev, is_target_prop); 2103 2104 mutex_unlock(&instance->reset_mutex); 2105 2106 return 0; 2107 } 2108 2109 static int megasas_slave_alloc(struct scsi_device *sdev) 2110 { 2111 u16 pd_index = 0, ld_tgt_id; 2112 struct megasas_instance *instance ; 2113 struct MR_PRIV_DEVICE *mr_device_priv_data; 2114 2115 instance = megasas_lookup_instance(sdev->host->host_no); 2116 if (!MEGASAS_IS_LOGICAL(sdev)) { 2117 /* 2118 * Open the OS scan to the SYSTEM PD 2119 */ 2120 pd_index = 2121 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2122 sdev->id; 2123 if ((instance->pd_list_not_supported || 2124 instance->pd_list[pd_index].driveState == 2125 MR_PD_STATE_SYSTEM)) { 2126 goto scan_target; 2127 } 2128 return -ENXIO; 2129 } 2130 2131 scan_target: 2132 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2133 GFP_KERNEL); 2134 if (!mr_device_priv_data) 2135 return -ENOMEM; 2136 2137 if (MEGASAS_IS_LOGICAL(sdev)) { 2138 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2139 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; 2140 if (megasas_dbg_lvl & LD_PD_DEBUG) 2141 sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); 2142 } 2143 2144 sdev->hostdata = mr_device_priv_data; 2145 2146 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2147 instance->r1_ldio_hint_default); 2148 return 0; 2149 } 2150 2151 static void megasas_slave_destroy(struct scsi_device *sdev) 2152 { 2153 u16 ld_tgt_id; 2154 struct megasas_instance *instance; 2155 2156 instance = megasas_lookup_instance(sdev->host->host_no); 2157 2158 if (MEGASAS_IS_LOGICAL(sdev)) { 2159 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2160 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; 2161 if (megasas_dbg_lvl & LD_PD_DEBUG) 2162 sdev_printk(KERN_INFO, sdev, 2163 "LD target ID %d removed from OS stack\n", ld_tgt_id); 2164 } 2165 2166 kfree(sdev->hostdata); 2167 sdev->hostdata = NULL; 2168 } 2169 2170 /* 2171 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls 
after a 2172 * kill adapter 2173 * @instance: Adapter soft state 2174 * 2175 */ 2176 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2177 { 2178 int i; 2179 struct megasas_cmd *cmd_mfi; 2180 struct megasas_cmd_fusion *cmd_fusion; 2181 struct fusion_context *fusion = instance->ctrl_context; 2182 2183 /* Find all outstanding ioctls */ 2184 if (fusion) { 2185 for (i = 0; i < instance->max_fw_cmds; i++) { 2186 cmd_fusion = fusion->cmd_list[i]; 2187 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2188 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2189 if (cmd_mfi->sync_cmd && 2190 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2191 cmd_mfi->frame->hdr.cmd_status = 2192 MFI_STAT_WRONG_STATE; 2193 megasas_complete_cmd(instance, 2194 cmd_mfi, DID_OK); 2195 } 2196 } 2197 } 2198 } else { 2199 for (i = 0; i < instance->max_fw_cmds; i++) { 2200 cmd_mfi = instance->cmd_list[i]; 2201 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2202 MFI_CMD_ABORT) 2203 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2204 } 2205 } 2206 } 2207 2208 2209 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2210 { 2211 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2212 dev_warn(&instance->pdev->dev, 2213 "Adapter already dead, skipping kill HBA\n"); 2214 return; 2215 } 2216 2217 /* Set critical error to block I/O & ioctls in case caller didn't */ 2218 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2219 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2220 msleep(1000); 2221 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2222 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2223 (instance->adapter_type != MFI_SERIES)) { 2224 if (!instance->requestorId) { 2225 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2226 /* Flush */ 2227 readl(&instance->reg_set->doorbell); 2228 } 2229 if (instance->requestorId && instance->peerIsPresent) 2230 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2231 } else { 2232 writel(MFI_STOP_ADP, 2233 &instance->reg_set->inbound_doorbell); 2234 } 2235 /* Complete outstanding ioctls when adapter is killed */ 2236 megasas_complete_outstanding_ioctls(instance); 2237 } 2238 2239 /** 2240 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2241 * restored to max value 2242 * @instance: Adapter soft state 2243 * 2244 */ 2245 void 2246 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2247 { 2248 unsigned long flags; 2249 2250 if (instance->flag & MEGASAS_FW_BUSY 2251 && time_after(jiffies, instance->last_time + 5 * HZ) 2252 && atomic_read(&instance->fw_outstanding) < 2253 instance->throttlequeuedepth + 1) { 2254 2255 spin_lock_irqsave(instance->host->host_lock, flags); 2256 instance->flag &= ~MEGASAS_FW_BUSY; 2257 2258 instance->host->can_queue = instance->cur_can_queue; 2259 spin_unlock_irqrestore(instance->host->host_lock, flags); 2260 } 2261 } 2262 2263 /** 2264 * megasas_complete_cmd_dpc - Returns FW's controller structure 2265 * @instance_addr: Address of adapter soft state 2266 * 2267 * Tasklet to complete cmds 2268 */ 2269 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2270 { 2271 u32 producer; 2272 u32 consumer; 2273 u32 context; 2274 struct megasas_cmd *cmd; 2275 struct megasas_instance *instance = 2276 (struct megasas_instance *)instance_addr; 2277 unsigned long flags; 2278 2279 /* If we have already declared adapter dead, donot complete cmds */ 2280 if 
(atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2281 return; 2282 2283 spin_lock_irqsave(&instance->completion_lock, flags); 2284 2285 producer = le32_to_cpu(*instance->producer); 2286 consumer = le32_to_cpu(*instance->consumer); 2287 2288 while (consumer != producer) { 2289 context = le32_to_cpu(instance->reply_queue[consumer]); 2290 if (context >= instance->max_fw_cmds) { 2291 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2292 context); 2293 BUG(); 2294 } 2295 2296 cmd = instance->cmd_list[context]; 2297 2298 megasas_complete_cmd(instance, cmd, DID_OK); 2299 2300 consumer++; 2301 if (consumer == (instance->max_fw_cmds + 1)) { 2302 consumer = 0; 2303 } 2304 } 2305 2306 *instance->consumer = cpu_to_le32(producer); 2307 2308 spin_unlock_irqrestore(&instance->completion_lock, flags); 2309 2310 /* 2311 * Check if we can restore can_queue 2312 */ 2313 megasas_check_and_restore_queue_depth(instance); 2314 } 2315 2316 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2317 2318 /** 2319 * megasas_start_timer - Initializes sriov heartbeat timer object 2320 * @instance: Adapter soft state 2321 * 2322 */ 2323 void megasas_start_timer(struct megasas_instance *instance) 2324 { 2325 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2326 2327 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2328 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2329 add_timer(timer); 2330 } 2331 2332 static void 2333 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2334 2335 static void 2336 process_fw_state_change_wq(struct work_struct *work); 2337 2338 static void megasas_do_ocr(struct megasas_instance *instance) 2339 { 2340 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2341 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2342 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2343 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2344 } 2345 instance->instancet->disable_intr(instance); 2346 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2347 instance->issuepend_done = 0; 2348 2349 atomic_set(&instance->fw_outstanding, 0); 2350 megasas_internal_reset_defer_cmds(instance); 2351 process_fw_state_change_wq(&instance->work_init); 2352 } 2353 2354 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2355 int initial) 2356 { 2357 struct megasas_cmd *cmd; 2358 struct megasas_dcmd_frame *dcmd; 2359 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2360 dma_addr_t new_affiliation_111_h; 2361 int ld, retval = 0; 2362 u8 thisVf; 2363 2364 cmd = megasas_get_cmd(instance); 2365 2366 if (!cmd) { 2367 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2368 "Failed to get cmd for scsi%d\n", 2369 instance->host->host_no); 2370 return -ENOMEM; 2371 } 2372 2373 dcmd = &cmd->frame->dcmd; 2374 2375 if (!instance->vf_affiliation_111) { 2376 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2377 "affiliation for scsi%d\n", instance->host->host_no); 2378 megasas_return_cmd(instance, cmd); 2379 return -ENOMEM; 2380 } 2381 2382 if (initial) 2383 memset(instance->vf_affiliation_111, 0, 2384 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2385 else { 2386 new_affiliation_111 = 2387 dma_alloc_coherent(&instance->pdev->dev, 2388 sizeof(struct MR_LD_VF_AFFILIATION_111), 2389 &new_affiliation_111_h, GFP_KERNEL); 2390 if (!new_affiliation_111) { 2391 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: 
Couldn't allocate " 2392 "memory for new affiliation for scsi%d\n", 2393 instance->host->host_no); 2394 megasas_return_cmd(instance, cmd); 2395 return -ENOMEM; 2396 } 2397 } 2398 2399 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2400 2401 dcmd->cmd = MFI_CMD_DCMD; 2402 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2403 dcmd->sge_count = 1; 2404 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2405 dcmd->timeout = 0; 2406 dcmd->pad_0 = 0; 2407 dcmd->data_xfer_len = 2408 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2409 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2410 2411 if (initial) 2412 dcmd->sgl.sge32[0].phys_addr = 2413 cpu_to_le32(instance->vf_affiliation_111_h); 2414 else 2415 dcmd->sgl.sge32[0].phys_addr = 2416 cpu_to_le32(new_affiliation_111_h); 2417 2418 dcmd->sgl.sge32[0].length = cpu_to_le32( 2419 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2420 2421 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2422 "scsi%d\n", instance->host->host_no); 2423 2424 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2425 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2426 " failed with status 0x%x for scsi%d\n", 2427 dcmd->cmd_status, instance->host->host_no); 2428 retval = 1; /* Do a scan if we couldn't get affiliation */ 2429 goto out; 2430 } 2431 2432 if (!initial) { 2433 thisVf = new_affiliation_111->thisVf; 2434 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2435 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2436 new_affiliation_111->map[ld].policy[thisVf]) { 2437 dev_warn(&instance->pdev->dev, "SR-IOV: " 2438 "Got new LD/VF affiliation for scsi%d\n", 2439 instance->host->host_no); 2440 memcpy(instance->vf_affiliation_111, 2441 new_affiliation_111, 2442 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2443 retval = 1; 2444 goto out; 2445 } 2446 } 2447 out: 2448 if (new_affiliation_111) { 2449 dma_free_coherent(&instance->pdev->dev, 2450 sizeof(struct MR_LD_VF_AFFILIATION_111), 2451 new_affiliation_111, 2452 new_affiliation_111_h); 2453 } 2454 2455 megasas_return_cmd(instance, cmd); 2456 2457 return retval; 2458 } 2459 2460 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2461 int initial) 2462 { 2463 struct megasas_cmd *cmd; 2464 struct megasas_dcmd_frame *dcmd; 2465 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2466 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2467 dma_addr_t new_affiliation_h; 2468 int i, j, retval = 0, found = 0, doscan = 0; 2469 u8 thisVf; 2470 2471 cmd = megasas_get_cmd(instance); 2472 2473 if (!cmd) { 2474 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2475 "Failed to get cmd for scsi%d\n", 2476 instance->host->host_no); 2477 return -ENOMEM; 2478 } 2479 2480 dcmd = &cmd->frame->dcmd; 2481 2482 if (!instance->vf_affiliation) { 2483 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2484 "affiliation for scsi%d\n", instance->host->host_no); 2485 megasas_return_cmd(instance, cmd); 2486 return -ENOMEM; 2487 } 2488 2489 if (initial) 2490 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2491 sizeof(struct MR_LD_VF_AFFILIATION)); 2492 else { 2493 new_affiliation = 2494 dma_alloc_coherent(&instance->pdev->dev, 2495 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2496 &new_affiliation_h, GFP_KERNEL); 2497 if (!new_affiliation) { 2498 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2499 "memory for new affiliation for scsi%d\n", 2500 
instance->host->host_no); 2501 megasas_return_cmd(instance, cmd); 2502 return -ENOMEM; 2503 } 2504 } 2505 2506 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2507 2508 dcmd->cmd = MFI_CMD_DCMD; 2509 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2510 dcmd->sge_count = 1; 2511 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2512 dcmd->timeout = 0; 2513 dcmd->pad_0 = 0; 2514 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2515 sizeof(struct MR_LD_VF_AFFILIATION)); 2516 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2517 2518 if (initial) 2519 dcmd->sgl.sge32[0].phys_addr = 2520 cpu_to_le32(instance->vf_affiliation_h); 2521 else 2522 dcmd->sgl.sge32[0].phys_addr = 2523 cpu_to_le32(new_affiliation_h); 2524 2525 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2526 sizeof(struct MR_LD_VF_AFFILIATION)); 2527 2528 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2529 "scsi%d\n", instance->host->host_no); 2530 2531 2532 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2533 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2534 " failed with status 0x%x for scsi%d\n", 2535 dcmd->cmd_status, instance->host->host_no); 2536 retval = 1; /* Do a scan if we couldn't get affiliation */ 2537 goto out; 2538 } 2539 2540 if (!initial) { 2541 if (!new_affiliation->ldCount) { 2542 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2543 "affiliation for passive path for scsi%d\n", 2544 instance->host->host_no); 2545 retval = 1; 2546 goto out; 2547 } 2548 newmap = new_affiliation->map; 2549 savedmap = instance->vf_affiliation->map; 2550 thisVf = new_affiliation->thisVf; 2551 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2552 found = 0; 2553 for (j = 0; j < instance->vf_affiliation->ldCount; 2554 j++) { 2555 if (newmap->ref.targetId == 2556 savedmap->ref.targetId) { 2557 found = 1; 2558 if (newmap->policy[thisVf] != 2559 savedmap->policy[thisVf]) { 2560 doscan = 1; 2561 goto out; 2562 } 2563 } 2564 savedmap = (struct MR_LD_VF_MAP *) 2565 ((unsigned char *)savedmap + 2566 savedmap->size); 2567 } 2568 if (!found && newmap->policy[thisVf] != 2569 MR_LD_ACCESS_HIDDEN) { 2570 doscan = 1; 2571 goto out; 2572 } 2573 newmap = (struct MR_LD_VF_MAP *) 2574 ((unsigned char *)newmap + newmap->size); 2575 } 2576 2577 newmap = new_affiliation->map; 2578 savedmap = instance->vf_affiliation->map; 2579 2580 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2581 found = 0; 2582 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2583 if (savedmap->ref.targetId == 2584 newmap->ref.targetId) { 2585 found = 1; 2586 if (savedmap->policy[thisVf] != 2587 newmap->policy[thisVf]) { 2588 doscan = 1; 2589 goto out; 2590 } 2591 } 2592 newmap = (struct MR_LD_VF_MAP *) 2593 ((unsigned char *)newmap + 2594 newmap->size); 2595 } 2596 if (!found && savedmap->policy[thisVf] != 2597 MR_LD_ACCESS_HIDDEN) { 2598 doscan = 1; 2599 goto out; 2600 } 2601 savedmap = (struct MR_LD_VF_MAP *) 2602 ((unsigned char *)savedmap + 2603 savedmap->size); 2604 } 2605 } 2606 out: 2607 if (doscan) { 2608 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2609 "affiliation for scsi%d\n", instance->host->host_no); 2610 memcpy(instance->vf_affiliation, new_affiliation, 2611 new_affiliation->size); 2612 retval = 1; 2613 } 2614 2615 if (new_affiliation) 2616 dma_free_coherent(&instance->pdev->dev, 2617 (MAX_LOGICAL_DRIVES + 1) * 2618 sizeof(struct MR_LD_VF_AFFILIATION), 2619 new_affiliation, new_affiliation_h); 2620 megasas_return_cmd(instance, cmd); 2621 2622 return 
retval; 2623 } 2624 2625 /* This function will get the current SR-IOV LD/VF affiliation */ 2626 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2627 int initial) 2628 { 2629 int retval; 2630 2631 if (instance->PlasmaFW111) 2632 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2633 else 2634 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2635 return retval; 2636 } 2637 2638 /* This function will tell FW to start the SR-IOV heartbeat */ 2639 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2640 int initial) 2641 { 2642 struct megasas_cmd *cmd; 2643 struct megasas_dcmd_frame *dcmd; 2644 int retval = 0; 2645 2646 cmd = megasas_get_cmd(instance); 2647 2648 if (!cmd) { 2649 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2650 "Failed to get cmd for scsi%d\n", 2651 instance->host->host_no); 2652 return -ENOMEM; 2653 } 2654 2655 dcmd = &cmd->frame->dcmd; 2656 2657 if (initial) { 2658 instance->hb_host_mem = 2659 dma_alloc_coherent(&instance->pdev->dev, 2660 sizeof(struct MR_CTRL_HB_HOST_MEM), 2661 &instance->hb_host_mem_h, 2662 GFP_KERNEL); 2663 if (!instance->hb_host_mem) { 2664 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2665 " memory for heartbeat host memory for scsi%d\n", 2666 instance->host->host_no); 2667 retval = -ENOMEM; 2668 goto out; 2669 } 2670 } 2671 2672 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2673 2674 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2675 dcmd->cmd = MFI_CMD_DCMD; 2676 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2677 dcmd->sge_count = 1; 2678 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2679 dcmd->timeout = 0; 2680 dcmd->pad_0 = 0; 2681 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2682 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2683 2684 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2685 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2686 2687 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2688 instance->host->host_no); 2689 2690 if ((instance->adapter_type != MFI_SERIES) && 2691 !instance->mask_interrupts) 2692 retval = megasas_issue_blocked_cmd(instance, cmd, 2693 MEGASAS_ROUTINE_WAIT_TIME_VF); 2694 else 2695 retval = megasas_issue_polled(instance, cmd); 2696 2697 if (retval) { 2698 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2699 "_MEM_ALLOC DCMD %s for scsi%d\n", 2700 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 
2701 "timed out" : "failed", instance->host->host_no); 2702 retval = 1; 2703 } 2704 2705 out: 2706 megasas_return_cmd(instance, cmd); 2707 2708 return retval; 2709 } 2710 2711 /* Handler for SR-IOV heartbeat */ 2712 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2713 { 2714 struct megasas_instance *instance = 2715 from_timer(instance, t, sriov_heartbeat_timer); 2716 2717 if (instance->hb_host_mem->HB.fwCounter != 2718 instance->hb_host_mem->HB.driverCounter) { 2719 instance->hb_host_mem->HB.driverCounter = 2720 instance->hb_host_mem->HB.fwCounter; 2721 mod_timer(&instance->sriov_heartbeat_timer, 2722 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2723 } else { 2724 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2725 "completed for scsi%d\n", instance->host->host_no); 2726 schedule_work(&instance->work_init); 2727 } 2728 } 2729 2730 /** 2731 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2732 * @instance: Adapter soft state 2733 * 2734 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2735 * complete all its outstanding commands. Returns error if one or more IOs 2736 * are pending after this time period. It also marks the controller dead. 2737 */ 2738 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2739 { 2740 int i, sl, outstanding; 2741 u32 reset_index; 2742 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2743 unsigned long flags; 2744 struct list_head clist_local; 2745 struct megasas_cmd *reset_cmd; 2746 u32 fw_state; 2747 2748 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2749 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2750 __func__, __LINE__); 2751 return FAILED; 2752 } 2753 2754 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2755 2756 INIT_LIST_HEAD(&clist_local); 2757 spin_lock_irqsave(&instance->hba_lock, flags); 2758 list_splice_init(&instance->internal_reset_pending_q, 2759 &clist_local); 2760 spin_unlock_irqrestore(&instance->hba_lock, flags); 2761 2762 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2763 for (i = 0; i < wait_time; i++) { 2764 msleep(1000); 2765 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2766 break; 2767 } 2768 2769 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2770 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2771 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2772 return FAILED; 2773 } 2774 2775 reset_index = 0; 2776 while (!list_empty(&clist_local)) { 2777 reset_cmd = list_entry((&clist_local)->next, 2778 struct megasas_cmd, list); 2779 list_del_init(&reset_cmd->list); 2780 if (reset_cmd->scmd) { 2781 reset_cmd->scmd->result = DID_REQUEUE << 16; 2782 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2783 reset_index, reset_cmd, 2784 reset_cmd->scmd->cmnd[0]); 2785 2786 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2787 megasas_return_cmd(instance, reset_cmd); 2788 } else if (reset_cmd->sync_cmd) { 2789 dev_notice(&instance->pdev->dev, "%p synch cmds" 2790 "reset queue\n", 2791 reset_cmd); 2792 2793 reset_cmd->cmd_status_drv = DCMD_INIT; 2794 instance->instancet->fire_cmd(instance, 2795 reset_cmd->frame_phys_addr, 2796 0, instance->reg_set); 2797 } else { 2798 dev_notice(&instance->pdev->dev, "%p unexpected" 2799 "cmds lst\n", 2800 reset_cmd); 2801 } 2802 reset_index++; 2803 } 2804 2805 return SUCCESS; 2806 } 2807 2808 for (i = 0; i < resetwaittime; i++) { 2809 outstanding = atomic_read(&instance->fw_outstanding); 
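		/*
		 * Exit as soon as nothing is outstanding; otherwise, every
		 * MEGASAS_RESET_NOTICE_INTERVAL iterations (roughly seconds),
		 * log progress and run megasas_complete_cmd_dpc() directly in
		 * case completions are not being reaped through the interrupt
		 * path.
		 */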
2810 2811 if (!outstanding) 2812 break; 2813 2814 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2815 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2816 "commands to complete\n",i,outstanding); 2817 /* 2818 * Call cmd completion routine. Cmd to be 2819 * be completed directly without depending on isr. 2820 */ 2821 megasas_complete_cmd_dpc((unsigned long)instance); 2822 } 2823 2824 msleep(1000); 2825 } 2826 2827 i = 0; 2828 outstanding = atomic_read(&instance->fw_outstanding); 2829 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2830 2831 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2832 goto no_outstanding; 2833 2834 if (instance->disableOnlineCtrlReset) 2835 goto kill_hba_and_failed; 2836 do { 2837 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2838 dev_info(&instance->pdev->dev, 2839 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2840 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2841 if (i == 3) 2842 goto kill_hba_and_failed; 2843 megasas_do_ocr(instance); 2844 2845 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2846 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2847 __func__, __LINE__); 2848 return FAILED; 2849 } 2850 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2851 __func__, __LINE__); 2852 2853 for (sl = 0; sl < 10; sl++) 2854 msleep(500); 2855 2856 outstanding = atomic_read(&instance->fw_outstanding); 2857 2858 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2859 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2860 goto no_outstanding; 2861 } 2862 i++; 2863 } while (i <= 3); 2864 2865 no_outstanding: 2866 2867 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2868 __func__, __LINE__); 2869 return SUCCESS; 2870 2871 kill_hba_and_failed: 2872 2873 /* Reset not supported, kill adapter */ 2874 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2875 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2876 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2877 atomic_read(&instance->fw_outstanding)); 2878 megasas_dump_pending_frames(instance); 2879 megaraid_sas_kill_hba(instance); 2880 2881 return FAILED; 2882 } 2883 2884 /** 2885 * megasas_generic_reset - Generic reset routine 2886 * @scmd: Mid-layer SCSI command 2887 * 2888 * This routine implements a generic reset handler for device, bus and host 2889 * reset requests. Device, bus and host specific reset handlers can use this 2890 * function after they do their specific tasks. 
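 *
 * On MFI (pre-Fusion) adapters this simply waits for outstanding commands
 * via megasas_wait_for_outstanding() and returns SUCCESS or FAILED; it does
 * not issue a device, bus or host level reset of its own.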
2891 */ 2892 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2893 { 2894 int ret_val; 2895 struct megasas_instance *instance; 2896 2897 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2898 2899 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2900 scmd->cmnd[0], scmd->retries); 2901 2902 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2903 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2904 return FAILED; 2905 } 2906 2907 ret_val = megasas_wait_for_outstanding(instance); 2908 if (ret_val == SUCCESS) 2909 dev_notice(&instance->pdev->dev, "reset successful\n"); 2910 else 2911 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2912 2913 return ret_val; 2914 } 2915 2916 /** 2917 * megasas_reset_timer - quiesce the adapter if required 2918 * @scmd: scsi cmnd 2919 * 2920 * Sets the FW busy flag and reduces the host->can_queue if the 2921 * cmd has not been completed within the timeout period. 2922 */ 2923 static enum 2924 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2925 { 2926 struct megasas_instance *instance; 2927 unsigned long flags; 2928 2929 if (time_after(jiffies, scmd->jiffies_at_alloc + 2930 (scmd_timeout * 2) * HZ)) { 2931 return BLK_EH_DONE; 2932 } 2933 2934 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2935 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2936 /* FW is busy, throttle IO */ 2937 spin_lock_irqsave(instance->host->host_lock, flags); 2938 2939 instance->host->can_queue = instance->throttlequeuedepth; 2940 instance->last_time = jiffies; 2941 instance->flag |= MEGASAS_FW_BUSY; 2942 2943 spin_unlock_irqrestore(instance->host->host_lock, flags); 2944 } 2945 return BLK_EH_RESET_TIMER; 2946 } 2947 2948 /** 2949 * megasas_dump - This function will print hexdump of provided buffer. 2950 * @buf: Buffer to be dumped 2951 * @sz: Size in bytes 2952 * @format: Different formats of dumping e.g. format=n will 2953 * cause only 'n' 32 bit words to be dumped in a single 2954 * line. 
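 *
 * For example, megasas_dump(cmd->io_request,
 * MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8), as used by
 * megasas_dump_fusion_io(), prints the IO frame as rows of eight 32-bit
 * words, each row prefixed with its byte offset.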
2955 */
2956 inline void
2957 megasas_dump(void *buf, int sz, int format)
2958 {
2959 int i;
2960 __le32 *buf_loc = (__le32 *)buf;
2961
2962 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2963 if ((i % format) == 0) {
2964 if (i != 0)
2965 printk(KERN_CONT "\n");
2966 printk(KERN_CONT "%08x: ", (i * 4));
2967 }
2968 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2969 }
2970 printk(KERN_CONT "\n");
2971 }
2972
2973 /**
2974 * megasas_dump_reg_set - This function will print hexdump of register set
2975 * @reg_set: Register set to be dumped
2976 */
2977 inline void
2978 megasas_dump_reg_set(void __iomem *reg_set)
2979 {
2980 unsigned int i, sz = 256;
2981 u32 __iomem *reg = (u32 __iomem *)reg_set;
2982
2983 for (i = 0; i < (sz / sizeof(u32)); i++)
2984 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2985 }
2986
2987 /**
2988 * megasas_dump_fusion_io - This function will print key details
2989 * of SCSI IO
2990 * @scmd: SCSI command pointer of SCSI IO
2991 */
2992 void
2993 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2994 {
2995 struct megasas_cmd_fusion *cmd;
2996 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2997 struct megasas_instance *instance;
2998
2999 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
3000 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3001
3002 scmd_printk(KERN_INFO, scmd,
3003 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
3004 scmd, scmd->retries, scmd->allowed);
3005 scsi_print_command(scmd);
3006
3007 if (cmd) {
3008 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3009 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3010 scmd_printk(KERN_INFO, scmd,
3011 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
3012 req_desc->SCSIIO.RequestFlags,
3013 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3014 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3015
3016 printk(KERN_INFO "IO request frame:\n");
3017 megasas_dump(cmd->io_request,
3018 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3019 printk(KERN_INFO "Chain frame:\n");
3020 megasas_dump(cmd->sg_frame,
3021 instance->max_chain_frame_sz, 8);
3022 }
3023
3024 }
3025
3026 /*
3027 * megasas_dump_sys_regs - This function will dump system registers through
3028 * sysfs.
3029 * @reg_set: Pointer to System register set.
3030 * @buf: Buffer to which output is to be written.
3031 * @return: Number of bytes written to buffer.
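 *
 * Output is bounded by scnprintf() against PAGE_SIZE; with sz = 256 bytes
 * of register space this amounts to 64 "offset: value" lines.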
3032 */
3033 static inline ssize_t
3034 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3035 {
3036 unsigned int i, sz = 256;
3037 int bytes_wrote = 0;
3038 char *loc = (char *)buf;
3039 u32 __iomem *reg = (u32 __iomem *)reg_set;
3040
3041 for (i = 0; i < sz / sizeof(u32); i++) {
3042 bytes_wrote += scnprintf(loc + bytes_wrote,
3043 PAGE_SIZE - bytes_wrote,
3044 "%08x: %08x\n", (i * 4),
3045 readl(&reg[i]));
3046 }
3047 return bytes_wrote;
3048 }
3049
3050 /**
3051 * megasas_reset_bus_host - Bus & host reset handler entry point
3052 * @scmd: Mid-layer SCSI command
3053 */
3054 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3055 {
3056 int ret;
3057 struct megasas_instance *instance;
3058
3059 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3060
3061 scmd_printk(KERN_INFO, scmd,
3062 "OCR is requested due to IO timeout!!\n");
3063
3064 scmd_printk(KERN_INFO, scmd,
3065 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3066 scmd->device->host->shost_state,
3067 scsi_host_busy(scmd->device->host),
3068 atomic_read(&instance->fw_outstanding));
3069 /*
3070 * First wait for all commands to complete
3071 */
3072 if (instance->adapter_type == MFI_SERIES) {
3073 ret = megasas_generic_reset(scmd);
3074 } else {
3075 megasas_dump_fusion_io(scmd);
3076 ret = megasas_reset_fusion(scmd->device->host,
3077 SCSIIO_TIMEOUT_OCR);
3078 }
3079
3080 return ret;
3081 }
3082
3083 /**
3084 * megasas_task_abort - Issues task abort request to firmware
3085 * (supported only for fusion adapters)
3086 * @scmd: SCSI command pointer
3087 */
3088 static int megasas_task_abort(struct scsi_cmnd *scmd)
3089 {
3090 int ret;
3091 struct megasas_instance *instance;
3092
3093 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3094
3095 if (instance->adapter_type != MFI_SERIES)
3096 ret = megasas_task_abort_fusion(scmd);
3097 else {
3098 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3099 ret = FAILED;
3100 }
3101
3102 return ret;
3103 }
3104
3105 /**
3106 * megasas_reset_target: Issues target reset request to firmware
3107 * (supported only for fusion adapters)
3108 * @scmd: SCSI command pointer
3109 */
3110 static int megasas_reset_target(struct scsi_cmnd *scmd)
3111 {
3112 int ret;
3113 struct megasas_instance *instance;
3114
3115 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3116
3117 if (instance->adapter_type != MFI_SERIES)
3118 ret = megasas_reset_target_fusion(scmd);
3119 else {
3120 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3121 ret = FAILED;
3122 }
3123
3124 return ret;
3125 }
3126
3127 /**
3128 * megasas_bios_param - Returns disk geometry for a disk
3129 * @sdev: device handle
3130 * @bdev: block device
3131 * @capacity: drive capacity
3132 * @geom: geometry parameters
3133 */
3134 static int
3135 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3136 sector_t capacity, int geom[])
3137 {
3138 int heads;
3139 int sectors;
3140 sector_t cylinders;
3141 unsigned long tmp;
3142
3143 /* Default heads (64) & sectors (32) */
3144 heads = 64;
3145 sectors = 32;
3146
3147 tmp = heads * sectors;
3148 cylinders = capacity;
3149
3150 sector_div(cylinders, tmp);
3151
3152 /*
3153 * Handle extended translation size for logical drives > 1Gb
3154 */
3155
3156 if (capacity >= 0x200000) {
3157 heads = 255;
3158 sectors = 63;
3159 tmp = heads*sectors;
3160 cylinders = capacity;
3161 sector_div(cylinders, tmp);
3162 }
3163
3164 geom[0] = heads;
3165 geom[1] = sectors; 3166
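	/*
	 * e.g. an LD of 0xffffffff sectors (~2 TiB) is reported as 255 heads,
	 * 63 sectors and 4294967295 / (255 * 63) = ~267349 cylinders.
	 */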
geom[2] = cylinders; 3167 3168 return 0; 3169 } 3170 3171 static int megasas_map_queues(struct Scsi_Host *shost) 3172 { 3173 struct megasas_instance *instance; 3174 int qoff = 0, offset; 3175 struct blk_mq_queue_map *map; 3176 3177 instance = (struct megasas_instance *)shost->hostdata; 3178 3179 if (shost->nr_hw_queues == 1) 3180 return 0; 3181 3182 offset = instance->low_latency_index_start; 3183 3184 /* Setup Default hctx */ 3185 map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3186 map->nr_queues = instance->msix_vectors - offset; 3187 map->queue_offset = 0; 3188 blk_mq_pci_map_queues(map, instance->pdev, offset); 3189 qoff += map->nr_queues; 3190 offset += map->nr_queues; 3191 3192 /* Setup Poll hctx */ 3193 map = &shost->tag_set.map[HCTX_TYPE_POLL]; 3194 map->nr_queues = instance->iopoll_q_count; 3195 if (map->nr_queues) { 3196 /* 3197 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3198 * affinity), so use the regular blk-mq cpu mapping 3199 */ 3200 map->queue_offset = qoff; 3201 blk_mq_map_queues(map); 3202 } 3203 3204 return 0; 3205 } 3206 3207 static void megasas_aen_polling(struct work_struct *work); 3208 3209 /** 3210 * megasas_service_aen - Processes an event notification 3211 * @instance: Adapter soft state 3212 * @cmd: AEN command completed by the ISR 3213 * 3214 * For AEN, driver sends a command down to FW that is held by the FW till an 3215 * event occurs. When an event of interest occurs, FW completes the command 3216 * that it was previously holding. 3217 * 3218 * This routines sends SIGIO signal to processes that have registered with the 3219 * driver for AEN. 3220 */ 3221 static void 3222 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3223 { 3224 unsigned long flags; 3225 3226 /* 3227 * Don't signal app if it is just an aborted previously registered aen 3228 */ 3229 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3230 spin_lock_irqsave(&poll_aen_lock, flags); 3231 megasas_poll_wait_aen = 1; 3232 spin_unlock_irqrestore(&poll_aen_lock, flags); 3233 wake_up(&megasas_poll_wait); 3234 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3235 } 3236 else 3237 cmd->abort_aen = 0; 3238 3239 instance->aen_cmd = NULL; 3240 3241 megasas_return_cmd(instance, cmd); 3242 3243 if ((instance->unload == 0) && 3244 ((instance->issuepend_done == 1))) { 3245 struct megasas_aen_event *ev; 3246 3247 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3248 if (!ev) { 3249 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3250 } else { 3251 ev->instance = instance; 3252 instance->ev = ev; 3253 INIT_DELAYED_WORK(&ev->hotplug_work, 3254 megasas_aen_polling); 3255 schedule_delayed_work(&ev->hotplug_work, 0); 3256 } 3257 } 3258 } 3259 3260 static ssize_t 3261 fw_crash_buffer_store(struct device *cdev, 3262 struct device_attribute *attr, const char *buf, size_t count) 3263 { 3264 struct Scsi_Host *shost = class_to_shost(cdev); 3265 struct megasas_instance *instance = 3266 (struct megasas_instance *) shost->hostdata; 3267 int val = 0; 3268 unsigned long flags; 3269 3270 if (kstrtoint(buf, 0, &val) != 0) 3271 return -EINVAL; 3272 3273 spin_lock_irqsave(&instance->crashdump_lock, flags); 3274 instance->fw_crash_buffer_offset = val; 3275 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3276 return strlen(buf); 3277 } 3278 3279 static ssize_t 3280 fw_crash_buffer_show(struct device *cdev, 3281 struct device_attribute *attr, char *buf) 3282 { 3283 struct Scsi_Host *shost = class_to_shost(cdev); 3284 struct megasas_instance *instance = 3285 (struct 
megasas_instance *) shost->hostdata; 3286 u32 size; 3287 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3288 unsigned long chunk_left_bytes; 3289 unsigned long src_addr; 3290 unsigned long flags; 3291 u32 buff_offset; 3292 3293 spin_lock_irqsave(&instance->crashdump_lock, flags); 3294 buff_offset = instance->fw_crash_buffer_offset; 3295 if (!instance->crash_dump_buf && 3296 !((instance->fw_crash_state == AVAILABLE) || 3297 (instance->fw_crash_state == COPYING))) { 3298 dev_err(&instance->pdev->dev, 3299 "Firmware crash dump is not available\n"); 3300 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3301 return -EINVAL; 3302 } 3303 3304 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3305 dev_err(&instance->pdev->dev, 3306 "Firmware crash dump offset is out of range\n"); 3307 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3308 return 0; 3309 } 3310 3311 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3312 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3313 size = (size > chunk_left_bytes) ? chunk_left_bytes : size; 3314 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3315 3316 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3317 (buff_offset % dmachunk); 3318 memcpy(buf, (void *)src_addr, size); 3319 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3320 3321 return size; 3322 } 3323 3324 static ssize_t 3325 fw_crash_buffer_size_show(struct device *cdev, 3326 struct device_attribute *attr, char *buf) 3327 { 3328 struct Scsi_Host *shost = class_to_shost(cdev); 3329 struct megasas_instance *instance = 3330 (struct megasas_instance *) shost->hostdata; 3331 3332 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3333 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3334 } 3335 3336 static ssize_t 3337 fw_crash_state_store(struct device *cdev, 3338 struct device_attribute *attr, const char *buf, size_t count) 3339 { 3340 struct Scsi_Host *shost = class_to_shost(cdev); 3341 struct megasas_instance *instance = 3342 (struct megasas_instance *) shost->hostdata; 3343 int val = 0; 3344 unsigned long flags; 3345 3346 if (kstrtoint(buf, 0, &val) != 0) 3347 return -EINVAL; 3348 3349 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3350 dev_err(&instance->pdev->dev, "application updates invalid " 3351 "firmware crash state\n"); 3352 return -EINVAL; 3353 } 3354 3355 instance->fw_crash_state = val; 3356 3357 if ((val == COPIED) || (val == COPY_ERROR)) { 3358 spin_lock_irqsave(&instance->crashdump_lock, flags); 3359 megasas_free_host_crash_buffer(instance); 3360 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3361 if (val == COPY_ERROR) 3362 dev_info(&instance->pdev->dev, "application failed to " 3363 "copy Firmware crash dump\n"); 3364 else 3365 dev_info(&instance->pdev->dev, "Firmware crash dump " 3366 "copied successfully\n"); 3367 } 3368 return strlen(buf); 3369 } 3370 3371 static ssize_t 3372 fw_crash_state_show(struct device *cdev, 3373 struct device_attribute *attr, char *buf) 3374 { 3375 struct Scsi_Host *shost = class_to_shost(cdev); 3376 struct megasas_instance *instance = 3377 (struct megasas_instance *) shost->hostdata; 3378 3379 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3380 } 3381 3382 static ssize_t 3383 page_size_show(struct device *cdev, 3384 struct device_attribute *attr, char *buf) 3385 { 3386 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3387 } 3388 3389 static ssize_t 3390 ldio_outstanding_show(struct device *cdev, 
struct device_attribute *attr, 3391 char *buf) 3392 { 3393 struct Scsi_Host *shost = class_to_shost(cdev); 3394 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3395 3396 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3397 } 3398 3399 static ssize_t 3400 fw_cmds_outstanding_show(struct device *cdev, 3401 struct device_attribute *attr, char *buf) 3402 { 3403 struct Scsi_Host *shost = class_to_shost(cdev); 3404 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3405 3406 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3407 } 3408 3409 static ssize_t 3410 enable_sdev_max_qd_show(struct device *cdev, 3411 struct device_attribute *attr, char *buf) 3412 { 3413 struct Scsi_Host *shost = class_to_shost(cdev); 3414 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3415 3416 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); 3417 } 3418 3419 static ssize_t 3420 enable_sdev_max_qd_store(struct device *cdev, 3421 struct device_attribute *attr, const char *buf, size_t count) 3422 { 3423 struct Scsi_Host *shost = class_to_shost(cdev); 3424 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3425 u32 val = 0; 3426 bool is_target_prop; 3427 int ret_target_prop = DCMD_FAILED; 3428 struct scsi_device *sdev; 3429 3430 if (kstrtou32(buf, 0, &val) != 0) { 3431 pr_err("megasas: could not set enable_sdev_max_qd\n"); 3432 return -EINVAL; 3433 } 3434 3435 mutex_lock(&instance->reset_mutex); 3436 if (val) 3437 instance->enable_sdev_max_qd = true; 3438 else 3439 instance->enable_sdev_max_qd = false; 3440 3441 shost_for_each_device(sdev, shost) { 3442 ret_target_prop = megasas_get_target_prop(instance, sdev); 3443 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; 3444 megasas_set_fw_assisted_qd(sdev, is_target_prop); 3445 } 3446 mutex_unlock(&instance->reset_mutex); 3447 3448 return strlen(buf); 3449 } 3450 3451 static ssize_t 3452 dump_system_regs_show(struct device *cdev, 3453 struct device_attribute *attr, char *buf) 3454 { 3455 struct Scsi_Host *shost = class_to_shost(cdev); 3456 struct megasas_instance *instance = 3457 (struct megasas_instance *)shost->hostdata; 3458 3459 return megasas_dump_sys_regs(instance->reg_set, buf); 3460 } 3461 3462 static ssize_t 3463 raid_map_id_show(struct device *cdev, struct device_attribute *attr, 3464 char *buf) 3465 { 3466 struct Scsi_Host *shost = class_to_shost(cdev); 3467 struct megasas_instance *instance = 3468 (struct megasas_instance *)shost->hostdata; 3469 3470 return snprintf(buf, PAGE_SIZE, "%ld\n", 3471 (unsigned long)instance->map_id); 3472 } 3473 3474 static DEVICE_ATTR_RW(fw_crash_buffer); 3475 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3476 static DEVICE_ATTR_RW(fw_crash_state); 3477 static DEVICE_ATTR_RO(page_size); 3478 static DEVICE_ATTR_RO(ldio_outstanding); 3479 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3480 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3481 static DEVICE_ATTR_RO(dump_system_regs); 3482 static DEVICE_ATTR_RO(raid_map_id); 3483 3484 static struct device_attribute *megaraid_host_attrs[] = { 3485 &dev_attr_fw_crash_buffer_size, 3486 &dev_attr_fw_crash_buffer, 3487 &dev_attr_fw_crash_state, 3488 &dev_attr_page_size, 3489 &dev_attr_ldio_outstanding, 3490 &dev_attr_fw_cmds_outstanding, 3491 &dev_attr_enable_sdev_max_qd, 3492 &dev_attr_dump_system_regs, 3493 &dev_attr_raid_map_id, 3494 NULL, 3495 }; 3496 3497 /* 3498 * Scsi host template for megaraid_sas driver 3499 */ 3500 static struct scsi_host_template megasas_template = { 3501 3502 .module = THIS_MODULE, 3503 .name = "Avago SAS based MegaRAID driver", 3504 .proc_name = "megaraid_sas", 3505 .slave_configure = megasas_slave_configure, 3506 .slave_alloc = megasas_slave_alloc, 3507 .slave_destroy = megasas_slave_destroy, 3508 .queuecommand = megasas_queue_command, 3509 .eh_target_reset_handler = megasas_reset_target, 3510 .eh_abort_handler = megasas_task_abort, 3511 .eh_host_reset_handler = megasas_reset_bus_host, 3512 .eh_timed_out = megasas_reset_timer, 3513 .shost_attrs = megaraid_host_attrs, 3514 .bios_param = megasas_bios_param, 3515 .map_queues = megasas_map_queues, 3516 .mq_poll = megasas_blk_mq_poll, 3517 .change_queue_depth = scsi_change_queue_depth, 3518 .max_segment_size = 0xffffffff, 3519 }; 3520 3521 /** 3522 * megasas_complete_int_cmd - Completes an internal command 3523 * @instance: Adapter soft state 3524 * @cmd: Command to be completed 3525 * 3526 * The megasas_issue_blocked_cmd() function waits for a command to complete 3527 * after it issues a command. This function wakes up that waiting routine by 3528 * calling wake_up() on the wait queue. 3529 */ 3530 static void 3531 megasas_complete_int_cmd(struct megasas_instance *instance, 3532 struct megasas_cmd *cmd) 3533 { 3534 if (cmd->cmd_status_drv == DCMD_INIT) 3535 cmd->cmd_status_drv = 3536 (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 3537 DCMD_SUCCESS : DCMD_FAILED; 3538 3539 wake_up(&instance->int_cmd_wait_q); 3540 } 3541 3542 /** 3543 * megasas_complete_abort - Completes aborting a command 3544 * @instance: Adapter soft state 3545 * @cmd: Cmd that was issued to abort another cmd 3546 * 3547 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3548 * after it issues an abort on a previously issued command. 
This function 3549 * wakes up all functions waiting on the same wait queue. 3550 */ 3551 static void 3552 megasas_complete_abort(struct megasas_instance *instance, 3553 struct megasas_cmd *cmd) 3554 { 3555 if (cmd->sync_cmd) { 3556 cmd->sync_cmd = 0; 3557 cmd->cmd_status_drv = DCMD_SUCCESS; 3558 wake_up(&instance->abort_cmd_wait_q); 3559 } 3560 } 3561 3562 static void 3563 megasas_set_ld_removed_by_fw(struct megasas_instance *instance) 3564 { 3565 uint i; 3566 3567 for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { 3568 if (instance->ld_ids_prev[i] != 0xff && 3569 instance->ld_ids_from_raidmap[i] == 0xff) { 3570 if (megasas_dbg_lvl & LD_PD_DEBUG) 3571 dev_info(&instance->pdev->dev, 3572 "LD target ID %d removed from RAID map\n", i); 3573 instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; 3574 } 3575 } 3576 } 3577 3578 /** 3579 * megasas_complete_cmd - Completes a command 3580 * @instance: Adapter soft state 3581 * @cmd: Command to be completed 3582 * @alt_status: If non-zero, use this value as status to 3583 * SCSI mid-layer instead of the value returned 3584 * by the FW. This should be used if caller wants 3585 * an alternate status (as in the case of aborted 3586 * commands) 3587 */ 3588 void 3589 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3590 u8 alt_status) 3591 { 3592 int exception = 0; 3593 struct megasas_header *hdr = &cmd->frame->hdr; 3594 unsigned long flags; 3595 struct fusion_context *fusion = instance->ctrl_context; 3596 u32 opcode, status; 3597 3598 /* flag for the retry reset */ 3599 cmd->retry_for_fw_reset = 0; 3600 3601 if (cmd->scmd) 3602 cmd->scmd->SCp.ptr = NULL; 3603 3604 switch (hdr->cmd) { 3605 case MFI_CMD_INVALID: 3606 /* Some older 1068 controller FW may keep a pended 3607 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3608 when booting the kdump kernel. Ignore this command to 3609 prevent a kernel panic on shutdown of the kdump kernel. */ 3610 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3611 "completed\n"); 3612 dev_warn(&instance->pdev->dev, "If you have a controller " 3613 "other than PERC5, please upgrade your firmware\n"); 3614 break; 3615 case MFI_CMD_PD_SCSI_IO: 3616 case MFI_CMD_LD_SCSI_IO: 3617 3618 /* 3619 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3620 * issued either through an IO path or an IOCTL path. If it 3621 * was via IOCTL, we will send it to internal completion. 
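 * sync_cmd is set only on the IOCTL path, so it is what tells the two
 * apart here; IO path commands fall through to the LD read/write
 * completion handling below.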
3622 */ 3623 if (cmd->sync_cmd) { 3624 cmd->sync_cmd = 0; 3625 megasas_complete_int_cmd(instance, cmd); 3626 break; 3627 } 3628 fallthrough; 3629 3630 case MFI_CMD_LD_READ: 3631 case MFI_CMD_LD_WRITE: 3632 3633 if (alt_status) { 3634 cmd->scmd->result = alt_status << 16; 3635 exception = 1; 3636 } 3637 3638 if (exception) { 3639 3640 atomic_dec(&instance->fw_outstanding); 3641 3642 scsi_dma_unmap(cmd->scmd); 3643 cmd->scmd->scsi_done(cmd->scmd); 3644 megasas_return_cmd(instance, cmd); 3645 3646 break; 3647 } 3648 3649 switch (hdr->cmd_status) { 3650 3651 case MFI_STAT_OK: 3652 cmd->scmd->result = DID_OK << 16; 3653 break; 3654 3655 case MFI_STAT_SCSI_IO_FAILED: 3656 case MFI_STAT_LD_INIT_IN_PROGRESS: 3657 cmd->scmd->result = 3658 (DID_ERROR << 16) | hdr->scsi_status; 3659 break; 3660 3661 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3662 3663 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3664 3665 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3666 memset(cmd->scmd->sense_buffer, 0, 3667 SCSI_SENSE_BUFFERSIZE); 3668 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3669 hdr->sense_len); 3670 } 3671 3672 break; 3673 3674 case MFI_STAT_LD_OFFLINE: 3675 case MFI_STAT_DEVICE_NOT_FOUND: 3676 cmd->scmd->result = DID_BAD_TARGET << 16; 3677 break; 3678 3679 default: 3680 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3681 hdr->cmd_status); 3682 cmd->scmd->result = DID_ERROR << 16; 3683 break; 3684 } 3685 3686 atomic_dec(&instance->fw_outstanding); 3687 3688 scsi_dma_unmap(cmd->scmd); 3689 cmd->scmd->scsi_done(cmd->scmd); 3690 megasas_return_cmd(instance, cmd); 3691 3692 break; 3693 3694 case MFI_CMD_SMP: 3695 case MFI_CMD_STP: 3696 case MFI_CMD_NVME: 3697 case MFI_CMD_TOOLBOX: 3698 megasas_complete_int_cmd(instance, cmd); 3699 break; 3700 3701 case MFI_CMD_DCMD: 3702 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3703 /* Check for LD map update */ 3704 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3705 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3706 fusion->fast_path_io = 0; 3707 spin_lock_irqsave(instance->host->host_lock, flags); 3708 status = cmd->frame->hdr.cmd_status; 3709 instance->map_update_cmd = NULL; 3710 if (status != MFI_STAT_OK) { 3711 if (status != MFI_STAT_NOT_FOUND) 3712 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3713 cmd->frame->hdr.cmd_status); 3714 else { 3715 megasas_return_cmd(instance, cmd); 3716 spin_unlock_irqrestore( 3717 instance->host->host_lock, 3718 flags); 3719 break; 3720 } 3721 } 3722 3723 megasas_return_cmd(instance, cmd); 3724 3725 /* 3726 * Set fast path IO to ZERO. 3727 * Validate Map will set proper value. 3728 * Meanwhile all IOs will go as LD IO. 
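 * Only if the new map validates (MR_ValidateMapInfo()) is map_id bumped
 * and fast path IO re-enabled; otherwise IO keeps going through the
 * firmware as plain LD IO.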
3729 */ 3730 if (status == MFI_STAT_OK && 3731 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3732 instance->map_id++; 3733 fusion->fast_path_io = 1; 3734 } else { 3735 fusion->fast_path_io = 0; 3736 } 3737 3738 if (instance->adapter_type >= INVADER_SERIES) 3739 megasas_set_ld_removed_by_fw(instance); 3740 3741 megasas_sync_map_info(instance); 3742 spin_unlock_irqrestore(instance->host->host_lock, 3743 flags); 3744 3745 break; 3746 } 3747 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3748 opcode == MR_DCMD_CTRL_EVENT_GET) { 3749 spin_lock_irqsave(&poll_aen_lock, flags); 3750 megasas_poll_wait_aen = 0; 3751 spin_unlock_irqrestore(&poll_aen_lock, flags); 3752 } 3753 3754 /* FW has an updated PD sequence */ 3755 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3756 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3757 3758 spin_lock_irqsave(instance->host->host_lock, flags); 3759 status = cmd->frame->hdr.cmd_status; 3760 instance->jbod_seq_cmd = NULL; 3761 megasas_return_cmd(instance, cmd); 3762 3763 if (status == MFI_STAT_OK) { 3764 instance->pd_seq_map_id++; 3765 /* Re-register a pd sync seq num cmd */ 3766 if (megasas_sync_pd_seq_num(instance, true)) 3767 instance->use_seqnum_jbod_fp = false; 3768 } else 3769 instance->use_seqnum_jbod_fp = false; 3770 3771 spin_unlock_irqrestore(instance->host->host_lock, flags); 3772 break; 3773 } 3774 3775 /* 3776 * See if we got an event notification 3777 */ 3778 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3779 megasas_service_aen(instance, cmd); 3780 else 3781 megasas_complete_int_cmd(instance, cmd); 3782 3783 break; 3784 3785 case MFI_CMD_ABORT: 3786 /* 3787 * Cmd issued to abort another cmd returned 3788 */ 3789 megasas_complete_abort(instance, cmd); 3790 break; 3791 3792 default: 3793 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3794 hdr->cmd); 3795 megasas_complete_int_cmd(instance, cmd); 3796 break; 3797 } 3798 } 3799 3800 /** 3801 * megasas_issue_pending_cmds_again - issue all pending cmds 3802 * in FW again because of the fw reset 3803 * @instance: Adapter soft state 3804 */ 3805 static inline void 3806 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3807 { 3808 struct megasas_cmd *cmd; 3809 struct list_head clist_local; 3810 union megasas_evt_class_locale class_locale; 3811 unsigned long flags; 3812 u32 seq_num; 3813 3814 INIT_LIST_HEAD(&clist_local); 3815 spin_lock_irqsave(&instance->hba_lock, flags); 3816 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3817 spin_unlock_irqrestore(&instance->hba_lock, flags); 3818 3819 while (!list_empty(&clist_local)) { 3820 cmd = list_entry((&clist_local)->next, 3821 struct megasas_cmd, list); 3822 list_del_init(&cmd->list); 3823 3824 if (cmd->sync_cmd || cmd->scmd) { 3825 dev_notice(&instance->pdev->dev, "command %p, %p:%d " 3826 "detected to be pending during HBA reset\n", 3827 cmd, cmd->scmd, cmd->sync_cmd); 3828 3829 cmd->retry_for_fw_reset++; 3830 3831 if (cmd->retry_for_fw_reset == 3) { 3832 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d " 3833 "was tried multiple times during reset. " 
 3834 "Shutting down the HBA\n", 3835 cmd, cmd->scmd, cmd->sync_cmd); 3836 instance->instancet->disable_intr(instance); 3837 atomic_set(&instance->fw_reset_no_pci_access, 1); 3838 megaraid_sas_kill_hba(instance); 3839 return; 3840 } 3841 } 3842 3843 if (cmd->sync_cmd == 1) { 3844 if (cmd->scmd) { 3845 dev_notice(&instance->pdev->dev, "unexpected " 3846 "cmd attached to internal command!\n"); 3847 } 3848 dev_notice(&instance->pdev->dev, "%p synchronous cmd " 3849 "on the internal reset queue, " 3850 "issue it again.\n", cmd); 3851 cmd->cmd_status_drv = DCMD_INIT; 3852 instance->instancet->fire_cmd(instance, 3853 cmd->frame_phys_addr, 3854 0, instance->reg_set); 3855 } else if (cmd->scmd) { 3856 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] " 3857 "detected on the internal queue, issue it again.\n", 3858 cmd, cmd->scmd->cmnd[0]); 3859 3860 atomic_inc(&instance->fw_outstanding); 3861 instance->instancet->fire_cmd(instance, 3862 cmd->frame_phys_addr, 3863 cmd->frame_count-1, instance->reg_set); 3864 } else { 3865 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the " 3866 "internal reset defer list while re-issuing\n", 3867 cmd); 3868 } 3869 } 3870 3871 if (instance->aen_cmd) { 3872 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3873 megasas_return_cmd(instance, instance->aen_cmd); 3874 3875 instance->aen_cmd = NULL; 3876 } 3877 3878 /* 3879 * Initiate AEN (Asynchronous Event Notification) 3880 */ 3881 seq_num = instance->last_seq_num; 3882 class_locale.members.reserved = 0; 3883 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3884 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3885 3886 megasas_register_aen(instance, seq_num, class_locale.word); 3887 } 3888 3889 /* 3890 * Move the internal reset pending commands to a deferred queue. 3891 * 3892 * We move the commands pending at internal reset time to a 3893 * pending queue. This queue is flushed after successful 3894 * completion of the internal reset sequence. If the internal reset 3895 * did not complete in time, the kernel reset handler flushes 3896 * these commands. 
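 *
 * Roughly: megasas_internal_reset_defer_cmds() parks every command with a
 * sync_cmd or scmd attached on internal_reset_pending_q under
 * mfi_pool_lock, and megasas_issue_pending_cmds_again() later splices that
 * list off and re-fires each command once the adapter is operational again.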
3897 */ 3898 static void 3899 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3900 { 3901 struct megasas_cmd *cmd; 3902 int i; 3903 u16 max_cmd = instance->max_fw_cmds; 3904 u32 defer_index; 3905 unsigned long flags; 3906 3907 defer_index = 0; 3908 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3909 for (i = 0; i < max_cmd; i++) { 3910 cmd = instance->cmd_list[i]; 3911 if (cmd->sync_cmd == 1 || cmd->scmd) { 3912 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p " 3913 "on the defer queue as internal\n", 3914 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3915 3916 if (!list_empty(&cmd->list)) { 3917 dev_notice(&instance->pdev->dev, "ERROR while" 3918 " moving this cmd:%p, %d %p, it was " 3919 "discovered on some list?\n", 3920 cmd, cmd->sync_cmd, cmd->scmd); 3921 3922 list_del_init(&cmd->list); 3923 } 3924 defer_index++; 3925 list_add_tail(&cmd->list, 3926 &instance->internal_reset_pending_q); 3927 } 3928 } 3929 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3930 } 3931 3932 3933 static void 3934 process_fw_state_change_wq(struct work_struct *work) 3935 { 3936 struct megasas_instance *instance = 3937 container_of(work, struct megasas_instance, work_init); 3938 u32 wait; 3939 unsigned long flags; 3940 3941 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3942 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3943 atomic_read(&instance->adprecovery)); 3944 return; 3945 } 3946 3947 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3948 dev_notice(&instance->pdev->dev, "FW detected to be in fault " 3949 "state, restarting it...\n"); 3950 3951 instance->instancet->disable_intr(instance); 3952 atomic_set(&instance->fw_outstanding, 0); 3953 3954 atomic_set(&instance->fw_reset_no_pci_access, 1); 3955 instance->instancet->adp_reset(instance, instance->reg_set); 3956 atomic_set(&instance->fw_reset_no_pci_access, 0); 3957 3958 dev_notice(&instance->pdev->dev, "FW restarted successfully, " 3959 "initiating next stage...\n"); 3960 3961 dev_notice(&instance->pdev->dev, "HBA recovery state machine, " 3962 "state 2 starting...\n"); 3963 3964 /* wait for about 30 seconds before starting the second init */ 3965 for (wait = 0; wait < 30; wait++) { 3966 msleep(1000); 3967 } 3968 3969 if (megasas_transition_to_ready(instance, 1)) { 3970 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3971 3972 atomic_set(&instance->fw_reset_no_pci_access, 1); 3973 megaraid_sas_kill_hba(instance); 3974 return; 3975 } 3976 3977 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3978 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3979 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3980 ) { 3981 *instance->consumer = *instance->producer; 3982 } else { 3983 *instance->consumer = 0; 3984 *instance->producer = 0; 3985 } 3986 3987 megasas_issue_init_mfi(instance); 3988 3989 spin_lock_irqsave(&instance->hba_lock, flags); 3990 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3991 spin_unlock_irqrestore(&instance->hba_lock, flags); 3992 instance->instancet->enable_intr(instance); 3993 3994 megasas_issue_pending_cmds_again(instance); 3995 instance->issuepend_done = 1; 3996 } 3997 } 3998 3999 /** 4000 * megasas_deplete_reply_queue - Processes all completed commands 4001 * @instance: Adapter soft state 4002 * @alt_status: Alternate status to be returned to 4003 * SCSI mid-layer instead of the status 4004 * returned by the FW 4005 * Note: this must be called with hba lock held 4006 */ 4007 static 
int 4008 megasas_deplete_reply_queue(struct megasas_instance *instance, 4009 u8 alt_status) 4010 { 4011 u32 mfiStatus; 4012 u32 fw_state; 4013 4014 if ((mfiStatus = instance->instancet->check_reset(instance, 4015 instance->reg_set)) == 1) { 4016 return IRQ_HANDLED; 4017 } 4018 4019 mfiStatus = instance->instancet->clear_intr(instance); 4020 if (mfiStatus == 0) { 4021 /* Hardware may not set outbound_intr_status in MSI-X mode */ 4022 if (!instance->msix_vectors) 4023 return IRQ_NONE; 4024 } 4025 4026 instance->mfiStatus = mfiStatus; 4027 4028 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 4029 fw_state = instance->instancet->read_fw_status_reg( 4030 instance) & MFI_STATE_MASK; 4031 4032 if (fw_state != MFI_STATE_FAULT) { 4033 dev_notice(&instance->pdev->dev, "fw state:%x\n", 4034 fw_state); 4035 } 4036 4037 if ((fw_state == MFI_STATE_FAULT) && 4038 (instance->disableOnlineCtrlReset == 0)) { 4039 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 4040 4041 if ((instance->pdev->device == 4042 PCI_DEVICE_ID_LSI_SAS1064R) || 4043 (instance->pdev->device == 4044 PCI_DEVICE_ID_DELL_PERC5) || 4045 (instance->pdev->device == 4046 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 4047 4048 *instance->consumer = 4049 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 4050 } 4051 4052 4053 instance->instancet->disable_intr(instance); 4054 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4055 instance->issuepend_done = 0; 4056 4057 atomic_set(&instance->fw_outstanding, 0); 4058 megasas_internal_reset_defer_cmds(instance); 4059 4060 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 4061 fw_state, atomic_read(&instance->adprecovery)); 4062 4063 schedule_work(&instance->work_init); 4064 return IRQ_HANDLED; 4065 4066 } else { 4067 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 4068 fw_state, instance->disableOnlineCtrlReset); 4069 } 4070 } 4071 4072 tasklet_schedule(&instance->isr_tasklet); 4073 return IRQ_HANDLED; 4074 } 4075 4076 /** 4077 * megasas_isr - isr entry point 4078 * @irq: IRQ number 4079 * @devp: IRQ context address 4080 */ 4081 static irqreturn_t megasas_isr(int irq, void *devp) 4082 { 4083 struct megasas_irq_context *irq_context = devp; 4084 struct megasas_instance *instance = irq_context->instance; 4085 unsigned long flags; 4086 irqreturn_t rc; 4087 4088 if (atomic_read(&instance->fw_reset_no_pci_access)) 4089 return IRQ_HANDLED; 4090 4091 spin_lock_irqsave(&instance->hba_lock, flags); 4092 rc = megasas_deplete_reply_queue(instance, DID_OK); 4093 spin_unlock_irqrestore(&instance->hba_lock, flags); 4094 4095 return rc; 4096 } 4097 4098 /** 4099 * megasas_transition_to_ready - Move the FW to READY state 4100 * @instance: Adapter soft state 4101 * @ocr: Adapter reset state 4102 * 4103 * During initialization, the FW can be in any one of several possible 4104 * states. If the FW is in an operational or waiting-for-handshake state, 4105 * the driver must take steps to bring it to the ready state. Otherwise, 4106 * it has to wait for the ready state. 
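 *
 * Rough sketch of the loop below, for orientation only:
 *
 *	abs_state = read_fw_status_reg(instance);
 *	while ((abs_state & MFI_STATE_MASK) != MFI_STATE_READY) {
 *		nudge the FW for the current state (doorbell writes);
 *		poll up to max_wait seconds for abs_state to change;
 *		if it did not change, dump registers and return -ENODEV;
 *	}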
4107 */ 4108 int 4109 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 4110 { 4111 int i; 4112 u8 max_wait; 4113 u32 fw_state; 4114 u32 abs_state, curr_abs_state; 4115 4116 abs_state = instance->instancet->read_fw_status_reg(instance); 4117 fw_state = abs_state & MFI_STATE_MASK; 4118 4119 if (fw_state != MFI_STATE_READY) 4120 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 4121 " state\n"); 4122 4123 while (fw_state != MFI_STATE_READY) { 4124 4125 switch (fw_state) { 4126 4127 case MFI_STATE_FAULT: 4128 dev_printk(KERN_ERR, &instance->pdev->dev, 4129 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 4130 abs_state & MFI_STATE_FAULT_CODE, 4131 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4132 if (ocr) { 4133 max_wait = MEGASAS_RESET_WAIT_TIME; 4134 break; 4135 } else { 4136 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4137 megasas_dump_reg_set(instance->reg_set); 4138 return -ENODEV; 4139 } 4140 4141 case MFI_STATE_WAIT_HANDSHAKE: 4142 /* 4143 * Set the CLR bit in inbound doorbell 4144 */ 4145 if ((instance->pdev->device == 4146 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4147 (instance->pdev->device == 4148 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4149 (instance->adapter_type != MFI_SERIES)) 4150 writel( 4151 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4152 &instance->reg_set->doorbell); 4153 else 4154 writel( 4155 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4156 &instance->reg_set->inbound_doorbell); 4157 4158 max_wait = MEGASAS_RESET_WAIT_TIME; 4159 break; 4160 4161 case MFI_STATE_BOOT_MESSAGE_PENDING: 4162 if ((instance->pdev->device == 4163 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4164 (instance->pdev->device == 4165 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4166 (instance->adapter_type != MFI_SERIES)) 4167 writel(MFI_INIT_HOTPLUG, 4168 &instance->reg_set->doorbell); 4169 else 4170 writel(MFI_INIT_HOTPLUG, 4171 &instance->reg_set->inbound_doorbell); 4172 4173 max_wait = MEGASAS_RESET_WAIT_TIME; 4174 break; 4175 4176 case MFI_STATE_OPERATIONAL: 4177 /* 4178 * Bring it to READY state; assuming max wait 10 secs 4179 */ 4180 instance->instancet->disable_intr(instance); 4181 if ((instance->pdev->device == 4182 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4183 (instance->pdev->device == 4184 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4185 (instance->adapter_type != MFI_SERIES)) { 4186 writel(MFI_RESET_FLAGS, 4187 &instance->reg_set->doorbell); 4188 4189 if (instance->adapter_type != MFI_SERIES) { 4190 for (i = 0; i < (10 * 1000); i += 20) { 4191 if (megasas_readl( 4192 instance, 4193 &instance-> 4194 reg_set-> 4195 doorbell) & 1) 4196 msleep(20); 4197 else 4198 break; 4199 } 4200 } 4201 } else 4202 writel(MFI_RESET_FLAGS, 4203 &instance->reg_set->inbound_doorbell); 4204 4205 max_wait = MEGASAS_RESET_WAIT_TIME; 4206 break; 4207 4208 case MFI_STATE_UNDEFINED: 4209 /* 4210 * This state should not last for more than 2 seconds 4211 */ 4212 max_wait = MEGASAS_RESET_WAIT_TIME; 4213 break; 4214 4215 case MFI_STATE_BB_INIT: 4216 max_wait = MEGASAS_RESET_WAIT_TIME; 4217 break; 4218 4219 case MFI_STATE_FW_INIT: 4220 max_wait = MEGASAS_RESET_WAIT_TIME; 4221 break; 4222 4223 case MFI_STATE_FW_INIT_2: 4224 max_wait = MEGASAS_RESET_WAIT_TIME; 4225 break; 4226 4227 case MFI_STATE_DEVICE_SCAN: 4228 max_wait = MEGASAS_RESET_WAIT_TIME; 4229 break; 4230 4231 case MFI_STATE_FLUSH_CACHE: 4232 max_wait = MEGASAS_RESET_WAIT_TIME; 4233 break; 4234 4235 default: 4236 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4237 fw_state); 4238 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4239 megasas_dump_reg_set(instance->reg_set); 4240 return -ENODEV; 4241 } 4242 4243 /* 4244 * The cur_state should not last for more than max_wait secs 4245 */ 4246 for (i = 0; i < max_wait * 50; i++) { 4247 curr_abs_state = instance->instancet-> 4248 read_fw_status_reg(instance); 4249 4250 if (abs_state == curr_abs_state) { 4251 msleep(20); 4252 } else 4253 break; 4254 } 4255 4256 /* 4257 * Return error if fw_state hasn't changed after max_wait 4258 */ 4259 if (curr_abs_state == abs_state) { 4260 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4261 "in %d secs\n", fw_state, max_wait); 4262 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4263 megasas_dump_reg_set(instance->reg_set); 4264 return -ENODEV; 4265 } 4266 4267 abs_state = curr_abs_state; 4268 fw_state = curr_abs_state & MFI_STATE_MASK; 4269 } 4270 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4271 4272 return 0; 4273 } 4274 4275 /** 4276 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4277 * @instance: Adapter soft state 4278 */ 4279 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4280 { 4281 int i; 4282 u16 max_cmd = instance->max_mfi_cmds; 4283 struct megasas_cmd *cmd; 4284 4285 if (!instance->frame_dma_pool) 4286 return; 4287 4288 /* 4289 * Return all frames to pool 4290 */ 4291 for (i = 0; i < max_cmd; i++) { 4292 4293 cmd = instance->cmd_list[i]; 4294 4295 if (cmd->frame) 4296 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4297 cmd->frame_phys_addr); 4298 4299 if (cmd->sense) 4300 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4301 cmd->sense_phys_addr); 4302 } 4303 4304 /* 4305 * Now destroy the pool itself 4306 */ 4307 dma_pool_destroy(instance->frame_dma_pool); 4308 dma_pool_destroy(instance->sense_dma_pool); 4309 4310 instance->frame_dma_pool = NULL; 4311 instance->sense_dma_pool = NULL; 4312 } 4313 4314 /** 4315 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4316 * @instance: Adapter soft state 4317 * 4318 * Each command packet has an embedded DMA memory buffer that is used for 4319 * filling the MFI frame and the SG list that immediately follows the frame. 4320 * This function creates those DMA memory buffers for each command packet by 4321 * using the PCI pool facility. 4322 */ 4323 static int megasas_create_frame_pool(struct megasas_instance *instance) 4324 { 4325 int i; 4326 u16 max_cmd; 4327 u32 frame_count; 4328 struct megasas_cmd *cmd; 4329 4330 max_cmd = instance->max_mfi_cmds; 4331 4332 /* 4333 * For MFI controllers: 4334 * max_num_sge = 60 4335 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny) 4336 * Total 960 bytes (15 MFI frames of 64 bytes each) 4337 * 4338 * Fusion adapters require only 3 extra frames: 4339 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4340 * max_sge_sz = 12 bytes (sizeof megasas_sge64) 4341 * Total 192 bytes (3 MFI frames of 64 bytes each) 4342 */ 4343 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4344 (15 + 1) : (3 + 1); 4345 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4346 /* 4347 * Use the DMA pool facility provided by the PCI layer 4348 */ 4349 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4350 &instance->pdev->dev, 4351 instance->mfi_frame_size, 256, 0); 4352 4353 if (!instance->frame_dma_pool) { 4354 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4355 return -ENOMEM; 4356 } 4357 4358 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4359 &instance->pdev->dev, 128, 4360 4, 0); 4361 4362 if (!instance->sense_dma_pool) { 4363 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4364 4365 dma_pool_destroy(instance->frame_dma_pool); 4366 instance->frame_dma_pool = NULL; 4367 4368 return -ENOMEM; 4369 } 4370 4371 /* 4372 * Allocate and attach a frame to each of the commands in cmd_list. 4373 * By using cmd->index as the context instead of &cmd, we can always 4374 * use a 32-bit context regardless of the architecture. 4375 */ 4376 for (i = 0; i < max_cmd; i++) { 4377 4378 cmd = instance->cmd_list[i]; 4379 4380 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4381 GFP_KERNEL, &cmd->frame_phys_addr); 4382 4383 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4384 GFP_KERNEL, &cmd->sense_phys_addr); 4385 4386 /* 4387 * megasas_teardown_frame_pool() takes care of freeing 4388 * whatever has been allocated 4389 */ 4390 if (!cmd->frame || !cmd->sense) { 4391 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4392 megasas_teardown_frame_pool(instance); 4393 return -ENOMEM; 4394 } 4395 4396 cmd->frame->io.context = cpu_to_le32(cmd->index); 4397 cmd->frame->io.pad_0 = 0; 4398 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4399 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4400 } 4401 4402 return 0; 4403 } 4404 4405 /** 4406 * megasas_free_cmds - Free all the cmds in the free cmd pool 4407 * @instance: Adapter soft state 4408 */ 4409 void megasas_free_cmds(struct megasas_instance *instance) 4410 { 4411 int i; 4412 4413 /* First free the MFI frame pool */ 4414 megasas_teardown_frame_pool(instance); 4415 4416 /* Free all the commands in the cmd_list */ 4417 for (i = 0; i < instance->max_mfi_cmds; i++) 4418 4419 kfree(instance->cmd_list[i]); 4420 4421 /* Free the cmd_list buffer itself */ 4422 kfree(instance->cmd_list); 4423 instance->cmd_list = NULL; 4424 4425 INIT_LIST_HEAD(&instance->cmd_pool); 4426 } 4427 4428 /** 4429 * megasas_alloc_cmds - Allocates the command packets 4430 * @instance: Adapter soft state 4431 * 4432 * Each command that is issued to the FW, whether an IO command from the OS 4433 * or an internal command like an IOCTL, is wrapped in a local data structure 4434 * called megasas_cmd. The frame embedded in this megasas_cmd is what is 4435 * actually issued to the FW. 4436 * 4437 * Each frame has a 32-bit field called context (tag). This context is used 4438 * to get back the megasas_cmd from the frame when a frame gets completed in 4439 * the ISR. Typically the address of the megasas_cmd itself would be used as 4440 * the context. But we wanted to keep the differences between 32 and 64 bit 4441 * systems to the minimum, so we always use 32-bit integers for the context. 4442 * In this driver, the 32-bit values are indices into the cmd_list array. 4443 * This array is used only to look up the megasas_cmd given the context. The 4444 * free commands themselves are maintained in a linked list called cmd_pool. 
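 *
 * Illustrative sketch of the lookup described above (completion side),
 * assuming the context was written as cmd->index when the frame pool was
 * created:
 *
 *	context = le32_to_cpu(frame->io.context);
 *	cmd = instance->cmd_list[context];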
4445 */ 4446 int megasas_alloc_cmds(struct megasas_instance *instance) 4447 { 4448 int i; 4449 int j; 4450 u16 max_cmd; 4451 struct megasas_cmd *cmd; 4452 4453 max_cmd = instance->max_mfi_cmds; 4454 4455 /* 4456 * instance->cmd_list is an array of struct megasas_cmd pointers. 4457 * Allocate the dynamic array first and then allocate individual 4458 * commands. 4459 */ 4460 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL); 4461 4462 if (!instance->cmd_list) { 4463 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4464 return -ENOMEM; 4465 } 4466 4467 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd); 4468 4469 for (i = 0; i < max_cmd; i++) { 4470 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4471 GFP_KERNEL); 4472 4473 if (!instance->cmd_list[i]) { 4474 4475 for (j = 0; j < i; j++) 4476 kfree(instance->cmd_list[j]); 4477 4478 kfree(instance->cmd_list); 4479 instance->cmd_list = NULL; 4480 4481 return -ENOMEM; 4482 } 4483 } 4484 4485 for (i = 0; i < max_cmd; i++) { 4486 cmd = instance->cmd_list[i]; 4487 memset(cmd, 0, sizeof(struct megasas_cmd)); 4488 cmd->index = i; 4489 cmd->scmd = NULL; 4490 cmd->instance = instance; 4491 4492 list_add_tail(&cmd->list, &instance->cmd_pool); 4493 } 4494 4495 /* 4496 * Create a frame pool and assign one frame to each cmd 4497 */ 4498 if (megasas_create_frame_pool(instance)) { 4499 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4500 megasas_free_cmds(instance); 4501 return -ENOMEM; 4502 } 4503 4504 return 0; 4505 } 4506 4507 /* 4508 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4509 * @instance: Adapter soft state 4510 * 4511 * Returns INITIATE_OCR only for Fusion adapters, and only if driver 4512 * load/unload is not in progress and the FW is not already under OCR. 
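 *
 * Summary of the checks below:
 *	MFI series adapter                      -> KILL_ADAPTER
 *	driver unload or OCR flagged impossible -> IGNORE_TIMEOUT
 *	otherwise (Fusion, OCR usable)          -> INITIATE_OCR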
4513 */ 4514 inline int 4515 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4516 4517 if (instance->adapter_type == MFI_SERIES) 4518 return KILL_ADAPTER; 4519 else if (instance->unload || 4520 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, 4521 &instance->reset_flags)) 4522 return IGNORE_TIMEOUT; 4523 else 4524 return INITIATE_OCR; 4525 } 4526 4527 static void 4528 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4529 { 4530 int ret; 4531 struct megasas_cmd *cmd; 4532 struct megasas_dcmd_frame *dcmd; 4533 4534 struct MR_PRIV_DEVICE *mr_device_priv_data; 4535 u16 device_id = 0; 4536 4537 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4538 cmd = megasas_get_cmd(instance); 4539 4540 if (!cmd) { 4541 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4542 return; 4543 } 4544 4545 dcmd = &cmd->frame->dcmd; 4546 4547 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4548 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4549 4550 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4551 dcmd->cmd = MFI_CMD_DCMD; 4552 dcmd->cmd_status = 0xFF; 4553 dcmd->sge_count = 1; 4554 dcmd->flags = MFI_FRAME_DIR_READ; 4555 dcmd->timeout = 0; 4556 dcmd->pad_0 = 0; 4557 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4558 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4559 4560 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4561 sizeof(struct MR_PD_INFO)); 4562 4563 if ((instance->adapter_type != MFI_SERIES) && 4564 !instance->mask_interrupts) 4565 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4566 else 4567 ret = megasas_issue_polled(instance, cmd); 4568 4569 switch (ret) { 4570 case DCMD_SUCCESS: 4571 mr_device_priv_data = sdev->hostdata; 4572 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); 4573 mr_device_priv_data->interface_type = 4574 instance->pd_info->state.ddf.pdType.intf; 4575 break; 4576 4577 case DCMD_TIMEOUT: 4578 4579 switch (dcmd_timeout_ocr_possible(instance)) { 4580 case INITIATE_OCR: 4581 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4582 mutex_unlock(&instance->reset_mutex); 4583 megasas_reset_fusion(instance->host, 4584 MFI_IO_TIMEOUT_OCR); 4585 mutex_lock(&instance->reset_mutex); 4586 break; 4587 case KILL_ADAPTER: 4588 megaraid_sas_kill_hba(instance); 4589 break; 4590 case IGNORE_TIMEOUT: 4591 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4592 __func__, __LINE__); 4593 break; 4594 } 4595 4596 break; 4597 } 4598 4599 if (ret != DCMD_TIMEOUT) 4600 megasas_return_cmd(instance, cmd); 4601 4602 return; 4603 } 4604 /* 4605 * megasas_get_pd_list - Returns FW's pd_list structure 4606 * @instance: Adapter soft state 4607 * 4608 * 4609 * Issues an internal command (DCMD) to get the FW's controller PD 4610 * list structure. This information is mainly used to find out the 4611 * system PDs exposed to the host by the FW. 
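 *
 * On DCMD_SUCCESS the returned MR_PD_LIST entries are copied into
 * instance->local_pd_list (indexed by device/target ID and marked
 * MR_PD_STATE_SYSTEM) and then mirrored into instance->pd_list.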
4612 */ 4613 static int 4614 megasas_get_pd_list(struct megasas_instance *instance) 4615 { 4616 int ret = 0, pd_index = 0; 4617 struct megasas_cmd *cmd; 4618 struct megasas_dcmd_frame *dcmd; 4619 struct MR_PD_LIST *ci; 4620 struct MR_PD_ADDRESS *pd_addr; 4621 4622 if (instance->pd_list_not_supported) { 4623 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4624 "not supported by firmware\n"); 4625 return ret; 4626 } 4627 4628 ci = instance->pd_list_buf; 4629 4630 cmd = megasas_get_cmd(instance); 4631 4632 if (!cmd) { 4633 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4634 return -ENOMEM; 4635 } 4636 4637 dcmd = &cmd->frame->dcmd; 4638 4639 memset(ci, 0, sizeof(*ci)); 4640 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4641 4642 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4643 dcmd->mbox.b[1] = 0; 4644 dcmd->cmd = MFI_CMD_DCMD; 4645 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4646 dcmd->sge_count = 1; 4647 dcmd->flags = MFI_FRAME_DIR_READ; 4648 dcmd->timeout = 0; 4649 dcmd->pad_0 = 0; 4650 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4651 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4652 4653 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4654 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4655 4656 if ((instance->adapter_type != MFI_SERIES) && 4657 !instance->mask_interrupts) 4658 ret = megasas_issue_blocked_cmd(instance, cmd, 4659 MFI_IO_TIMEOUT_SECS); 4660 else 4661 ret = megasas_issue_polled(instance, cmd); 4662 4663 switch (ret) { 4664 case DCMD_FAILED: 4665 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4666 "failed/not supported by firmware\n"); 4667 4668 if (instance->adapter_type != MFI_SERIES) 4669 megaraid_sas_kill_hba(instance); 4670 else 4671 instance->pd_list_not_supported = 1; 4672 break; 4673 case DCMD_TIMEOUT: 4674 4675 switch (dcmd_timeout_ocr_possible(instance)) { 4676 case INITIATE_OCR: 4677 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4678 /* 4679 * DCMD failed from AEN path. 4680 * AEN path already hold reset_mutex to avoid PCI access 4681 * while OCR is in progress. 
4682 */ 4683 mutex_unlock(&instance->reset_mutex); 4684 megasas_reset_fusion(instance->host, 4685 MFI_IO_TIMEOUT_OCR); 4686 mutex_lock(&instance->reset_mutex); 4687 break; 4688 case KILL_ADAPTER: 4689 megaraid_sas_kill_hba(instance); 4690 break; 4691 case IGNORE_TIMEOUT: 4692 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4693 __func__, __LINE__); 4694 break; 4695 } 4696 4697 break; 4698 4699 case DCMD_SUCCESS: 4700 pd_addr = ci->addr; 4701 if (megasas_dbg_lvl & LD_PD_DEBUG) 4702 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4703 __func__, le32_to_cpu(ci->count)); 4704 4705 if ((le32_to_cpu(ci->count) > 4706 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4707 break; 4708 4709 memset(instance->local_pd_list, 0, 4710 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4711 4712 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4713 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4714 le16_to_cpu(pd_addr->deviceId); 4715 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4716 pd_addr->scsiDevType; 4717 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4718 MR_PD_STATE_SYSTEM; 4719 if (megasas_dbg_lvl & LD_PD_DEBUG) 4720 dev_info(&instance->pdev->dev, 4721 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4722 pd_index, le16_to_cpu(pd_addr->deviceId), 4723 pd_addr->scsiDevType); 4724 pd_addr++; 4725 } 4726 4727 memcpy(instance->pd_list, instance->local_pd_list, 4728 sizeof(instance->pd_list)); 4729 break; 4730 4731 } 4732 4733 if (ret != DCMD_TIMEOUT) 4734 megasas_return_cmd(instance, cmd); 4735 4736 return ret; 4737 } 4738 4739 /* 4740 * megasas_get_ld_list - Returns FW's ld_list structure 4741 * @instance: Adapter soft state 4742 * 4743 * 4744 * Issues an internal command (DCMD) to get the FW's controller LD 4745 * list structure. This information is mainly used to find out the 4746 * logical drives exported by the FW. 
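 *
 * On DCMD_SUCCESS, instance->ld_ids[] is reset to 0xff and the target ID
 * of every LD with a non-zero state is recorded from the returned
 * MR_LD_LIST.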
4747 */ 4748 static int 4749 megasas_get_ld_list(struct megasas_instance *instance) 4750 { 4751 int ret = 0, ld_index = 0, ids = 0; 4752 struct megasas_cmd *cmd; 4753 struct megasas_dcmd_frame *dcmd; 4754 struct MR_LD_LIST *ci; 4755 dma_addr_t ci_h = 0; 4756 u32 ld_count; 4757 4758 ci = instance->ld_list_buf; 4759 ci_h = instance->ld_list_buf_h; 4760 4761 cmd = megasas_get_cmd(instance); 4762 4763 if (!cmd) { 4764 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4765 return -ENOMEM; 4766 } 4767 4768 dcmd = &cmd->frame->dcmd; 4769 4770 memset(ci, 0, sizeof(*ci)); 4771 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4772 4773 if (instance->supportmax256vd) 4774 dcmd->mbox.b[0] = 1; 4775 dcmd->cmd = MFI_CMD_DCMD; 4776 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4777 dcmd->sge_count = 1; 4778 dcmd->flags = MFI_FRAME_DIR_READ; 4779 dcmd->timeout = 0; 4780 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4781 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4782 dcmd->pad_0 = 0; 4783 4784 megasas_set_dma_settings(instance, dcmd, ci_h, 4785 sizeof(struct MR_LD_LIST)); 4786 4787 if ((instance->adapter_type != MFI_SERIES) && 4788 !instance->mask_interrupts) 4789 ret = megasas_issue_blocked_cmd(instance, cmd, 4790 MFI_IO_TIMEOUT_SECS); 4791 else 4792 ret = megasas_issue_polled(instance, cmd); 4793 4794 ld_count = le32_to_cpu(ci->ldCount); 4795 4796 switch (ret) { 4797 case DCMD_FAILED: 4798 megaraid_sas_kill_hba(instance); 4799 break; 4800 case DCMD_TIMEOUT: 4801 4802 switch (dcmd_timeout_ocr_possible(instance)) { 4803 case INITIATE_OCR: 4804 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4805 /* 4806 * DCMD failed from AEN path. 4807 * AEN path already holds reset_mutex to avoid PCI access 4808 * while OCR is in progress. 4809 */ 4810 mutex_unlock(&instance->reset_mutex); 4811 megasas_reset_fusion(instance->host, 4812 MFI_IO_TIMEOUT_OCR); 4813 mutex_lock(&instance->reset_mutex); 4814 break; 4815 case KILL_ADAPTER: 4816 megaraid_sas_kill_hba(instance); 4817 break; 4818 case IGNORE_TIMEOUT: 4819 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4820 __func__, __LINE__); 4821 break; 4822 } 4823 4824 break; 4825 4826 case DCMD_SUCCESS: 4827 if (megasas_dbg_lvl & LD_PD_DEBUG) 4828 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4829 __func__, ld_count); 4830 4831 if (ld_count > instance->fw_supported_vd_count) 4832 break; 4833 4834 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4835 4836 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4837 if (ci->ldList[ld_index].state != 0) { 4838 ids = ci->ldList[ld_index].ref.targetId; 4839 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4840 if (megasas_dbg_lvl & LD_PD_DEBUG) 4841 dev_info(&instance->pdev->dev, 4842 "LD%d: targetID: 0x%03x\n", 4843 ld_index, ids); 4844 } 4845 } 4846 4847 break; 4848 } 4849 4850 if (ret != DCMD_TIMEOUT) 4851 megasas_return_cmd(instance, cmd); 4852 4853 return ret; 4854 } 4855 4856 /** 4857 * megasas_ld_list_query - Returns FW's ld_list structure 4858 * @instance: Adapter soft state 4859 * @query_type: ld_list structure type 4860 * 4861 * Issues an internal command (DCMD) to get the FW's controller LD 4862 * list structure. This information is mainly used to find out the 4863 * logical drives exported by the FW. 
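 *
 * If the firmware does not support MR_DCMD_LD_LIST_QUERY (DCMD_FAILED),
 * the driver falls back to the older megasas_get_ld_list() path.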
4864 */ 4865 static int 4866 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4867 { 4868 int ret = 0, ld_index = 0, ids = 0; 4869 struct megasas_cmd *cmd; 4870 struct megasas_dcmd_frame *dcmd; 4871 struct MR_LD_TARGETID_LIST *ci; 4872 dma_addr_t ci_h = 0; 4873 u32 tgtid_count; 4874 4875 ci = instance->ld_targetid_list_buf; 4876 ci_h = instance->ld_targetid_list_buf_h; 4877 4878 cmd = megasas_get_cmd(instance); 4879 4880 if (!cmd) { 4881 dev_warn(&instance->pdev->dev, 4882 "megasas_ld_list_query: Failed to get cmd\n"); 4883 return -ENOMEM; 4884 } 4885 4886 dcmd = &cmd->frame->dcmd; 4887 4888 memset(ci, 0, sizeof(*ci)); 4889 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4890 4891 dcmd->mbox.b[0] = query_type; 4892 if (instance->supportmax256vd) 4893 dcmd->mbox.b[2] = 1; 4894 4895 dcmd->cmd = MFI_CMD_DCMD; 4896 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4897 dcmd->sge_count = 1; 4898 dcmd->flags = MFI_FRAME_DIR_READ; 4899 dcmd->timeout = 0; 4900 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4901 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4902 dcmd->pad_0 = 0; 4903 4904 megasas_set_dma_settings(instance, dcmd, ci_h, 4905 sizeof(struct MR_LD_TARGETID_LIST)); 4906 4907 if ((instance->adapter_type != MFI_SERIES) && 4908 !instance->mask_interrupts) 4909 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4910 else 4911 ret = megasas_issue_polled(instance, cmd); 4912 4913 switch (ret) { 4914 case DCMD_FAILED: 4915 dev_info(&instance->pdev->dev, 4916 "DCMD not supported by firmware - %s %d\n", 4917 __func__, __LINE__); 4918 ret = megasas_get_ld_list(instance); 4919 break; 4920 case DCMD_TIMEOUT: 4921 switch (dcmd_timeout_ocr_possible(instance)) { 4922 case INITIATE_OCR: 4923 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4924 /* 4925 * DCMD failed from AEN path. 4926 * AEN path already hold reset_mutex to avoid PCI access 4927 * while OCR is in progress. 
4928 */ 4929 mutex_unlock(&instance->reset_mutex); 4930 megasas_reset_fusion(instance->host, 4931 MFI_IO_TIMEOUT_OCR); 4932 mutex_lock(&instance->reset_mutex); 4933 break; 4934 case KILL_ADAPTER: 4935 megaraid_sas_kill_hba(instance); 4936 break; 4937 case IGNORE_TIMEOUT: 4938 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4939 __func__, __LINE__); 4940 break; 4941 } 4942 4943 break; 4944 case DCMD_SUCCESS: 4945 tgtid_count = le32_to_cpu(ci->count); 4946 4947 if (megasas_dbg_lvl & LD_PD_DEBUG) 4948 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4949 __func__, tgtid_count); 4950 4951 if ((tgtid_count > (instance->fw_supported_vd_count))) 4952 break; 4953 4954 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4955 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4956 ids = ci->targetId[ld_index]; 4957 instance->ld_ids[ids] = ci->targetId[ld_index]; 4958 if (megasas_dbg_lvl & LD_PD_DEBUG) 4959 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4960 ld_index, ci->targetId[ld_index]); 4961 } 4962 4963 break; 4964 } 4965 4966 if (ret != DCMD_TIMEOUT) 4967 megasas_return_cmd(instance, cmd); 4968 4969 return ret; 4970 } 4971 4972 /** 4973 * megasas_host_device_list_query 4974 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4975 * dcmd.mbox - reserved 4976 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4977 * Desc: This DCMD will return the combined device list 4978 * Status: MFI_STAT_OK - List returned successfully 4979 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4980 * disabled 4981 * @instance: Adapter soft state 4982 * @is_probe: Driver probe check 4983 * Return: 0 if DCMD succeeded 4984 * non-zero if failed 4985 */ 4986 static int 4987 megasas_host_device_list_query(struct megasas_instance *instance, 4988 bool is_probe) 4989 { 4990 int ret, i, target_id; 4991 struct megasas_cmd *cmd; 4992 struct megasas_dcmd_frame *dcmd; 4993 struct MR_HOST_DEVICE_LIST *ci; 4994 u32 count; 4995 dma_addr_t ci_h; 4996 4997 ci = instance->host_device_list_buf; 4998 ci_h = instance->host_device_list_buf_h; 4999 5000 cmd = megasas_get_cmd(instance); 5001 5002 if (!cmd) { 5003 dev_warn(&instance->pdev->dev, 5004 "%s: failed to get cmd\n", 5005 __func__); 5006 return -ENOMEM; 5007 } 5008 5009 dcmd = &cmd->frame->dcmd; 5010 5011 memset(ci, 0, sizeof(*ci)); 5012 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5013 5014 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 5015 dcmd->cmd = MFI_CMD_DCMD; 5016 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5017 dcmd->sge_count = 1; 5018 dcmd->flags = MFI_FRAME_DIR_READ; 5019 dcmd->timeout = 0; 5020 dcmd->pad_0 = 0; 5021 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 5022 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 5023 5024 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 5025 5026 if (!instance->mask_interrupts) { 5027 ret = megasas_issue_blocked_cmd(instance, cmd, 5028 MFI_IO_TIMEOUT_SECS); 5029 } else { 5030 ret = megasas_issue_polled(instance, cmd); 5031 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5032 } 5033 5034 switch (ret) { 5035 case DCMD_SUCCESS: 5036 /* Fill the internal pd_list and ld_ids array based on 5037 * targetIds returned by FW 5038 */ 5039 count = le32_to_cpu(ci->count); 5040 5041 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 5042 break; 5043 5044 if (megasas_dbg_lvl & LD_PD_DEBUG) 5045 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 5046 __func__, count); 5047 5048 memset(instance->local_pd_list, 0, 5049 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 5050 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 5051 for (i = 0; i < count; i++) { 5052 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 5053 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 5054 instance->local_pd_list[target_id].tid = target_id; 5055 instance->local_pd_list[target_id].driveType = 5056 ci->host_device_list[i].scsi_type; 5057 instance->local_pd_list[target_id].driveState = 5058 MR_PD_STATE_SYSTEM; 5059 if (megasas_dbg_lvl & LD_PD_DEBUG) 5060 dev_info(&instance->pdev->dev, 5061 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 5062 i, target_id, ci->host_device_list[i].scsi_type); 5063 } else { 5064 instance->ld_ids[target_id] = target_id; 5065 if (megasas_dbg_lvl & LD_PD_DEBUG) 5066 dev_info(&instance->pdev->dev, 5067 "Device %d: LD targetID: 0x%03x\n", 5068 i, target_id); 5069 } 5070 } 5071 5072 memcpy(instance->pd_list, instance->local_pd_list, 5073 sizeof(instance->pd_list)); 5074 break; 5075 5076 case DCMD_TIMEOUT: 5077 switch (dcmd_timeout_ocr_possible(instance)) { 5078 case INITIATE_OCR: 5079 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5080 mutex_unlock(&instance->reset_mutex); 5081 megasas_reset_fusion(instance->host, 5082 MFI_IO_TIMEOUT_OCR); 5083 mutex_lock(&instance->reset_mutex); 5084 break; 5085 case KILL_ADAPTER: 5086 megaraid_sas_kill_hba(instance); 5087 break; 5088 case IGNORE_TIMEOUT: 5089 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5090 __func__, __LINE__); 5091 break; 5092 } 5093 break; 5094 case DCMD_FAILED: 5095 dev_err(&instance->pdev->dev, 5096 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 5097 __func__); 5098 break; 5099 } 5100 5101 if (ret != DCMD_TIMEOUT) 5102 megasas_return_cmd(instance, cmd); 5103 5104 return ret; 5105 } 5106 5107 /* 5108 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 5109 * instance : Controller's instance 5110 */ 5111 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 5112 { 5113 struct fusion_context *fusion; 5114 u32 ventura_map_sz = 0; 5115 5116 fusion = instance->ctrl_context; 5117 /* For MFI based controllers return dummy success */ 5118 if (!fusion) 5119 return; 5120 5121 instance->supportmax256vd = 5122 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 5123 /* Below is additional check to address future FW enhancement */ 5124 if (instance->ctrl_info_buf->max_lds > 64) 5125 instance->supportmax256vd = 1; 5126 5127 
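	/*
	 * drv_supported_vd_count/pd_count below are the driver-side
	 * channel * devices-per-channel limits; the fw_supported_*
	 * counts depend on whether the extended (256 VD) layout was
	 * detected above via supportmax256vd.
	 */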
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 5128 * MEGASAS_MAX_DEV_PER_CHANNEL; 5129 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 5130 * MEGASAS_MAX_DEV_PER_CHANNEL; 5131 if (instance->supportmax256vd) { 5132 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 5133 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5134 } else { 5135 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5136 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5137 } 5138 5139 dev_info(&instance->pdev->dev, 5140 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 5141 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 5142 instance->ctrl_info_buf->max_lds); 5143 5144 if (instance->max_raid_mapsize) { 5145 ventura_map_sz = instance->max_raid_mapsize * 5146 MR_MIN_MAP_SIZE; /* 64k */ 5147 fusion->current_map_sz = ventura_map_sz; 5148 fusion->max_map_sz = ventura_map_sz; 5149 } else { 5150 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 5151 (sizeof(struct MR_LD_SPAN_MAP) * 5152 (instance->fw_supported_vd_count - 1)); 5153 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 5154 5155 fusion->max_map_sz = 5156 max(fusion->old_map_sz, fusion->new_map_sz); 5157 5158 if (instance->supportmax256vd) 5159 fusion->current_map_sz = fusion->new_map_sz; 5160 else 5161 fusion->current_map_sz = fusion->old_map_sz; 5162 } 5163 /* irrespective of FW raid maps, driver raid map is constant */ 5164 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 5165 } 5166 5167 /* 5168 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 5169 * dcmd.hdr.length - number of bytes to read 5170 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 5171 * Desc: Fill in snapdump properties 5172 * Status: MFI_STAT_OK- Command successful 5173 */ 5174 void megasas_get_snapdump_properties(struct megasas_instance *instance) 5175 { 5176 int ret = 0; 5177 struct megasas_cmd *cmd; 5178 struct megasas_dcmd_frame *dcmd; 5179 struct MR_SNAPDUMP_PROPERTIES *ci; 5180 dma_addr_t ci_h = 0; 5181 5182 ci = instance->snapdump_prop; 5183 ci_h = instance->snapdump_prop_h; 5184 5185 if (!ci) 5186 return; 5187 5188 cmd = megasas_get_cmd(instance); 5189 5190 if (!cmd) { 5191 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 5192 return; 5193 } 5194 5195 dcmd = &cmd->frame->dcmd; 5196 5197 memset(ci, 0, sizeof(*ci)); 5198 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5199 5200 dcmd->cmd = MFI_CMD_DCMD; 5201 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5202 dcmd->sge_count = 1; 5203 dcmd->flags = MFI_FRAME_DIR_READ; 5204 dcmd->timeout = 0; 5205 dcmd->pad_0 = 0; 5206 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5207 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5208 5209 megasas_set_dma_settings(instance, dcmd, ci_h, 5210 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5211 5212 if (!instance->mask_interrupts) { 5213 ret = megasas_issue_blocked_cmd(instance, cmd, 5214 MFI_IO_TIMEOUT_SECS); 5215 } else { 5216 ret = megasas_issue_polled(instance, cmd); 5217 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5218 } 5219 5220 switch (ret) { 5221 case DCMD_SUCCESS: 5222 instance->snapdump_wait_time = 5223 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5224 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5225 break; 5226 5227 case DCMD_TIMEOUT: 5228 switch (dcmd_timeout_ocr_possible(instance)) { 5229 case INITIATE_OCR: 5230 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5231 mutex_unlock(&instance->reset_mutex); 5232 megasas_reset_fusion(instance->host, 5233 MFI_IO_TIMEOUT_OCR); 5234 
mutex_lock(&instance->reset_mutex); 5235 break; 5236 case KILL_ADAPTER: 5237 megaraid_sas_kill_hba(instance); 5238 break; 5239 case IGNORE_TIMEOUT: 5240 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5241 __func__, __LINE__); 5242 break; 5243 } 5244 } 5245 5246 if (ret != DCMD_TIMEOUT) 5247 megasas_return_cmd(instance, cmd); 5248 } 5249 5250 /** 5251 * megasas_get_ctrl_info - Returns FW's controller structure 5252 * @instance: Adapter soft state 5253 * 5254 * Issues an internal command (DCMD) to get the FW's controller structure. 5255 * This information is mainly used to find out the maximum IO transfer per 5256 * command supported by the FW. 5257 */ 5258 int 5259 megasas_get_ctrl_info(struct megasas_instance *instance) 5260 { 5261 int ret = 0; 5262 struct megasas_cmd *cmd; 5263 struct megasas_dcmd_frame *dcmd; 5264 struct megasas_ctrl_info *ci; 5265 dma_addr_t ci_h = 0; 5266 5267 ci = instance->ctrl_info_buf; 5268 ci_h = instance->ctrl_info_buf_h; 5269 5270 cmd = megasas_get_cmd(instance); 5271 5272 if (!cmd) { 5273 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5274 return -ENOMEM; 5275 } 5276 5277 dcmd = &cmd->frame->dcmd; 5278 5279 memset(ci, 0, sizeof(*ci)); 5280 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5281 5282 dcmd->cmd = MFI_CMD_DCMD; 5283 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5284 dcmd->sge_count = 1; 5285 dcmd->flags = MFI_FRAME_DIR_READ; 5286 dcmd->timeout = 0; 5287 dcmd->pad_0 = 0; 5288 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5289 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5290 dcmd->mbox.b[0] = 1; 5291 5292 megasas_set_dma_settings(instance, dcmd, ci_h, 5293 sizeof(struct megasas_ctrl_info)); 5294 5295 if ((instance->adapter_type != MFI_SERIES) && 5296 !instance->mask_interrupts) { 5297 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5298 } else { 5299 ret = megasas_issue_polled(instance, cmd); 5300 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5301 } 5302 5303 switch (ret) { 5304 case DCMD_SUCCESS: 5305 /* Save required controller information in 5306 * CPU endianness format. 5307 */ 5308 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5309 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5310 le32_to_cpus((u32 *)&ci->adapterOperations2); 5311 le32_to_cpus((u32 *)&ci->adapterOperations3); 5312 le16_to_cpus((u16 *)&ci->adapter_operations4); 5313 le32_to_cpus((u32 *)&ci->adapter_operations5); 5314 5315 /* Update the latest Ext VD info. 5316 * From Init path, store current firmware details. 5317 * From OCR path, detect any firmware properties changes. 5318 * in case of Firmware upgrade without system reboot. 5319 */ 5320 megasas_update_ext_vd_details(instance); 5321 instance->support_seqnum_jbod_fp = 5322 ci->adapterOperations3.useSeqNumJbodFP; 5323 instance->support_morethan256jbod = 5324 ci->adapter_operations4.support_pd_map_target_id; 5325 instance->support_nvme_passthru = 5326 ci->adapter_operations4.support_nvme_passthru; 5327 instance->support_pci_lane_margining = 5328 ci->adapter_operations5.support_pci_lane_margining; 5329 instance->task_abort_tmo = ci->TaskAbortTO; 5330 instance->max_reset_tmo = ci->MaxResetTO; 5331 5332 /*Check whether controller is iMR or MR */ 5333 instance->is_imr = (ci->memory_size ? 0 : 1); 5334 5335 instance->snapdump_wait_time = 5336 (ci->properties.on_off_properties2.enable_snap_dump ? 
5337 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5338 5339 instance->enable_fw_dev_list = 5340 ci->properties.on_off_properties2.enable_fw_dev_list; 5341 5342 dev_info(&instance->pdev->dev, 5343 "controller type\t: %s(%dMB)\n", 5344 instance->is_imr ? "iMR" : "MR", 5345 le16_to_cpu(ci->memory_size)); 5346 5347 instance->disableOnlineCtrlReset = 5348 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5349 instance->secure_jbod_support = 5350 ci->adapterOperations3.supportSecurityonJBOD; 5351 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5352 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5353 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5354 instance->secure_jbod_support ? "Yes" : "No"); 5355 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5356 instance->support_nvme_passthru ? "Yes" : "No"); 5357 dev_info(&instance->pdev->dev, 5358 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5359 instance->task_abort_tmo, instance->max_reset_tmo); 5360 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5361 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5362 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5363 instance->support_pci_lane_margining ? "Yes" : "No"); 5364 5365 break; 5366 5367 case DCMD_TIMEOUT: 5368 switch (dcmd_timeout_ocr_possible(instance)) { 5369 case INITIATE_OCR: 5370 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5371 mutex_unlock(&instance->reset_mutex); 5372 megasas_reset_fusion(instance->host, 5373 MFI_IO_TIMEOUT_OCR); 5374 mutex_lock(&instance->reset_mutex); 5375 break; 5376 case KILL_ADAPTER: 5377 megaraid_sas_kill_hba(instance); 5378 break; 5379 case IGNORE_TIMEOUT: 5380 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5381 __func__, __LINE__); 5382 break; 5383 } 5384 break; 5385 case DCMD_FAILED: 5386 megaraid_sas_kill_hba(instance); 5387 break; 5388 5389 } 5390 5391 if (ret != DCMD_TIMEOUT) 5392 megasas_return_cmd(instance, cmd); 5393 5394 return ret; 5395 } 5396 5397 /* 5398 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5399 * to firmware 5400 * 5401 * @instance: Adapter soft state 5402 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5403 MR_CRASH_BUF_TURN_OFF = 0 5404 MR_CRASH_BUF_TURN_ON = 1 5405 * @return 0 on success non-zero on failure. 5406 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5407 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5408 * that driver supports crash dump feature. This DCMD will be sent only if 5409 * crash dump feature is supported by the FW. 
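 *
 * Note: the DCMD itself carries no payload (MFI_FRAME_DIR_NONE); the crash
 * dump DMA address (instance->crash_dump_h, CRASH_DMA_BUF_SIZE) is handed
 * to the FW via megasas_set_dma_settings() below.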
5410 * 5411 */ 5412 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5413 u8 crash_buf_state) 5414 { 5415 int ret = 0; 5416 struct megasas_cmd *cmd; 5417 struct megasas_dcmd_frame *dcmd; 5418 5419 cmd = megasas_get_cmd(instance); 5420 5421 if (!cmd) { 5422 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5423 return -ENOMEM; 5424 } 5425 5426 5427 dcmd = &cmd->frame->dcmd; 5428 5429 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5430 dcmd->mbox.b[0] = crash_buf_state; 5431 dcmd->cmd = MFI_CMD_DCMD; 5432 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5433 dcmd->sge_count = 1; 5434 dcmd->flags = MFI_FRAME_DIR_NONE; 5435 dcmd->timeout = 0; 5436 dcmd->pad_0 = 0; 5437 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5438 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5439 5440 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5441 CRASH_DMA_BUF_SIZE); 5442 5443 if ((instance->adapter_type != MFI_SERIES) && 5444 !instance->mask_interrupts) 5445 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5446 else 5447 ret = megasas_issue_polled(instance, cmd); 5448 5449 if (ret == DCMD_TIMEOUT) { 5450 switch (dcmd_timeout_ocr_possible(instance)) { 5451 case INITIATE_OCR: 5452 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5453 megasas_reset_fusion(instance->host, 5454 MFI_IO_TIMEOUT_OCR); 5455 break; 5456 case KILL_ADAPTER: 5457 megaraid_sas_kill_hba(instance); 5458 break; 5459 case IGNORE_TIMEOUT: 5460 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5461 __func__, __LINE__); 5462 break; 5463 } 5464 } else 5465 megasas_return_cmd(instance, cmd); 5466 5467 return ret; 5468 } 5469 5470 /** 5471 * megasas_issue_init_mfi - Initializes the FW 5472 * @instance: Adapter soft state 5473 * 5474 * Issues the INIT MFI cmd 5475 */ 5476 static int 5477 megasas_issue_init_mfi(struct megasas_instance *instance) 5478 { 5479 __le32 context; 5480 struct megasas_cmd *cmd; 5481 struct megasas_init_frame *init_frame; 5482 struct megasas_init_queue_info *initq_info; 5483 dma_addr_t init_frame_h; 5484 dma_addr_t initq_info_h; 5485 5486 /* 5487 * Prepare a init frame. Note the init frame points to queue info 5488 * structure. Each frame has SGL allocated after first 64 bytes. For 5489 * this frame - since we don't need any SGL - we use SGL's space as 5490 * queue info structure 5491 * 5492 * We will not get a NULL command below. We just created the pool. 
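 *
 * Layout used below within the command's frame:
 *	offset 0  : struct megasas_init_frame
 *	offset 64 : struct megasas_init_queue_info (reuses the SGL area)
 * so initq_info_h is simply frame_phys_addr + 64.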
5493 */ 5494 cmd = megasas_get_cmd(instance); 5495 5496 init_frame = (struct megasas_init_frame *)cmd->frame; 5497 initq_info = (struct megasas_init_queue_info *) 5498 ((unsigned long)init_frame + 64); 5499 5500 init_frame_h = cmd->frame_phys_addr; 5501 initq_info_h = init_frame_h + 64; 5502 5503 context = init_frame->context; 5504 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5505 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5506 init_frame->context = context; 5507 5508 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5509 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5510 5511 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5512 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5513 5514 init_frame->cmd = MFI_CMD_INIT; 5515 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5516 init_frame->queue_info_new_phys_addr_lo = 5517 cpu_to_le32(lower_32_bits(initq_info_h)); 5518 init_frame->queue_info_new_phys_addr_hi = 5519 cpu_to_le32(upper_32_bits(initq_info_h)); 5520 5521 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5522 5523 /* 5524 * disable the intr before firing the init frame to FW 5525 */ 5526 instance->instancet->disable_intr(instance); 5527 5528 /* 5529 * Issue the init frame in polled mode 5530 */ 5531 5532 if (megasas_issue_polled(instance, cmd)) { 5533 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5534 megasas_return_cmd(instance, cmd); 5535 goto fail_fw_init; 5536 } 5537 5538 megasas_return_cmd(instance, cmd); 5539 5540 return 0; 5541 5542 fail_fw_init: 5543 return -EINVAL; 5544 } 5545 5546 static u32 5547 megasas_init_adapter_mfi(struct megasas_instance *instance) 5548 { 5549 u32 context_sz; 5550 u32 reply_q_sz; 5551 5552 /* 5553 * Get various operational parameters from status register 5554 */ 5555 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5556 /* 5557 * Reduce the max supported cmds by 1. This is to ensure that the 5558 * reply_q_sz (1 more than the max cmd that driver may send) 5559 * does not exceed max cmds that the FW can support 5560 */ 5561 instance->max_fw_cmds = instance->max_fw_cmds-1; 5562 instance->max_mfi_cmds = instance->max_fw_cmds; 5563 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5564 0x10; 5565 /* 5566 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5567 * are reserved for IOCTL + driver's internal DCMDs. 5568 */ 5569 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5570 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5571 instance->max_scsi_cmds = (instance->max_fw_cmds - 5572 MEGASAS_SKINNY_INT_CMDS); 5573 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5574 } else { 5575 instance->max_scsi_cmds = (instance->max_fw_cmds - 5576 MEGASAS_INT_CMDS); 5577 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5578 } 5579 5580 instance->cur_can_queue = instance->max_scsi_cmds; 5581 /* 5582 * Create a pool of commands 5583 */ 5584 if (megasas_alloc_cmds(instance)) 5585 goto fail_alloc_cmds; 5586 5587 /* 5588 * Allocate memory for reply queue. Length of reply queue should 5589 * be _one_ more than the maximum commands handled by the firmware. 5590 * 5591 * Note: When FW completes commands, it places corresponding contex 5592 * values in this circular reply queue. This circular queue is a fairly 5593 * typical producer-consumer queue. 
FW is the producer (of completed 5594 * commands) and the driver is the consumer. 5595 */ 5596 context_sz = sizeof(u32); 5597 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5598 5599 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5600 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5601 5602 if (!instance->reply_queue) { 5603 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5604 goto fail_reply_queue; 5605 } 5606 5607 if (megasas_issue_init_mfi(instance)) 5608 goto fail_fw_init; 5609 5610 if (megasas_get_ctrl_info(instance)) { 5611 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5612 "Fail from %s %d\n", instance->unique_id, 5613 __func__, __LINE__); 5614 goto fail_fw_init; 5615 } 5616 5617 instance->fw_support_ieee = 0; 5618 instance->fw_support_ieee = 5619 (instance->instancet->read_fw_status_reg(instance) & 5620 0x04000000); 5621 5622 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5623 instance->fw_support_ieee); 5624 5625 if (instance->fw_support_ieee) 5626 instance->flag_ieee = 1; 5627 5628 return 0; 5629 5630 fail_fw_init: 5631 5632 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5633 instance->reply_queue, instance->reply_queue_h); 5634 fail_reply_queue: 5635 megasas_free_cmds(instance); 5636 5637 fail_alloc_cmds: 5638 return 1; 5639 } 5640 5641 static 5642 void megasas_setup_irq_poll(struct megasas_instance *instance) 5643 { 5644 struct megasas_irq_context *irq_ctx; 5645 u32 count, i; 5646 5647 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5648 5649 /* Initialize IRQ poll */ 5650 for (i = 0; i < count; i++) { 5651 irq_ctx = &instance->irq_context[i]; 5652 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5653 irq_ctx->irq_poll_scheduled = false; 5654 irq_poll_init(&irq_ctx->irqpoll, 5655 instance->threshold_reply_count, 5656 megasas_irqpoll); 5657 } 5658 } 5659 5660 /* 5661 * megasas_setup_irqs_ioapic - register legacy interrupts. 5662 * @instance: Adapter soft state 5663 * 5664 * Do not enable interrupt, only setup ISRs. 5665 * 5666 * Return 0 on success. 5667 */ 5668 static int 5669 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5670 { 5671 struct pci_dev *pdev; 5672 5673 pdev = instance->pdev; 5674 instance->irq_context[0].instance = instance; 5675 instance->irq_context[0].MSIxIndex = 0; 5676 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", 5677 "megasas", instance->host->host_no); 5678 if (request_irq(pci_irq_vector(pdev, 0), 5679 instance->instancet->service_isr, IRQF_SHARED, 5680 instance->irq_context->name, &instance->irq_context[0])) { 5681 dev_err(&instance->pdev->dev, 5682 "Failed to register IRQ from %s %d\n", 5683 __func__, __LINE__); 5684 return -1; 5685 } 5686 instance->perf_mode = MR_LATENCY_PERF_MODE; 5687 instance->low_latency_index_start = 0; 5688 return 0; 5689 } 5690 5691 /** 5692 * megasas_setup_irqs_msix - register MSI-x interrupts. 5693 * @instance: Adapter soft state 5694 * @is_probe: Driver probe check 5695 * 5696 * Do not enable interrupt, only setup ISRs. 5697 * 5698 * Return 0 on success. 
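 *
 * If request_irq() fails for any vector, the vectors registered so far
 * are freed and, at probe time, the driver falls back to a single legacy
 * IO-APIC interrupt via megasas_setup_irqs_ioapic().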
5699 */ 5700 static int 5701 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5702 { 5703 int i, j; 5704 struct pci_dev *pdev; 5705 5706 pdev = instance->pdev; 5707 5708 /* Try MSI-x */ 5709 for (i = 0; i < instance->msix_vectors; i++) { 5710 instance->irq_context[i].instance = instance; 5711 instance->irq_context[i].MSIxIndex = i; 5712 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", 5713 "megasas", instance->host->host_no, i); 5714 if (request_irq(pci_irq_vector(pdev, i), 5715 instance->instancet->service_isr, 0, instance->irq_context[i].name, 5716 &instance->irq_context[i])) { 5717 dev_err(&instance->pdev->dev, 5718 "Failed to register IRQ for vector %d.\n", i); 5719 for (j = 0; j < i; j++) { 5720 if (j < instance->low_latency_index_start) 5721 irq_set_affinity_hint( 5722 pci_irq_vector(pdev, j), NULL); 5723 free_irq(pci_irq_vector(pdev, j), 5724 &instance->irq_context[j]); 5725 } 5726 /* Retry irq register for IO_APIC*/ 5727 instance->msix_vectors = 0; 5728 instance->msix_load_balance = false; 5729 if (is_probe) { 5730 pci_free_irq_vectors(instance->pdev); 5731 return megasas_setup_irqs_ioapic(instance); 5732 } else { 5733 return -1; 5734 } 5735 } 5736 } 5737 5738 return 0; 5739 } 5740 5741 /* 5742 * megasas_destroy_irqs- unregister interrupts. 5743 * @instance: Adapter soft state 5744 * return: void 5745 */ 5746 static void 5747 megasas_destroy_irqs(struct megasas_instance *instance) { 5748 5749 int i; 5750 int count; 5751 struct megasas_irq_context *irq_ctx; 5752 5753 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5754 if (instance->adapter_type != MFI_SERIES) { 5755 for (i = 0; i < count; i++) { 5756 irq_ctx = &instance->irq_context[i]; 5757 irq_poll_disable(&irq_ctx->irqpoll); 5758 } 5759 } 5760 5761 if (instance->msix_vectors) 5762 for (i = 0; i < instance->msix_vectors; i++) { 5763 if (i < instance->low_latency_index_start) 5764 irq_set_affinity_hint( 5765 pci_irq_vector(instance->pdev, i), NULL); 5766 free_irq(pci_irq_vector(instance->pdev, i), 5767 &instance->irq_context[i]); 5768 } 5769 else 5770 free_irq(pci_irq_vector(instance->pdev, 0), 5771 &instance->irq_context[0]); 5772 } 5773 5774 /** 5775 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5776 * @instance: Adapter soft state 5777 * 5778 * Return 0 on success. 
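 *
 * Allocates JBOD_MAPS_COUNT DMA-coherent PD sequence-number buffers (on
 * first use) and syncs both with firmware; use_seqnum_jbod_fp ends up
 * enabled only if the controller supports the feature, the driver is not
 * in kdump mode (reset_devices) and both sync DCMDs succeed.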
5779 */ 5780 void 5781 megasas_setup_jbod_map(struct megasas_instance *instance) 5782 { 5783 int i; 5784 struct fusion_context *fusion = instance->ctrl_context; 5785 u32 pd_seq_map_sz; 5786 5787 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5788 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5789 5790 instance->use_seqnum_jbod_fp = 5791 instance->support_seqnum_jbod_fp; 5792 if (reset_devices || !fusion || 5793 !instance->support_seqnum_jbod_fp) { 5794 dev_info(&instance->pdev->dev, 5795 "JBOD sequence map is disabled %s %d\n", 5796 __func__, __LINE__); 5797 instance->use_seqnum_jbod_fp = false; 5798 return; 5799 } 5800 5801 if (fusion->pd_seq_sync[0]) 5802 goto skip_alloc; 5803 5804 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5805 fusion->pd_seq_sync[i] = dma_alloc_coherent 5806 (&instance->pdev->dev, pd_seq_map_sz, 5807 &fusion->pd_seq_phys[i], GFP_KERNEL); 5808 if (!fusion->pd_seq_sync[i]) { 5809 dev_err(&instance->pdev->dev, 5810 "Failed to allocate memory from %s %d\n", 5811 __func__, __LINE__); 5812 if (i == 1) { 5813 dma_free_coherent(&instance->pdev->dev, 5814 pd_seq_map_sz, fusion->pd_seq_sync[0], 5815 fusion->pd_seq_phys[0]); 5816 fusion->pd_seq_sync[0] = NULL; 5817 } 5818 instance->use_seqnum_jbod_fp = false; 5819 return; 5820 } 5821 } 5822 5823 skip_alloc: 5824 if (!megasas_sync_pd_seq_num(instance, false) && 5825 !megasas_sync_pd_seq_num(instance, true)) 5826 instance->use_seqnum_jbod_fp = true; 5827 else 5828 instance->use_seqnum_jbod_fp = false; 5829 } 5830 5831 static void megasas_setup_reply_map(struct megasas_instance *instance) 5832 { 5833 const struct cpumask *mask; 5834 unsigned int queue, cpu, low_latency_index_start; 5835 5836 low_latency_index_start = instance->low_latency_index_start; 5837 5838 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5839 mask = pci_irq_get_affinity(instance->pdev, queue); 5840 if (!mask) 5841 goto fallback; 5842 5843 for_each_cpu(cpu, mask) 5844 instance->reply_map[cpu] = queue; 5845 } 5846 return; 5847 5848 fallback: 5849 queue = low_latency_index_start; 5850 for_each_possible_cpu(cpu) { 5851 instance->reply_map[cpu] = queue; 5852 if (queue == (instance->msix_vectors - 1)) 5853 queue = low_latency_index_start; 5854 else 5855 queue++; 5856 } 5857 } 5858 5859 /** 5860 * megasas_get_device_list - Get the PD and LD device list from FW. 5861 * @instance: Adapter soft state 5862 * @return: Success or failure 5863 * 5864 * Issue DCMDs to Firmware to get the PD and LD list. 5865 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5866 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
5867 */ 5868 static 5869 int megasas_get_device_list(struct megasas_instance *instance) 5870 { 5871 memset(instance->pd_list, 0, 5872 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5873 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5874 5875 if (instance->enable_fw_dev_list) { 5876 if (megasas_host_device_list_query(instance, true)) 5877 return FAILED; 5878 } else { 5879 if (megasas_get_pd_list(instance) < 0) { 5880 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5881 return FAILED; 5882 } 5883 5884 if (megasas_ld_list_query(instance, 5885 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5886 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5887 return FAILED; 5888 } 5889 } 5890 5891 return SUCCESS; 5892 } 5893 5894 /** 5895 * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues 5896 * @instance: Adapter soft state 5897 * return: void 5898 */ 5899 static inline void 5900 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance) 5901 { 5902 int i; 5903 int local_numa_node; 5904 5905 if (instance->perf_mode == MR_BALANCED_PERF_MODE) { 5906 local_numa_node = dev_to_node(&instance->pdev->dev); 5907 5908 for (i = 0; i < instance->low_latency_index_start; i++) 5909 irq_set_affinity_hint(pci_irq_vector(instance->pdev, i), 5910 cpumask_of_node(local_numa_node)); 5911 } 5912 } 5913 5914 static int 5915 __megasas_alloc_irq_vectors(struct megasas_instance *instance) 5916 { 5917 int i, irq_flags; 5918 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; 5919 struct irq_affinity *descp = &desc; 5920 5921 irq_flags = PCI_IRQ_MSIX; 5922 5923 if (instance->smp_affinity_enable) 5924 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 5925 else 5926 descp = NULL; 5927 5928 /* Do not allocate msix vectors for poll_queues. 5929 * msix_vectors is always within a range of FW supported reply queue. 5930 */ 5931 i = pci_alloc_irq_vectors_affinity(instance->pdev, 5932 instance->low_latency_index_start, 5933 instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); 5934 5935 return i; 5936 } 5937 5938 /** 5939 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors 5940 * @instance: Adapter soft state 5941 * return: void 5942 */ 5943 static void 5944 megasas_alloc_irq_vectors(struct megasas_instance *instance) 5945 { 5946 int i; 5947 unsigned int num_msix_req; 5948 5949 instance->iopoll_q_count = 0; 5950 if ((instance->adapter_type != MFI_SERIES) && 5951 poll_queues) { 5952 5953 instance->perf_mode = MR_LATENCY_PERF_MODE; 5954 instance->low_latency_index_start = 1; 5955 5956 /* reserve for default and non-mananged pre-vector. 
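 * (i.e. poll queues are reserved only when msix_vectors > poll_queues + 2)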
*/ 5957 if (instance->msix_vectors > (poll_queues + 2)) 5958 instance->iopoll_q_count = poll_queues; 5959 else 5960 instance->iopoll_q_count = 0; 5961 5962 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5963 instance->msix_vectors = min(num_msix_req, 5964 instance->msix_vectors); 5965 5966 } 5967 5968 i = __megasas_alloc_irq_vectors(instance); 5969 5970 if (((instance->perf_mode == MR_BALANCED_PERF_MODE) 5971 || instance->iopoll_q_count) && 5972 (i != (instance->msix_vectors - instance->iopoll_q_count))) { 5973 if (instance->msix_vectors) 5974 pci_free_irq_vectors(instance->pdev); 5975 /* Disable Balanced IOPS mode and try realloc vectors */ 5976 instance->perf_mode = MR_LATENCY_PERF_MODE; 5977 instance->low_latency_index_start = 1; 5978 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5979 5980 instance->msix_vectors = min(num_msix_req, 5981 instance->msix_vectors); 5982 5983 instance->iopoll_q_count = 0; 5984 i = __megasas_alloc_irq_vectors(instance); 5985 5986 } 5987 5988 dev_info(&instance->pdev->dev, 5989 "requested/available msix %d/%d poll_queue %d\n", 5990 instance->msix_vectors - instance->iopoll_q_count, 5991 i, instance->iopoll_q_count); 5992 5993 if (i > 0) 5994 instance->msix_vectors = i; 5995 else 5996 instance->msix_vectors = 0; 5997 5998 if (instance->smp_affinity_enable) 5999 megasas_set_high_iops_queue_affinity_hint(instance); 6000 } 6001 6002 /** 6003 * megasas_init_fw - Initializes the FW 6004 * @instance: Adapter soft state 6005 * 6006 * This is the main function for initializing firmware 6007 */ 6008 6009 static int megasas_init_fw(struct megasas_instance *instance) 6010 { 6011 u32 max_sectors_1; 6012 u32 max_sectors_2, tmp_sectors, msix_enable; 6013 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 6014 resource_size_t base_addr; 6015 void *base_addr_phys; 6016 struct megasas_ctrl_info *ctrl_info = NULL; 6017 unsigned long bar_list; 6018 int i, j, loop; 6019 struct IOV_111 *iovPtr; 6020 struct fusion_context *fusion; 6021 bool intr_coalescing; 6022 unsigned int num_msix_req; 6023 u16 lnksta, speed; 6024 6025 fusion = instance->ctrl_context; 6026 6027 /* Find first memory bar */ 6028 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 6029 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 6030 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 6031 "megasas: LSI")) { 6032 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 6033 return -EBUSY; 6034 } 6035 6036 base_addr = pci_resource_start(instance->pdev, instance->bar); 6037 instance->reg_set = ioremap(base_addr, 8192); 6038 6039 if (!instance->reg_set) { 6040 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 6041 goto fail_ioremap; 6042 } 6043 6044 base_addr_phys = &base_addr; 6045 dev_printk(KERN_DEBUG, &instance->pdev->dev, 6046 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 6047 instance->bar, base_addr_phys, instance->reg_set); 6048 6049 if (instance->adapter_type != MFI_SERIES) 6050 instance->instancet = &megasas_instance_template_fusion; 6051 else { 6052 switch (instance->pdev->device) { 6053 case PCI_DEVICE_ID_LSI_SAS1078R: 6054 case PCI_DEVICE_ID_LSI_SAS1078DE: 6055 instance->instancet = &megasas_instance_template_ppc; 6056 break; 6057 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 6058 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 6059 instance->instancet = &megasas_instance_template_gen2; 6060 break; 6061 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 6062 case 
PCI_DEVICE_ID_LSI_SAS0071SKINNY: 6063 instance->instancet = &megasas_instance_template_skinny; 6064 break; 6065 case PCI_DEVICE_ID_LSI_SAS1064R: 6066 case PCI_DEVICE_ID_DELL_PERC5: 6067 default: 6068 instance->instancet = &megasas_instance_template_xscale; 6069 instance->pd_list_not_supported = 1; 6070 break; 6071 } 6072 } 6073 6074 if (megasas_transition_to_ready(instance, 0)) { 6075 dev_info(&instance->pdev->dev, 6076 "Failed to transition controller to ready from %s!\n", 6077 __func__); 6078 if (instance->adapter_type != MFI_SERIES) { 6079 status_reg = instance->instancet->read_fw_status_reg( 6080 instance); 6081 if (status_reg & MFI_RESET_ADAPTER) { 6082 if (megasas_adp_reset_wait_for_ready 6083 (instance, true, 0) == FAILED) 6084 goto fail_ready_state; 6085 } else { 6086 goto fail_ready_state; 6087 } 6088 } else { 6089 atomic_set(&instance->fw_reset_no_pci_access, 1); 6090 instance->instancet->adp_reset 6091 (instance, instance->reg_set); 6092 atomic_set(&instance->fw_reset_no_pci_access, 0); 6093 6094 /*waiting for about 30 second before retry*/ 6095 ssleep(30); 6096 6097 if (megasas_transition_to_ready(instance, 0)) 6098 goto fail_ready_state; 6099 } 6100 6101 dev_info(&instance->pdev->dev, 6102 "FW restarted successfully from %s!\n", 6103 __func__); 6104 } 6105 6106 megasas_init_ctrl_params(instance); 6107 6108 if (megasas_set_dma_mask(instance)) 6109 goto fail_ready_state; 6110 6111 if (megasas_alloc_ctrl_mem(instance)) 6112 goto fail_alloc_dma_buf; 6113 6114 if (megasas_alloc_ctrl_dma_buffers(instance)) 6115 goto fail_alloc_dma_buf; 6116 6117 fusion = instance->ctrl_context; 6118 6119 if (instance->adapter_type >= VENTURA_SERIES) { 6120 scratch_pad_2 = 6121 megasas_readl(instance, 6122 &instance->reg_set->outbound_scratch_pad_2); 6123 instance->max_raid_mapsize = ((scratch_pad_2 >> 6124 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 6125 MR_MAX_RAID_MAP_SIZE_MASK); 6126 } 6127 6128 instance->enable_sdev_max_qd = enable_sdev_max_qd; 6129 6130 switch (instance->adapter_type) { 6131 case VENTURA_SERIES: 6132 fusion->pcie_bw_limitation = true; 6133 break; 6134 case AERO_SERIES: 6135 fusion->r56_div_offload = true; 6136 break; 6137 default: 6138 break; 6139 } 6140 6141 /* Check if MSI-X is supported while in ready state */ 6142 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 6143 0x4000000) >> 0x1a; 6144 if (msix_enable && !msix_disable) { 6145 6146 scratch_pad_1 = megasas_readl 6147 (instance, &instance->reg_set->outbound_scratch_pad_1); 6148 /* Check max MSI-X vectors */ 6149 if (fusion) { 6150 if (instance->adapter_type == THUNDERBOLT_SERIES) { 6151 /* Thunderbolt Series*/ 6152 instance->msix_vectors = (scratch_pad_1 6153 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 6154 } else { 6155 instance->msix_vectors = ((scratch_pad_1 6156 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 6157 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 6158 6159 /* 6160 * For Invader series, > 8 MSI-x vectors 6161 * supported by FW/HW implies combined 6162 * reply queue mode is enabled. 6163 * For Ventura series, > 16 MSI-x vectors 6164 * supported by FW/HW implies combined 6165 * reply queue mode is enabled. 6166 */ 6167 switch (instance->adapter_type) { 6168 case INVADER_SERIES: 6169 if (instance->msix_vectors > 8) 6170 instance->msix_combined = true; 6171 break; 6172 case AERO_SERIES: 6173 case VENTURA_SERIES: 6174 if (instance->msix_vectors > 16) 6175 instance->msix_combined = true; 6176 break; 6177 } 6178 6179 if (rdpq_enable) 6180 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
6181 1 : 0;
6182
6183 if (instance->adapter_type >= INVADER_SERIES &&
6184 !instance->msix_combined) {
6185 instance->msix_load_balance = true;
6186 instance->smp_affinity_enable = false;
6187 }
6188
6189 /* Save 1-15 reply post index address to local memory
6190 * Index 0 is already saved from reg offset
6191 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6192 */
6193 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6194 instance->reply_post_host_index_addr[loop] =
6195 (u32 __iomem *)
6196 ((u8 __iomem *)instance->reg_set +
6197 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6198 + (loop * 0x10));
6199 }
6200 }
6201
6202 dev_info(&instance->pdev->dev,
6203 "firmware supports msix\t: (%d)",
6204 instance->msix_vectors);
6205 if (msix_vectors)
6206 instance->msix_vectors = min(msix_vectors,
6207 instance->msix_vectors);
6208 } else /* MFI adapters */
6209 instance->msix_vectors = 1;
6210
6211
6212 /*
6213 * For Aero (if some conditions are met), driver will configure a
6214 * few additional reply queues with interrupt coalescing enabled.
6215 * These queues with interrupt coalescing enabled are called
6216 * High IOPS queues and the rest of the reply queues (based on the
6217 * number of logical CPUs) are termed low latency queues.
6218 *
6219 * Total Number of reply queues = High IOPS queues + low latency queues
6220 *
6221 * For the rest of the fusion adapters, 1 additional reply queue will be
6222 * reserved for management commands, and the rest of the reply queues
6223 * (based on the number of logical CPUs) will be used for IOs and
6224 * referenced as IO queues.
6225 * Total Number of reply queues = 1 + IO queues
6226 *
6227 * MFI adapters support a single MSI-x vector, so a single reply queue
6228 * is used for both IO and management commands.
6229 */
6230
6231 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6232 true : false;
6233 if (intr_coalescing &&
6234 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6235 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6236 instance->perf_mode = MR_BALANCED_PERF_MODE;
6237 else
6238 instance->perf_mode = MR_LATENCY_PERF_MODE;
6239
6240
6241 if (instance->adapter_type == AERO_SERIES) {
6242 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6243 speed = lnksta & PCI_EXP_LNKSTA_CLS;
6244
6245 /*
6246 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6247 * in latency perf mode and enable R1 PCI bandwidth algorithm
6248 */
6249 if (speed < 0x4) {
6250 instance->perf_mode = MR_LATENCY_PERF_MODE;
6251 fusion->pcie_bw_limitation = true;
6252 }
6253
6254 /*
6255 * Performance mode settings provided through the module parameter perf_mode
6256 * will take effect only for:
6257 * 1. Aero family of adapters.
6258 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6259 */
6260 if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6261 (perf_mode <= MR_LATENCY_PERF_MODE))
6262 instance->perf_mode = perf_mode;
6263 /*
6264 * If intr coalescing is not supported by controller FW, then IOPS
6265 * and Balanced modes are not feasible.
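 *
 * Note that this check comes after the link-speed check and the perf_mode
 * module parameter handling above, so it overrides both and forces
 * latency mode.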
6266 */ 6267 if (!intr_coalescing) 6268 instance->perf_mode = MR_LATENCY_PERF_MODE; 6269 6270 } 6271 6272 if (instance->perf_mode == MR_BALANCED_PERF_MODE) 6273 instance->low_latency_index_start = 6274 MR_HIGH_IOPS_QUEUE_COUNT; 6275 else 6276 instance->low_latency_index_start = 1; 6277 6278 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 6279 6280 instance->msix_vectors = min(num_msix_req, 6281 instance->msix_vectors); 6282 6283 megasas_alloc_irq_vectors(instance); 6284 if (!instance->msix_vectors) 6285 instance->msix_load_balance = false; 6286 } 6287 /* 6288 * MSI-X host index 0 is common for all adapter. 6289 * It is used for all MPT based Adapters. 6290 */ 6291 if (instance->msix_combined) { 6292 instance->reply_post_host_index_addr[0] = 6293 (u32 *)((u8 *)instance->reg_set + 6294 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 6295 } else { 6296 instance->reply_post_host_index_addr[0] = 6297 (u32 *)((u8 *)instance->reg_set + 6298 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 6299 } 6300 6301 if (!instance->msix_vectors) { 6302 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 6303 if (i < 0) 6304 goto fail_init_adapter; 6305 } 6306 6307 megasas_setup_reply_map(instance); 6308 6309 dev_info(&instance->pdev->dev, 6310 "current msix/online cpus\t: (%d/%d)\n", 6311 instance->msix_vectors, (unsigned int)num_online_cpus()); 6312 dev_info(&instance->pdev->dev, 6313 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 6314 6315 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6316 (unsigned long)instance); 6317 6318 /* 6319 * Below are default value for legacy Firmware. 6320 * non-fusion based controllers 6321 */ 6322 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6323 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6324 /* Get operational params, sge flags, send init cmd to controller */ 6325 if (instance->instancet->init_adapter(instance)) 6326 goto fail_init_adapter; 6327 6328 if (instance->adapter_type >= VENTURA_SERIES) { 6329 scratch_pad_3 = 6330 megasas_readl(instance, 6331 &instance->reg_set->outbound_scratch_pad_3); 6332 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6333 MR_DEFAULT_NVME_PAGE_SHIFT) 6334 instance->nvme_page_size = 6335 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6336 6337 dev_info(&instance->pdev->dev, 6338 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6339 } 6340 6341 if (instance->msix_vectors ? 
6342 megasas_setup_irqs_msix(instance, 1) : 6343 megasas_setup_irqs_ioapic(instance)) 6344 goto fail_init_adapter; 6345 6346 if (instance->adapter_type != MFI_SERIES) 6347 megasas_setup_irq_poll(instance); 6348 6349 instance->instancet->enable_intr(instance); 6350 6351 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6352 6353 megasas_setup_jbod_map(instance); 6354 6355 if (megasas_get_device_list(instance) != SUCCESS) { 6356 dev_err(&instance->pdev->dev, 6357 "%s: megasas_get_device_list failed\n", 6358 __func__); 6359 goto fail_get_ld_pd_list; 6360 } 6361 6362 /* stream detection initialization */ 6363 if (instance->adapter_type >= VENTURA_SERIES) { 6364 fusion->stream_detect_by_ld = 6365 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6366 sizeof(struct LD_STREAM_DETECT *), 6367 GFP_KERNEL); 6368 if (!fusion->stream_detect_by_ld) { 6369 dev_err(&instance->pdev->dev, 6370 "unable to allocate stream detection for pool of LDs\n"); 6371 goto fail_get_ld_pd_list; 6372 } 6373 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6374 fusion->stream_detect_by_ld[i] = 6375 kzalloc(sizeof(struct LD_STREAM_DETECT), 6376 GFP_KERNEL); 6377 if (!fusion->stream_detect_by_ld[i]) { 6378 dev_err(&instance->pdev->dev, 6379 "unable to allocate stream detect by LD\n "); 6380 for (j = 0; j < i; ++j) 6381 kfree(fusion->stream_detect_by_ld[j]); 6382 kfree(fusion->stream_detect_by_ld); 6383 fusion->stream_detect_by_ld = NULL; 6384 goto fail_get_ld_pd_list; 6385 } 6386 fusion->stream_detect_by_ld[i]->mru_bit_map 6387 = MR_STREAM_BITMAP; 6388 } 6389 } 6390 6391 /* 6392 * Compute the max allowed sectors per IO: The controller info has two 6393 * limits on max sectors. Driver should use the minimum of these two. 6394 * 6395 * 1 << stripe_sz_ops.min = max sectors per strip 6396 * 6397 * Note that older firmwares ( < FW ver 30) didn't report information 6398 * to calculate max_sectors_1. So the number ended up as zero always. 6399 */ 6400 tmp_sectors = 0; 6401 ctrl_info = instance->ctrl_info_buf; 6402 6403 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6404 le16_to_cpu(ctrl_info->max_strips_per_io); 6405 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6406 6407 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6408 6409 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6410 instance->passive = ctrl_info->cluster.passive; 6411 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6412 instance->UnevenSpanSupport = 6413 ctrl_info->adapterOperations2.supportUnevenSpans; 6414 if (instance->UnevenSpanSupport) { 6415 struct fusion_context *fusion = instance->ctrl_context; 6416 if (MR_ValidateMapInfo(instance, instance->map_id)) 6417 fusion->fast_path_io = 1; 6418 else 6419 fusion->fast_path_io = 0; 6420 6421 } 6422 if (ctrl_info->host_interface.SRIOV) { 6423 instance->requestorId = ctrl_info->iov.requestorId; 6424 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6425 if (!ctrl_info->adapterOperations2.activePassive) 6426 instance->PlasmaFW111 = 1; 6427 6428 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6429 instance->PlasmaFW111 ? 
"1.11" : "new"); 6430 6431 if (instance->PlasmaFW111) { 6432 iovPtr = (struct IOV_111 *) 6433 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6434 instance->requestorId = iovPtr->requestorId; 6435 } 6436 } 6437 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6438 instance->requestorId); 6439 } 6440 6441 instance->crash_dump_fw_support = 6442 ctrl_info->adapterOperations3.supportCrashDump; 6443 instance->crash_dump_drv_support = 6444 (instance->crash_dump_fw_support && 6445 instance->crash_dump_buf); 6446 if (instance->crash_dump_drv_support) 6447 megasas_set_crash_dump_params(instance, 6448 MR_CRASH_BUF_TURN_OFF); 6449 6450 else { 6451 if (instance->crash_dump_buf) 6452 dma_free_coherent(&instance->pdev->dev, 6453 CRASH_DMA_BUF_SIZE, 6454 instance->crash_dump_buf, 6455 instance->crash_dump_h); 6456 instance->crash_dump_buf = NULL; 6457 } 6458 6459 if (instance->snapdump_wait_time) { 6460 megasas_get_snapdump_properties(instance); 6461 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6462 instance->snapdump_wait_time); 6463 } 6464 6465 dev_info(&instance->pdev->dev, 6466 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6467 le16_to_cpu(ctrl_info->pci.vendor_id), 6468 le16_to_cpu(ctrl_info->pci.device_id), 6469 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6470 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6471 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6472 instance->UnevenSpanSupport ? "yes" : "no"); 6473 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6474 instance->crash_dump_drv_support ? "yes" : "no"); 6475 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6476 instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); 6477 6478 instance->max_sectors_per_req = instance->max_num_sge * 6479 SGE_BUFFER_SIZE / 512; 6480 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6481 instance->max_sectors_per_req = tmp_sectors; 6482 6483 /* Check for valid throttlequeuedepth module parameter */ 6484 if (throttlequeuedepth && 6485 throttlequeuedepth <= instance->max_scsi_cmds) 6486 instance->throttlequeuedepth = throttlequeuedepth; 6487 else 6488 instance->throttlequeuedepth = 6489 MEGASAS_THROTTLE_QUEUE_DEPTH; 6490 6491 if ((resetwaittime < 1) || 6492 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6493 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6494 6495 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6496 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6497 6498 /* Launch SR-IOV heartbeat timer */ 6499 if (instance->requestorId) { 6500 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6501 megasas_start_timer(instance); 6502 } else { 6503 instance->skip_heartbeat_timer_del = 1; 6504 goto fail_get_ld_pd_list; 6505 } 6506 } 6507 6508 /* 6509 * Create and start watchdog thread which will monitor 6510 * controller state every 1 sec and trigger OCR when 6511 * it enters fault state 6512 */ 6513 if (instance->adapter_type != MFI_SERIES) 6514 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6515 goto fail_start_watchdog; 6516 6517 return 0; 6518 6519 fail_start_watchdog: 6520 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6521 del_timer_sync(&instance->sriov_heartbeat_timer); 6522 fail_get_ld_pd_list: 6523 instance->instancet->disable_intr(instance); 6524 megasas_destroy_irqs(instance); 6525 fail_init_adapter: 6526 if (instance->msix_vectors) 6527 pci_free_irq_vectors(instance->pdev); 6528 instance->msix_vectors = 0; 6529 fail_alloc_dma_buf: 6530 megasas_free_ctrl_dma_buffers(instance); 
6531 megasas_free_ctrl_mem(instance); 6532 fail_ready_state: 6533 iounmap(instance->reg_set); 6534 6535 fail_ioremap: 6536 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6537 6538 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6539 __func__, __LINE__); 6540 return -EINVAL; 6541 } 6542 6543 /** 6544 * megasas_release_mfi - Reverses the FW initialization 6545 * @instance: Adapter soft state 6546 */ 6547 static void megasas_release_mfi(struct megasas_instance *instance) 6548 { 6549 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6550 6551 if (instance->reply_queue) 6552 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6553 instance->reply_queue, instance->reply_queue_h); 6554 6555 megasas_free_cmds(instance); 6556 6557 iounmap(instance->reg_set); 6558 6559 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6560 } 6561 6562 /** 6563 * megasas_get_seq_num - Gets latest event sequence numbers 6564 * @instance: Adapter soft state 6565 * @eli: FW event log sequence numbers information 6566 * 6567 * FW maintains a log of all events in a non-volatile area. Upper layers would 6568 * usually find out the latest sequence number of the events, the seq number at 6569 * the boot etc. They would "read" all the events below the latest seq number 6570 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6571 * number), they would subsribe to AEN (asynchronous event notification) and 6572 * wait for the events to happen. 6573 */ 6574 static int 6575 megasas_get_seq_num(struct megasas_instance *instance, 6576 struct megasas_evt_log_info *eli) 6577 { 6578 struct megasas_cmd *cmd; 6579 struct megasas_dcmd_frame *dcmd; 6580 struct megasas_evt_log_info *el_info; 6581 dma_addr_t el_info_h = 0; 6582 int ret; 6583 6584 cmd = megasas_get_cmd(instance); 6585 6586 if (!cmd) { 6587 return -ENOMEM; 6588 } 6589 6590 dcmd = &cmd->frame->dcmd; 6591 el_info = dma_alloc_coherent(&instance->pdev->dev, 6592 sizeof(struct megasas_evt_log_info), 6593 &el_info_h, GFP_KERNEL); 6594 if (!el_info) { 6595 megasas_return_cmd(instance, cmd); 6596 return -ENOMEM; 6597 } 6598 6599 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6600 6601 dcmd->cmd = MFI_CMD_DCMD; 6602 dcmd->cmd_status = 0x0; 6603 dcmd->sge_count = 1; 6604 dcmd->flags = MFI_FRAME_DIR_READ; 6605 dcmd->timeout = 0; 6606 dcmd->pad_0 = 0; 6607 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6608 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6609 6610 megasas_set_dma_settings(instance, dcmd, el_info_h, 6611 sizeof(struct megasas_evt_log_info)); 6612 6613 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6614 if (ret != DCMD_SUCCESS) { 6615 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6616 __func__, __LINE__); 6617 goto dcmd_failed; 6618 } 6619 6620 /* 6621 * Copy the data back into callers buffer 6622 */ 6623 eli->newest_seq_num = el_info->newest_seq_num; 6624 eli->oldest_seq_num = el_info->oldest_seq_num; 6625 eli->clear_seq_num = el_info->clear_seq_num; 6626 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6627 eli->boot_seq_num = el_info->boot_seq_num; 6628 6629 dcmd_failed: 6630 dma_free_coherent(&instance->pdev->dev, 6631 sizeof(struct megasas_evt_log_info), 6632 el_info, el_info_h); 6633 6634 megasas_return_cmd(instance, cmd); 6635 6636 return ret; 6637 } 6638 6639 /** 6640 * megasas_register_aen - Registers for asynchronous event notification 6641 * @instance: Adapter soft state 6642 * @seq_num: The starting sequence number 6643 * 
@class_locale_word: Class of the event
6644 *
6645 * This function subscribes for AEN for events beyond the @seq_num. It requests
6646 * to be notified if and only if the event is of type @class_locale
6647 */
6648 static int
6649 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6650 u32 class_locale_word)
6651 {
6652 int ret_val;
6653 struct megasas_cmd *cmd;
6654 struct megasas_dcmd_frame *dcmd;
6655 union megasas_evt_class_locale curr_aen;
6656 union megasas_evt_class_locale prev_aen;
6657
6658 /*
6659 * If there is an AEN pending already (aen_cmd), check if the
6660 * class_locale of that pending AEN is inclusive of the new
6661 * AEN request we currently have. If it is, then we don't have
6662 * to do anything. In other words, whichever events the current
6663 * AEN request is subscribing to have already been subscribed
6664 * to.
6665 *
6666 * If the old_cmd is _not_ inclusive, then we have to abort
6667 * that command, form a class_locale that is a superset of both
6668 * old and current and re-issue to the FW
6669 */
6670
6671 curr_aen.word = class_locale_word;
6672
6673 if (instance->aen_cmd) {
6674
6675 prev_aen.word =
6676 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6677
6678 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6679 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6680 dev_info(&instance->pdev->dev,
6681 "%s %d out of range class %d sent by application\n",
6682 __func__, __LINE__, curr_aen.members.class);
6683 return 0;
6684 }
6685
6686 /*
6687 * A class whose enum value is smaller is inclusive of all
6688 * higher values. If a PROGRESS (= -1) was previously
6689 * registered, then new registration requests for higher
6690 * classes need not be sent to FW. They are automatically
6691 * included.
6692 *
6693 * Locale numbers don't have such a hierarchy. They are bitmap
6694 * values
6695 */
6696 if ((prev_aen.members.class <= curr_aen.members.class) &&
6697 !((prev_aen.members.locale & curr_aen.members.locale) ^
6698 curr_aen.members.locale)) {
6699 /*
6700 * Previously issued event registration includes the
6701 * current request. Nothing to do.
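 *
 * For example, an earlier registration for class -2 (DEBUG) with all
 * locale bits set already covers a later request for class 2 (CRITICAL)
 * with any locale subset, so no new registration DCMD is issued.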
6702 */ 6703 return 0; 6704 } else { 6705 curr_aen.members.locale |= prev_aen.members.locale; 6706 6707 if (prev_aen.members.class < curr_aen.members.class) 6708 curr_aen.members.class = prev_aen.members.class; 6709 6710 instance->aen_cmd->abort_aen = 1; 6711 ret_val = megasas_issue_blocked_abort_cmd(instance, 6712 instance-> 6713 aen_cmd, 30); 6714 6715 if (ret_val) { 6716 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6717 "previous AEN command\n"); 6718 return ret_val; 6719 } 6720 } 6721 } 6722 6723 cmd = megasas_get_cmd(instance); 6724 6725 if (!cmd) 6726 return -ENOMEM; 6727 6728 dcmd = &cmd->frame->dcmd; 6729 6730 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6731 6732 /* 6733 * Prepare DCMD for aen registration 6734 */ 6735 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6736 6737 dcmd->cmd = MFI_CMD_DCMD; 6738 dcmd->cmd_status = 0x0; 6739 dcmd->sge_count = 1; 6740 dcmd->flags = MFI_FRAME_DIR_READ; 6741 dcmd->timeout = 0; 6742 dcmd->pad_0 = 0; 6743 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6744 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6745 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6746 instance->last_seq_num = seq_num; 6747 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6748 6749 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6750 sizeof(struct megasas_evt_detail)); 6751 6752 if (instance->aen_cmd != NULL) { 6753 megasas_return_cmd(instance, cmd); 6754 return 0; 6755 } 6756 6757 /* 6758 * Store reference to the cmd used to register for AEN. When an 6759 * application wants us to register for AEN, we have to abort this 6760 * cmd and re-register with a new EVENT LOCALE supplied by that app 6761 */ 6762 instance->aen_cmd = cmd; 6763 6764 /* 6765 * Issue the aen registration frame 6766 */ 6767 instance->instancet->issue_dcmd(instance, cmd); 6768 6769 return 0; 6770 } 6771 6772 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6773 * 6774 * This DCMD will fetch few properties of LD/system PD defined 6775 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 6776 * 6777 * DCMD send by drivers whenever new target is added to the OS. 6778 * 6779 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6780 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6781 * 0 = system PD, 1 = LD. 6782 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6783 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6784 * 6785 * @instance: Adapter soft state 6786 * @sdev: OS provided scsi device 6787 * 6788 * Returns 0 on success non-zero on failure. 
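 *
 * If the DCMD times out, the routine may temporarily drop reset_mutex and
 * trigger an OCR via megasas_reset_fusion() before returning, depending on
 * dcmd_timeout_ocr_possible().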
6789 */ 6790 int 6791 megasas_get_target_prop(struct megasas_instance *instance, 6792 struct scsi_device *sdev) 6793 { 6794 int ret; 6795 struct megasas_cmd *cmd; 6796 struct megasas_dcmd_frame *dcmd; 6797 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6798 sdev->id; 6799 6800 cmd = megasas_get_cmd(instance); 6801 6802 if (!cmd) { 6803 dev_err(&instance->pdev->dev, 6804 "Failed to get cmd %s\n", __func__); 6805 return -ENOMEM; 6806 } 6807 6808 dcmd = &cmd->frame->dcmd; 6809 6810 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6811 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6812 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6813 6814 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6815 dcmd->cmd = MFI_CMD_DCMD; 6816 dcmd->cmd_status = 0xFF; 6817 dcmd->sge_count = 1; 6818 dcmd->flags = MFI_FRAME_DIR_READ; 6819 dcmd->timeout = 0; 6820 dcmd->pad_0 = 0; 6821 dcmd->data_xfer_len = 6822 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6823 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6824 6825 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6826 sizeof(struct MR_TARGET_PROPERTIES)); 6827 6828 if ((instance->adapter_type != MFI_SERIES) && 6829 !instance->mask_interrupts) 6830 ret = megasas_issue_blocked_cmd(instance, 6831 cmd, MFI_IO_TIMEOUT_SECS); 6832 else 6833 ret = megasas_issue_polled(instance, cmd); 6834 6835 switch (ret) { 6836 case DCMD_TIMEOUT: 6837 switch (dcmd_timeout_ocr_possible(instance)) { 6838 case INITIATE_OCR: 6839 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6840 mutex_unlock(&instance->reset_mutex); 6841 megasas_reset_fusion(instance->host, 6842 MFI_IO_TIMEOUT_OCR); 6843 mutex_lock(&instance->reset_mutex); 6844 break; 6845 case KILL_ADAPTER: 6846 megaraid_sas_kill_hba(instance); 6847 break; 6848 case IGNORE_TIMEOUT: 6849 dev_info(&instance->pdev->dev, 6850 "Ignore DCMD timeout: %s %d\n", 6851 __func__, __LINE__); 6852 break; 6853 } 6854 break; 6855 6856 default: 6857 megasas_return_cmd(instance, cmd); 6858 } 6859 if (ret != DCMD_SUCCESS) 6860 dev_err(&instance->pdev->dev, 6861 "return from %s %d return value %d\n", 6862 __func__, __LINE__, ret); 6863 6864 return ret; 6865 } 6866 6867 /** 6868 * megasas_start_aen - Subscribes to AEN during driver load time 6869 * @instance: Adapter soft state 6870 */ 6871 static int megasas_start_aen(struct megasas_instance *instance) 6872 { 6873 struct megasas_evt_log_info eli; 6874 union megasas_evt_class_locale class_locale; 6875 6876 /* 6877 * Get the latest sequence number from FW 6878 */ 6879 memset(&eli, 0, sizeof(eli)); 6880 6881 if (megasas_get_seq_num(instance, &eli)) 6882 return -1; 6883 6884 /* 6885 * Register AEN with FW for latest sequence number plus 1 6886 */ 6887 class_locale.members.reserved = 0; 6888 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6889 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6890 6891 return megasas_register_aen(instance, 6892 le32_to_cpu(eli.newest_seq_num) + 1, 6893 class_locale.word); 6894 } 6895 6896 /** 6897 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6898 * @instance: Adapter soft state 6899 */ 6900 static int megasas_io_attach(struct megasas_instance *instance) 6901 { 6902 struct Scsi_Host *host = instance->host; 6903 6904 /* 6905 * Export parameters required by SCSI mid-layer 6906 */ 6907 host->unique_id = instance->unique_id; 6908 host->can_queue = instance->max_scsi_cmds; 6909 host->this_id = instance->init_id; 6910 host->sg_tablesize = instance->max_num_sge; 6911 6912 if (instance->fw_support_ieee) 6913 instance->max_sectors_per_req 
= MEGASAS_MAX_SECTORS_IEEE; 6914 6915 /* 6916 * Check if the module parameter value for max_sectors can be used 6917 */ 6918 if (max_sectors && max_sectors < instance->max_sectors_per_req) 6919 instance->max_sectors_per_req = max_sectors; 6920 else { 6921 if (max_sectors) { 6922 if (((instance->pdev->device == 6923 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6924 (instance->pdev->device == 6925 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6926 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6927 instance->max_sectors_per_req = max_sectors; 6928 } else { 6929 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 6930 "and <= %d (or < 1MB for GEN2 controller)\n", 6931 instance->max_sectors_per_req); 6932 } 6933 } 6934 } 6935 6936 host->max_sectors = instance->max_sectors_per_req; 6937 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6938 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6939 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6940 host->max_lun = MEGASAS_MAX_LUN; 6941 host->max_cmd_len = 16; 6942 6943 /* Use shared host tagset only for fusion adaptors 6944 * if there are managed interrupts (smp affinity enabled case). 6945 * Single msix_vectors in kdump, so shared host tag is also disabled. 6946 */ 6947 6948 host->host_tagset = 0; 6949 host->nr_hw_queues = 1; 6950 6951 if ((instance->adapter_type != MFI_SERIES) && 6952 (instance->msix_vectors > instance->low_latency_index_start) && 6953 host_tagset_enable && 6954 instance->smp_affinity_enable) { 6955 host->host_tagset = 1; 6956 host->nr_hw_queues = instance->msix_vectors - 6957 instance->low_latency_index_start + instance->iopoll_q_count; 6958 if (instance->iopoll_q_count) 6959 host->nr_maps = 3; 6960 } else { 6961 instance->iopoll_q_count = 0; 6962 } 6963 6964 dev_info(&instance->pdev->dev, 6965 "Max firmware commands: %d shared with default " 6966 "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, 6967 host->nr_hw_queues - instance->iopoll_q_count, 6968 instance->iopoll_q_count); 6969 /* 6970 * Notify the mid-layer about the new controller 6971 */ 6972 if (scsi_add_host(host, &instance->pdev->dev)) { 6973 dev_err(&instance->pdev->dev, 6974 "Failed to add host from %s %d\n", 6975 __func__, __LINE__); 6976 return -ENODEV; 6977 } 6978 6979 return 0; 6980 } 6981 6982 /** 6983 * megasas_set_dma_mask - Set DMA mask for supported controllers 6984 * 6985 * @instance: Adapter soft state 6986 * Description: 6987 * 6988 * For Ventura, driver/FW will operate in 63bit DMA addresses. 6989 * 6990 * For invader- 6991 * By default, driver/FW will operate in 32bit DMA addresses 6992 * for consistent DMA mapping but if 32 bit consistent 6993 * DMA mask fails, driver will try with 63 bit consistent 6994 * mask provided FW is true 63bit DMA capable 6995 * 6996 * For older controllers(Thunderbolt and MFI based adapters)- 6997 * driver/FW will operate in 32 bit consistent DMA addresses. 6998 */ 6999 static int 7000 megasas_set_dma_mask(struct megasas_instance *instance) 7001 { 7002 u64 consistent_mask; 7003 struct pci_dev *pdev; 7004 u32 scratch_pad_1; 7005 7006 pdev = instance->pdev; 7007 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? 
7008 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 7009 7010 if (IS_DMA64) { 7011 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 7012 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7013 goto fail_set_dma_mask; 7014 7015 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 7016 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 7017 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 7018 /* 7019 * If 32 bit DMA mask fails, then try for 64 bit mask 7020 * for FW capable of handling 64 bit DMA. 7021 */ 7022 scratch_pad_1 = megasas_readl 7023 (instance, &instance->reg_set->outbound_scratch_pad_1); 7024 7025 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 7026 goto fail_set_dma_mask; 7027 else if (dma_set_mask_and_coherent(&pdev->dev, 7028 DMA_BIT_MASK(63))) 7029 goto fail_set_dma_mask; 7030 } 7031 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7032 goto fail_set_dma_mask; 7033 7034 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 7035 instance->consistent_mask_64bit = false; 7036 else 7037 instance->consistent_mask_64bit = true; 7038 7039 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 7040 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 7041 (instance->consistent_mask_64bit ? "63" : "32")); 7042 7043 return 0; 7044 7045 fail_set_dma_mask: 7046 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 7047 return -1; 7048 7049 } 7050 7051 /* 7052 * megasas_set_adapter_type - Set adapter type. 7053 * Supported controllers can be divided in 7054 * different categories- 7055 * enum MR_ADAPTER_TYPE { 7056 * MFI_SERIES = 1, 7057 * THUNDERBOLT_SERIES = 2, 7058 * INVADER_SERIES = 3, 7059 * VENTURA_SERIES = 4, 7060 * AERO_SERIES = 5, 7061 * }; 7062 * @instance: Adapter soft state 7063 * return: void 7064 */ 7065 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 7066 { 7067 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 7068 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 7069 instance->adapter_type = MFI_SERIES; 7070 } else { 7071 switch (instance->pdev->device) { 7072 case PCI_DEVICE_ID_LSI_AERO_10E1: 7073 case PCI_DEVICE_ID_LSI_AERO_10E2: 7074 case PCI_DEVICE_ID_LSI_AERO_10E5: 7075 case PCI_DEVICE_ID_LSI_AERO_10E6: 7076 instance->adapter_type = AERO_SERIES; 7077 break; 7078 case PCI_DEVICE_ID_LSI_VENTURA: 7079 case PCI_DEVICE_ID_LSI_CRUSADER: 7080 case PCI_DEVICE_ID_LSI_HARPOON: 7081 case PCI_DEVICE_ID_LSI_TOMCAT: 7082 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 7083 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 7084 instance->adapter_type = VENTURA_SERIES; 7085 break; 7086 case PCI_DEVICE_ID_LSI_FUSION: 7087 case PCI_DEVICE_ID_LSI_PLASMA: 7088 instance->adapter_type = THUNDERBOLT_SERIES; 7089 break; 7090 case PCI_DEVICE_ID_LSI_INVADER: 7091 case PCI_DEVICE_ID_LSI_INTRUDER: 7092 case PCI_DEVICE_ID_LSI_INTRUDER_24: 7093 case PCI_DEVICE_ID_LSI_CUTLASS_52: 7094 case PCI_DEVICE_ID_LSI_CUTLASS_53: 7095 case PCI_DEVICE_ID_LSI_FURY: 7096 instance->adapter_type = INVADER_SERIES; 7097 break; 7098 default: /* For all other supported controllers */ 7099 instance->adapter_type = MFI_SERIES; 7100 break; 7101 } 7102 } 7103 } 7104 7105 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 7106 { 7107 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 7108 sizeof(u32), &instance->producer_h, GFP_KERNEL); 7109 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 7110 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 7111 7112 if (!instance->producer || !instance->consumer) 
{ 7113 dev_err(&instance->pdev->dev, 7114 "Failed to allocate memory for producer, consumer\n"); 7115 return -1; 7116 } 7117 7118 *instance->producer = 0; 7119 *instance->consumer = 0; 7120 return 0; 7121 } 7122 7123 /** 7124 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 7125 * structures which are not common across MFI 7126 * adapters and fusion adapters. 7127 * For MFI based adapters, allocate producer and 7128 * consumer buffers. For fusion adapters, allocate 7129 * memory for fusion context. 7130 * @instance: Adapter soft state 7131 * return: 0 for SUCCESS 7132 */ 7133 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 7134 { 7135 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 7136 GFP_KERNEL); 7137 if (!instance->reply_map) 7138 return -ENOMEM; 7139 7140 switch (instance->adapter_type) { 7141 case MFI_SERIES: 7142 if (megasas_alloc_mfi_ctrl_mem(instance)) 7143 goto fail; 7144 break; 7145 case AERO_SERIES: 7146 case VENTURA_SERIES: 7147 case THUNDERBOLT_SERIES: 7148 case INVADER_SERIES: 7149 if (megasas_alloc_fusion_context(instance)) 7150 goto fail; 7151 break; 7152 } 7153 7154 return 0; 7155 fail: 7156 kfree(instance->reply_map); 7157 instance->reply_map = NULL; 7158 return -ENOMEM; 7159 } 7160 7161 /* 7162 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 7163 * producer, consumer buffers for MFI adapters 7164 * 7165 * @instance - Adapter soft instance 7166 * 7167 */ 7168 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 7169 { 7170 kfree(instance->reply_map); 7171 if (instance->adapter_type == MFI_SERIES) { 7172 if (instance->producer) 7173 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7174 instance->producer, 7175 instance->producer_h); 7176 if (instance->consumer) 7177 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7178 instance->consumer, 7179 instance->consumer_h); 7180 } else { 7181 megasas_free_fusion_context(instance); 7182 } 7183 } 7184 7185 /** 7186 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 7187 * driver load time 7188 * 7189 * @instance: Adapter soft instance 7190 * 7191 * @return: O for SUCCESS 7192 */ 7193 static inline 7194 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 7195 { 7196 struct pci_dev *pdev = instance->pdev; 7197 struct fusion_context *fusion = instance->ctrl_context; 7198 7199 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 7200 sizeof(struct megasas_evt_detail), 7201 &instance->evt_detail_h, GFP_KERNEL); 7202 7203 if (!instance->evt_detail) { 7204 dev_err(&instance->pdev->dev, 7205 "Failed to allocate event detail buffer\n"); 7206 return -ENOMEM; 7207 } 7208 7209 if (fusion) { 7210 fusion->ioc_init_request = 7211 dma_alloc_coherent(&pdev->dev, 7212 sizeof(struct MPI2_IOC_INIT_REQUEST), 7213 &fusion->ioc_init_request_phys, 7214 GFP_KERNEL); 7215 7216 if (!fusion->ioc_init_request) { 7217 dev_err(&pdev->dev, 7218 "Failed to allocate PD list buffer\n"); 7219 return -ENOMEM; 7220 } 7221 7222 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 7223 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7224 &instance->snapdump_prop_h, GFP_KERNEL); 7225 7226 if (!instance->snapdump_prop) 7227 dev_err(&pdev->dev, 7228 "Failed to allocate snapdump properties buffer\n"); 7229 7230 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 7231 HOST_DEVICE_LIST_SZ, 7232 &instance->host_device_list_buf_h, 7233 GFP_KERNEL); 7234 7235 if (!instance->host_device_list_buf) { 7236 
dev_err(&pdev->dev, 7237 "Failed to allocate targetid list buffer\n"); 7238 return -ENOMEM; 7239 } 7240 7241 } 7242 7243 instance->pd_list_buf = 7244 dma_alloc_coherent(&pdev->dev, 7245 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7246 &instance->pd_list_buf_h, GFP_KERNEL); 7247 7248 if (!instance->pd_list_buf) { 7249 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 7250 return -ENOMEM; 7251 } 7252 7253 instance->ctrl_info_buf = 7254 dma_alloc_coherent(&pdev->dev, 7255 sizeof(struct megasas_ctrl_info), 7256 &instance->ctrl_info_buf_h, GFP_KERNEL); 7257 7258 if (!instance->ctrl_info_buf) { 7259 dev_err(&pdev->dev, 7260 "Failed to allocate controller info buffer\n"); 7261 return -ENOMEM; 7262 } 7263 7264 instance->ld_list_buf = 7265 dma_alloc_coherent(&pdev->dev, 7266 sizeof(struct MR_LD_LIST), 7267 &instance->ld_list_buf_h, GFP_KERNEL); 7268 7269 if (!instance->ld_list_buf) { 7270 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7271 return -ENOMEM; 7272 } 7273 7274 instance->ld_targetid_list_buf = 7275 dma_alloc_coherent(&pdev->dev, 7276 sizeof(struct MR_LD_TARGETID_LIST), 7277 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7278 7279 if (!instance->ld_targetid_list_buf) { 7280 dev_err(&pdev->dev, 7281 "Failed to allocate LD targetid list buffer\n"); 7282 return -ENOMEM; 7283 } 7284 7285 if (!reset_devices) { 7286 instance->system_info_buf = 7287 dma_alloc_coherent(&pdev->dev, 7288 sizeof(struct MR_DRV_SYSTEM_INFO), 7289 &instance->system_info_h, GFP_KERNEL); 7290 instance->pd_info = 7291 dma_alloc_coherent(&pdev->dev, 7292 sizeof(struct MR_PD_INFO), 7293 &instance->pd_info_h, GFP_KERNEL); 7294 instance->tgt_prop = 7295 dma_alloc_coherent(&pdev->dev, 7296 sizeof(struct MR_TARGET_PROPERTIES), 7297 &instance->tgt_prop_h, GFP_KERNEL); 7298 instance->crash_dump_buf = 7299 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7300 &instance->crash_dump_h, GFP_KERNEL); 7301 7302 if (!instance->system_info_buf) 7303 dev_err(&instance->pdev->dev, 7304 "Failed to allocate system info buffer\n"); 7305 7306 if (!instance->pd_info) 7307 dev_err(&instance->pdev->dev, 7308 "Failed to allocate pd_info buffer\n"); 7309 7310 if (!instance->tgt_prop) 7311 dev_err(&instance->pdev->dev, 7312 "Failed to allocate tgt_prop buffer\n"); 7313 7314 if (!instance->crash_dump_buf) 7315 dev_err(&instance->pdev->dev, 7316 "Failed to allocate crash dump buffer\n"); 7317 } 7318 7319 return 0; 7320 } 7321 7322 /* 7323 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7324 * during driver load time 7325 * 7326 * @instance- Adapter soft instance 7327 * 7328 */ 7329 static inline 7330 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7331 { 7332 struct pci_dev *pdev = instance->pdev; 7333 struct fusion_context *fusion = instance->ctrl_context; 7334 7335 if (instance->evt_detail) 7336 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7337 instance->evt_detail, 7338 instance->evt_detail_h); 7339 7340 if (fusion && fusion->ioc_init_request) 7341 dma_free_coherent(&pdev->dev, 7342 sizeof(struct MPI2_IOC_INIT_REQUEST), 7343 fusion->ioc_init_request, 7344 fusion->ioc_init_request_phys); 7345 7346 if (instance->pd_list_buf) 7347 dma_free_coherent(&pdev->dev, 7348 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7349 instance->pd_list_buf, 7350 instance->pd_list_buf_h); 7351 7352 if (instance->ld_list_buf) 7353 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7354 instance->ld_list_buf, 7355 instance->ld_list_buf_h); 7356 7357 if 
(instance->ld_targetid_list_buf) 7358 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7359 instance->ld_targetid_list_buf, 7360 instance->ld_targetid_list_buf_h); 7361 7362 if (instance->ctrl_info_buf) 7363 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7364 instance->ctrl_info_buf, 7365 instance->ctrl_info_buf_h); 7366 7367 if (instance->system_info_buf) 7368 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7369 instance->system_info_buf, 7370 instance->system_info_h); 7371 7372 if (instance->pd_info) 7373 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7374 instance->pd_info, instance->pd_info_h); 7375 7376 if (instance->tgt_prop) 7377 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7378 instance->tgt_prop, instance->tgt_prop_h); 7379 7380 if (instance->crash_dump_buf) 7381 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7382 instance->crash_dump_buf, 7383 instance->crash_dump_h); 7384 7385 if (instance->snapdump_prop) 7386 dma_free_coherent(&pdev->dev, 7387 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7388 instance->snapdump_prop, 7389 instance->snapdump_prop_h); 7390 7391 if (instance->host_device_list_buf) 7392 dma_free_coherent(&pdev->dev, 7393 HOST_DEVICE_LIST_SZ, 7394 instance->host_device_list_buf, 7395 instance->host_device_list_buf_h); 7396 7397 } 7398 7399 /* 7400 * megasas_init_ctrl_params - Initialize controller's instance 7401 * parameters before FW init 7402 * @instance - Adapter soft instance 7403 * @return - void 7404 */ 7405 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7406 { 7407 instance->fw_crash_state = UNAVAILABLE; 7408 7409 megasas_poll_wait_aen = 0; 7410 instance->issuepend_done = 1; 7411 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7412 7413 /* 7414 * Initialize locks and queues 7415 */ 7416 INIT_LIST_HEAD(&instance->cmd_pool); 7417 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7418 7419 atomic_set(&instance->fw_outstanding, 0); 7420 atomic64_set(&instance->total_io_count, 0); 7421 7422 init_waitqueue_head(&instance->int_cmd_wait_q); 7423 init_waitqueue_head(&instance->abort_cmd_wait_q); 7424 7425 spin_lock_init(&instance->crashdump_lock); 7426 spin_lock_init(&instance->mfi_pool_lock); 7427 spin_lock_init(&instance->hba_lock); 7428 spin_lock_init(&instance->stream_lock); 7429 spin_lock_init(&instance->completion_lock); 7430 7431 mutex_init(&instance->reset_mutex); 7432 7433 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7434 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7435 instance->flag_ieee = 1; 7436 7437 megasas_dbg_lvl = 0; 7438 instance->flag = 0; 7439 instance->unload = 1; 7440 instance->last_time = 0; 7441 instance->disableOnlineCtrlReset = 1; 7442 instance->UnevenSpanSupport = 0; 7443 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7444 instance->msix_load_balance = false; 7445 7446 if (instance->adapter_type != MFI_SERIES) 7447 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7448 else 7449 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7450 } 7451 7452 /** 7453 * megasas_probe_one - PCI hotplug entry point 7454 * @pdev: PCI device structure 7455 * @id: PCI ids of supported hotplugged adapter 7456 */ 7457 static int megasas_probe_one(struct pci_dev *pdev, 7458 const struct pci_device_id *id) 7459 { 7460 int rval, pos; 7461 struct Scsi_Host *host; 7462 struct megasas_instance *instance; 7463 u16 control = 0; 7464 7465 switch (pdev->device) { 7466 case PCI_DEVICE_ID_LSI_AERO_10E0: 7467 case PCI_DEVICE_ID_LSI_AERO_10E3: 7468 case PCI_DEVICE_ID_LSI_AERO_10E4: 7469 case PCI_DEVICE_ID_LSI_AERO_10E7: 7470 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7471 return 1; 7472 case PCI_DEVICE_ID_LSI_AERO_10E1: 7473 case PCI_DEVICE_ID_LSI_AERO_10E5: 7474 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7475 break; 7476 } 7477 7478 /* Reset MSI-X in the kdump kernel */ 7479 if (reset_devices) { 7480 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7481 if (pos) { 7482 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7483 &control); 7484 if (control & PCI_MSIX_FLAGS_ENABLE) { 7485 dev_info(&pdev->dev, "resetting MSI-X\n"); 7486 pci_write_config_word(pdev, 7487 pos + PCI_MSIX_FLAGS, 7488 control & 7489 ~PCI_MSIX_FLAGS_ENABLE); 7490 } 7491 } 7492 } 7493 7494 /* 7495 * PCI prepping: enable device set bus mastering and dma mask 7496 */ 7497 rval = pci_enable_device_mem(pdev); 7498 7499 if (rval) { 7500 return rval; 7501 } 7502 7503 pci_set_master(pdev); 7504 7505 host = scsi_host_alloc(&megasas_template, 7506 sizeof(struct megasas_instance)); 7507 7508 if (!host) { 7509 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7510 goto fail_alloc_instance; 7511 } 7512 7513 instance = (struct megasas_instance *)host->hostdata; 7514 memset(instance, 0, sizeof(*instance)); 7515 atomic_set(&instance->fw_reset_no_pci_access, 0); 7516 7517 /* 7518 * Initialize PCI related and misc parameters 7519 */ 7520 instance->pdev = pdev; 7521 instance->host = host; 7522 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 7523 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7524 7525 megasas_set_adapter_type(instance); 7526 7527 /* 7528 * Initialize MFI Firmware 7529 */ 7530 if (megasas_init_fw(instance)) 7531 goto fail_init_mfi; 7532 7533 if (instance->requestorId) { 7534 if (instance->PlasmaFW111) { 7535 instance->vf_affiliation_111 = 7536 dma_alloc_coherent(&pdev->dev, 7537 sizeof(struct MR_LD_VF_AFFILIATION_111), 7538 &instance->vf_affiliation_111_h, 7539 GFP_KERNEL); 7540 if (!instance->vf_affiliation_111) 7541 dev_warn(&pdev->dev, "Can't allocate " 7542 "memory for VF affiliation buffer\n"); 7543 } else { 7544 instance->vf_affiliation = 7545 dma_alloc_coherent(&pdev->dev, 7546 (MAX_LOGICAL_DRIVES + 1) * 7547 sizeof(struct MR_LD_VF_AFFILIATION), 7548 &instance->vf_affiliation_h, 7549 GFP_KERNEL); 7550 if (!instance->vf_affiliation) 7551 dev_warn(&pdev->dev, "Can't allocate " 7552 "memory for VF affiliation buffer\n"); 7553 } 7554 } 7555 7556 /* 7557 * Store instance in PCI softstate 7558 */ 7559 pci_set_drvdata(pdev, instance); 7560 7561 /* 7562 * Add this controller to megasas_mgmt_info structure so that it 7563 * can be exported to management applications 7564 */ 7565 megasas_mgmt_info.count++; 7566 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7567 
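	/*
	 * megasas_mgmt_info is a module-global table: the character-node
	 * ioctl path later resolves a Scsi_Host number back to an adapter
	 * instance through it (megasas_lookup_instance() as used in
	 * megasas_mgmt_ioctl_fw()).  Illustrative sketch only, not the
	 * driver's actual helper, of what a lookup over this table
	 * amounts to:
	 *
	 *	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
	 *		inst = megasas_mgmt_info.instance[i];
	 *		if (inst && inst->host->host_no == host_no)
	 *			return inst;
	 *	}
	 *	return NULL;
	 *
	 * The table may become sparse after hot-unplug (see
	 * megasas_detach_one()), and the count++ above together with the
	 * max_index++ below are rolled back in the fail_io_attach error
	 * path of this function.
	 */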
megasas_mgmt_info.max_index++; 7568 7569 /* 7570 * Register with SCSI mid-layer 7571 */ 7572 if (megasas_io_attach(instance)) 7573 goto fail_io_attach; 7574 7575 instance->unload = 0; 7576 /* 7577 * Trigger SCSI to scan our drives 7578 */ 7579 if (!instance->enable_fw_dev_list || 7580 (instance->host_device_list_buf->count > 0)) 7581 scsi_scan_host(host); 7582 7583 /* 7584 * Initiate AEN (Asynchronous Event Notification) 7585 */ 7586 if (megasas_start_aen(instance)) { 7587 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7588 goto fail_start_aen; 7589 } 7590 7591 megasas_setup_debugfs(instance); 7592 7593 /* Get current SR-IOV LD/VF affiliation */ 7594 if (instance->requestorId) 7595 megasas_get_ld_vf_affiliation(instance, 1); 7596 7597 return 0; 7598 7599 fail_start_aen: 7600 instance->unload = 1; 7601 scsi_remove_host(instance->host); 7602 fail_io_attach: 7603 megasas_mgmt_info.count--; 7604 megasas_mgmt_info.max_index--; 7605 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7606 7607 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7608 del_timer_sync(&instance->sriov_heartbeat_timer); 7609 7610 instance->instancet->disable_intr(instance); 7611 megasas_destroy_irqs(instance); 7612 7613 if (instance->adapter_type != MFI_SERIES) 7614 megasas_release_fusion(instance); 7615 else 7616 megasas_release_mfi(instance); 7617 7618 if (instance->msix_vectors) 7619 pci_free_irq_vectors(instance->pdev); 7620 instance->msix_vectors = 0; 7621 7622 if (instance->fw_crash_state != UNAVAILABLE) 7623 megasas_free_host_crash_buffer(instance); 7624 7625 if (instance->adapter_type != MFI_SERIES) 7626 megasas_fusion_stop_watchdog(instance); 7627 fail_init_mfi: 7628 scsi_host_put(host); 7629 fail_alloc_instance: 7630 pci_disable_device(pdev); 7631 7632 return -ENODEV; 7633 } 7634 7635 /** 7636 * megasas_flush_cache - Requests FW to flush all its caches 7637 * @instance: Adapter soft state 7638 */ 7639 static void megasas_flush_cache(struct megasas_instance *instance) 7640 { 7641 struct megasas_cmd *cmd; 7642 struct megasas_dcmd_frame *dcmd; 7643 7644 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7645 return; 7646 7647 cmd = megasas_get_cmd(instance); 7648 7649 if (!cmd) 7650 return; 7651 7652 dcmd = &cmd->frame->dcmd; 7653 7654 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7655 7656 dcmd->cmd = MFI_CMD_DCMD; 7657 dcmd->cmd_status = 0x0; 7658 dcmd->sge_count = 0; 7659 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7660 dcmd->timeout = 0; 7661 dcmd->pad_0 = 0; 7662 dcmd->data_xfer_len = 0; 7663 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7664 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7665 7666 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7667 != DCMD_SUCCESS) { 7668 dev_err(&instance->pdev->dev, 7669 "return from %s %d\n", __func__, __LINE__); 7670 return; 7671 } 7672 7673 megasas_return_cmd(instance, cmd); 7674 } 7675 7676 /** 7677 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7678 * @instance: Adapter soft state 7679 * @opcode: Shutdown/Hibernate 7680 */ 7681 static void megasas_shutdown_controller(struct megasas_instance *instance, 7682 u32 opcode) 7683 { 7684 struct megasas_cmd *cmd; 7685 struct megasas_dcmd_frame *dcmd; 7686 7687 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7688 return; 7689 7690 cmd = megasas_get_cmd(instance); 7691 7692 if (!cmd) 7693 return; 7694 7695 if (instance->aen_cmd) 7696 megasas_issue_blocked_abort_cmd(instance, 7697 
instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7698 if (instance->map_update_cmd) 7699 megasas_issue_blocked_abort_cmd(instance, 7700 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7701 if (instance->jbod_seq_cmd) 7702 megasas_issue_blocked_abort_cmd(instance, 7703 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7704 7705 dcmd = &cmd->frame->dcmd; 7706 7707 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7708 7709 dcmd->cmd = MFI_CMD_DCMD; 7710 dcmd->cmd_status = 0x0; 7711 dcmd->sge_count = 0; 7712 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7713 dcmd->timeout = 0; 7714 dcmd->pad_0 = 0; 7715 dcmd->data_xfer_len = 0; 7716 dcmd->opcode = cpu_to_le32(opcode); 7717 7718 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7719 != DCMD_SUCCESS) { 7720 dev_err(&instance->pdev->dev, 7721 "return from %s %d\n", __func__, __LINE__); 7722 return; 7723 } 7724 7725 megasas_return_cmd(instance, cmd); 7726 } 7727 7728 /** 7729 * megasas_suspend - driver suspend entry point 7730 * @dev: Device structure 7731 */ 7732 static int __maybe_unused 7733 megasas_suspend(struct device *dev) 7734 { 7735 struct megasas_instance *instance; 7736 7737 instance = dev_get_drvdata(dev); 7738 7739 if (!instance) 7740 return 0; 7741 7742 instance->unload = 1; 7743 7744 dev_info(dev, "%s is called\n", __func__); 7745 7746 /* Shutdown SR-IOV heartbeat timer */ 7747 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7748 del_timer_sync(&instance->sriov_heartbeat_timer); 7749 7750 /* Stop the FW fault detection watchdog */ 7751 if (instance->adapter_type != MFI_SERIES) 7752 megasas_fusion_stop_watchdog(instance); 7753 7754 megasas_flush_cache(instance); 7755 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7756 7757 /* cancel the delayed work if this work still in queue */ 7758 if (instance->ev != NULL) { 7759 struct megasas_aen_event *ev = instance->ev; 7760 cancel_delayed_work_sync(&ev->hotplug_work); 7761 instance->ev = NULL; 7762 } 7763 7764 tasklet_kill(&instance->isr_tasklet); 7765 7766 pci_set_drvdata(instance->pdev, instance); 7767 instance->instancet->disable_intr(instance); 7768 7769 megasas_destroy_irqs(instance); 7770 7771 if (instance->msix_vectors) 7772 pci_free_irq_vectors(instance->pdev); 7773 7774 return 0; 7775 } 7776 7777 /** 7778 * megasas_resume- driver resume entry point 7779 * @dev: Device structure 7780 */ 7781 static int __maybe_unused 7782 megasas_resume(struct device *dev) 7783 { 7784 int rval; 7785 struct Scsi_Host *host; 7786 struct megasas_instance *instance; 7787 u32 status_reg; 7788 7789 instance = dev_get_drvdata(dev); 7790 7791 if (!instance) 7792 return 0; 7793 7794 host = instance->host; 7795 7796 dev_info(dev, "%s is called\n", __func__); 7797 7798 /* 7799 * We expect the FW state to be READY 7800 */ 7801 7802 if (megasas_transition_to_ready(instance, 0)) { 7803 dev_info(&instance->pdev->dev, 7804 "Failed to transition controller to ready from %s!\n", 7805 __func__); 7806 if (instance->adapter_type != MFI_SERIES) { 7807 status_reg = 7808 instance->instancet->read_fw_status_reg(instance); 7809 if (!(status_reg & MFI_RESET_ADAPTER) || 7810 ((megasas_adp_reset_wait_for_ready 7811 (instance, true, 0)) == FAILED)) 7812 goto fail_ready_state; 7813 } else { 7814 atomic_set(&instance->fw_reset_no_pci_access, 1); 7815 instance->instancet->adp_reset 7816 (instance, instance->reg_set); 7817 atomic_set(&instance->fw_reset_no_pci_access, 0); 7818 7819 /* waiting for about 30 seconds before retry */ 7820 ssleep(30); 7821 7822 if (megasas_transition_to_ready(instance, 0)) 
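			/*
			 * FW is still not READY even after the adapter
			 * reset and the 30 second settle delay above, so
			 * give up and exit through the resume error path.
			 */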
7823 goto fail_ready_state; 7824 } 7825 7826 dev_info(&instance->pdev->dev, 7827 "FW restarted successfully from %s!\n", 7828 __func__); 7829 } 7830 if (megasas_set_dma_mask(instance)) 7831 goto fail_set_dma_mask; 7832 7833 /* 7834 * Initialize MFI Firmware 7835 */ 7836 7837 atomic_set(&instance->fw_outstanding, 0); 7838 atomic_set(&instance->ldio_outstanding, 0); 7839 7840 /* Now re-enable MSI-X */ 7841 if (instance->msix_vectors) 7842 megasas_alloc_irq_vectors(instance); 7843 7844 if (!instance->msix_vectors) { 7845 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, 7846 PCI_IRQ_LEGACY); 7847 if (rval < 0) 7848 goto fail_reenable_msix; 7849 } 7850 7851 megasas_setup_reply_map(instance); 7852 7853 if (instance->adapter_type != MFI_SERIES) { 7854 megasas_reset_reply_desc(instance); 7855 if (megasas_ioc_init_fusion(instance)) { 7856 megasas_free_cmds(instance); 7857 megasas_free_cmds_fusion(instance); 7858 goto fail_init_mfi; 7859 } 7860 if (!megasas_get_map_info(instance)) 7861 megasas_sync_map_info(instance); 7862 } else { 7863 *instance->producer = 0; 7864 *instance->consumer = 0; 7865 if (megasas_issue_init_mfi(instance)) 7866 goto fail_init_mfi; 7867 } 7868 7869 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7870 goto fail_init_mfi; 7871 7872 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7873 (unsigned long)instance); 7874 7875 if (instance->msix_vectors ? 7876 megasas_setup_irqs_msix(instance, 0) : 7877 megasas_setup_irqs_ioapic(instance)) 7878 goto fail_init_mfi; 7879 7880 if (instance->adapter_type != MFI_SERIES) 7881 megasas_setup_irq_poll(instance); 7882 7883 /* Re-launch SR-IOV heartbeat timer */ 7884 if (instance->requestorId) { 7885 if (!megasas_sriov_start_heartbeat(instance, 0)) 7886 megasas_start_timer(instance); 7887 else { 7888 instance->skip_heartbeat_timer_del = 1; 7889 goto fail_init_mfi; 7890 } 7891 } 7892 7893 instance->instancet->enable_intr(instance); 7894 megasas_setup_jbod_map(instance); 7895 instance->unload = 0; 7896 7897 /* 7898 * Initiate AEN (Asynchronous Event Notification) 7899 */ 7900 if (megasas_start_aen(instance)) 7901 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7902 7903 /* Re-launch FW fault watchdog */ 7904 if (instance->adapter_type != MFI_SERIES) 7905 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7906 goto fail_start_watchdog; 7907 7908 return 0; 7909 7910 fail_start_watchdog: 7911 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7912 del_timer_sync(&instance->sriov_heartbeat_timer); 7913 fail_init_mfi: 7914 megasas_free_ctrl_dma_buffers(instance); 7915 megasas_free_ctrl_mem(instance); 7916 scsi_host_put(host); 7917 7918 fail_reenable_msix: 7919 fail_set_dma_mask: 7920 fail_ready_state: 7921 7922 return -ENODEV; 7923 } 7924 7925 static inline int 7926 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7927 { 7928 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7929 int i; 7930 u8 adp_state; 7931 7932 for (i = 0; i < wait_time; i++) { 7933 adp_state = atomic_read(&instance->adprecovery); 7934 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7935 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7936 break; 7937 7938 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7939 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7940 7941 msleep(1000); 7942 } 7943 7944 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7945 dev_info(&instance->pdev->dev, 7946 "%s HBA failed to become operational, adp_state %d\n", 7947 __func__, adp_state); 7948 return 1; 7949 } 7950 7951 return 
0; 7952 } 7953 7954 /** 7955 * megasas_detach_one - PCI hot"un"plug entry point 7956 * @pdev: PCI device structure 7957 */ 7958 static void megasas_detach_one(struct pci_dev *pdev) 7959 { 7960 int i; 7961 struct Scsi_Host *host; 7962 struct megasas_instance *instance; 7963 struct fusion_context *fusion; 7964 u32 pd_seq_map_sz; 7965 7966 instance = pci_get_drvdata(pdev); 7967 7968 if (!instance) 7969 return; 7970 7971 host = instance->host; 7972 fusion = instance->ctrl_context; 7973 7974 /* Shutdown SR-IOV heartbeat timer */ 7975 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7976 del_timer_sync(&instance->sriov_heartbeat_timer); 7977 7978 /* Stop the FW fault detection watchdog */ 7979 if (instance->adapter_type != MFI_SERIES) 7980 megasas_fusion_stop_watchdog(instance); 7981 7982 if (instance->fw_crash_state != UNAVAILABLE) 7983 megasas_free_host_crash_buffer(instance); 7984 scsi_remove_host(instance->host); 7985 instance->unload = 1; 7986 7987 if (megasas_wait_for_adapter_operational(instance)) 7988 goto skip_firing_dcmds; 7989 7990 megasas_flush_cache(instance); 7991 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7992 7993 skip_firing_dcmds: 7994 /* cancel the delayed work if this work still in queue*/ 7995 if (instance->ev != NULL) { 7996 struct megasas_aen_event *ev = instance->ev; 7997 cancel_delayed_work_sync(&ev->hotplug_work); 7998 instance->ev = NULL; 7999 } 8000 8001 /* cancel all wait events */ 8002 wake_up_all(&instance->int_cmd_wait_q); 8003 8004 tasklet_kill(&instance->isr_tasklet); 8005 8006 /* 8007 * Take the instance off the instance array. Note that we will not 8008 * decrement the max_index. We let this array be sparse array 8009 */ 8010 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8011 if (megasas_mgmt_info.instance[i] == instance) { 8012 megasas_mgmt_info.count--; 8013 megasas_mgmt_info.instance[i] = NULL; 8014 8015 break; 8016 } 8017 } 8018 8019 instance->instancet->disable_intr(instance); 8020 8021 megasas_destroy_irqs(instance); 8022 8023 if (instance->msix_vectors) 8024 pci_free_irq_vectors(instance->pdev); 8025 8026 if (instance->adapter_type >= VENTURA_SERIES) { 8027 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 8028 kfree(fusion->stream_detect_by_ld[i]); 8029 kfree(fusion->stream_detect_by_ld); 8030 fusion->stream_detect_by_ld = NULL; 8031 } 8032 8033 8034 if (instance->adapter_type != MFI_SERIES) { 8035 megasas_release_fusion(instance); 8036 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8037 (sizeof(struct MR_PD_CFG_SEQ) * 8038 (MAX_PHYSICAL_DEVICES - 1)); 8039 for (i = 0; i < 2 ; i++) { 8040 if (fusion->ld_map[i]) 8041 dma_free_coherent(&instance->pdev->dev, 8042 fusion->max_map_sz, 8043 fusion->ld_map[i], 8044 fusion->ld_map_phys[i]); 8045 if (fusion->ld_drv_map[i]) { 8046 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 8047 vfree(fusion->ld_drv_map[i]); 8048 else 8049 free_pages((ulong)fusion->ld_drv_map[i], 8050 fusion->drv_map_pages); 8051 } 8052 8053 if (fusion->pd_seq_sync[i]) 8054 dma_free_coherent(&instance->pdev->dev, 8055 pd_seq_map_sz, 8056 fusion->pd_seq_sync[i], 8057 fusion->pd_seq_phys[i]); 8058 } 8059 } else { 8060 megasas_release_mfi(instance); 8061 } 8062 8063 if (instance->vf_affiliation) 8064 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 8065 sizeof(struct MR_LD_VF_AFFILIATION), 8066 instance->vf_affiliation, 8067 instance->vf_affiliation_h); 8068 8069 if (instance->vf_affiliation_111) 8070 dma_free_coherent(&pdev->dev, 8071 sizeof(struct MR_LD_VF_AFFILIATION_111), 8072 
				  instance->vf_affiliation_111,
				  instance->vf_affiliation_111_h);

	if (instance->hb_host_mem)
		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
				  instance->hb_host_mem,
				  instance->hb_host_mem_h);

	megasas_free_ctrl_dma_buffers(instance);

	megasas_free_ctrl_mem(instance);

	megasas_destroy_debugfs(instance);

	scsi_host_put(host);

	pci_disable_device(pdev);
}

/**
 * megasas_shutdown - Shutdown entry point
 * @pdev: PCI device structure
 */
static void megasas_shutdown(struct pci_dev *pdev)
{
	struct megasas_instance *instance = pci_get_drvdata(pdev);

	if (!instance)
		return;

	instance->unload = 1;

	if (megasas_wait_for_adapter_operational(instance))
		goto skip_firing_dcmds;

	megasas_flush_cache(instance);
	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);

skip_firing_dcmds:
	instance->instancet->disable_intr(instance);
	megasas_destroy_irqs(instance);

	if (instance->msix_vectors)
		pci_free_irq_vectors(instance->pdev);
}

/*
 * megasas_mgmt_open - char node "open" entry point
 * @inode: char node inode
 * @filep: char node file
 */
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * Allow only those users with admin rights
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}

/*
 * megasas_mgmt_fasync - Async notifier registration from applications
 * @fd: char node file descriptor number
 * @filep: char node file
 * @mode: notifier on/off
 *
 * This function adds the calling process to a driver global queue. When an
 * event occurs, SIGIO will be sent to all processes in this queue.
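 *
 * Illustrative only, not part of the driver: a management application
 * that has opened the megaraid_sas ioctl node as @fd would typically arm
 * SIGIO delivery roughly as follows (the handler name is application
 * defined):
 *
 *	signal(SIGIO, aen_sigio_handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Setting FASYNC makes the VFS invoke this handler with mode == 1, which
 * adds the file to megasas_async_queue; clearing the flag (or closing the
 * file) invokes it again with mode == 0 and removes the entry.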
 */
static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
{
	int rc;

	mutex_lock(&megasas_async_queue_mutex);

	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);

	mutex_unlock(&megasas_async_queue_mutex);

	if (rc >= 0) {
		/* For sanity check when we get ioctl */
		filep->private_data = filep;
		return 0;
	}

	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);

	return rc;
}

/*
 * megasas_mgmt_poll - char node "poll" entry point
 * @file: char node file
 * @wait: Events to poll for
 */
static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;
	unsigned long flags;

	poll_wait(file, &megasas_poll_wait, wait);
	spin_lock_irqsave(&poll_aen_lock, flags);
	if (megasas_poll_wait_aen)
		mask = (EPOLLIN | EPOLLRDNORM);
	else
		mask = 0;
	megasas_poll_wait_aen = 0;
	spin_unlock_irqrestore(&poll_aen_lock, flags);
	return mask;
}

/*
 * megasas_set_crash_dump_params_ioctl:
 *	Send CRASH_DUMP_MODE DCMD to all controllers
 * @cmd:	MFI command frame
 */

static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
{
	struct megasas_instance *local_instance;
	int i, error = 0;
	int crash_support;

	crash_support = cmd->frame->dcmd.mbox.w[0];

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		local_instance = megasas_mgmt_info.instance[i];
		if (local_instance && local_instance->crash_dump_drv_support) {
			if ((atomic_read(&local_instance->adprecovery) ==
				MEGASAS_HBA_OPERATIONAL) &&
				!megasas_set_crash_dump_params(local_instance,
					crash_support)) {
				local_instance->crash_dump_app_support =
					crash_support;
				dev_info(&local_instance->pdev->dev,
					"Application firmware crash "
					"dump mode set success\n");
				error = 0;
			} else {
				dev_info(&local_instance->pdev->dev,
					"Application firmware crash "
					"dump mode set failed\n");
				error = -1;
			}
		}
	}
	return error;
}

/**
 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
 * @instance: Adapter soft state
 * @user_ioc: User's ioctl packet
 * @ioc: ioctl packet
 */
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		      struct megasas_iocpacket __user *user_ioc,
		      struct megasas_iocpacket *ioc)
{
	struct megasas_sge64 *kern_sge64 = NULL;
	struct megasas_sge32 *kern_sge32 = NULL;
	struct megasas_cmd *cmd;
	void *kbuff_arr[MAX_IOCTL_SGE];
	dma_addr_t buf_handle = 0;
	int error = 0, i;
	void *sense = NULL;
	dma_addr_t sense_handle;
	void *sense_ptr;
	u32 opcode = 0;
	int ret = DCMD_SUCCESS;

	memset(kbuff_arr, 0, sizeof(kbuff_arr));

	if (ioc->sge_count > MAX_IOCTL_SGE) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
			   ioc->sge_count, MAX_IOCTL_SGE);
		return -EINVAL;
	}

	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
	    !instance->support_nvme_passthru) ||
	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
	    !instance->support_pci_lane_margining)) {
		dev_err(&instance->pdev->dev,
			"Received invalid ioctl command 0x%x\n",
			ioc->frame.hdr.cmd);
		return -ENOTSUPP;
	}

	cmd = megasas_get_cmd(instance);
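	/*
	 * megasas_get_cmd() pops a frame from the adapter's internal MFI
	 * command pool and returns NULL when the pool is exhausted; the
	 * check below then fails the ioctl with -ENOMEM instead of
	 * waiting for a frame to be returned.
	 */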
8266 if (!cmd) { 8267 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 8268 return -ENOMEM; 8269 } 8270 8271 /* 8272 * User's IOCTL packet has 2 frames (maximum). Copy those two 8273 * frames into our cmd's frames. cmd->frame's context will get 8274 * overwritten when we copy from user's frames. So set that value 8275 * alone separately 8276 */ 8277 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 8278 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 8279 cmd->frame->hdr.pad_0 = 0; 8280 8281 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 8282 8283 if (instance->consistent_mask_64bit) 8284 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 8285 MFI_FRAME_SENSE64)); 8286 else 8287 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 8288 MFI_FRAME_SENSE64)); 8289 8290 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 8291 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 8292 8293 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 8294 mutex_lock(&instance->reset_mutex); 8295 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 8296 megasas_return_cmd(instance, cmd); 8297 mutex_unlock(&instance->reset_mutex); 8298 return -1; 8299 } 8300 mutex_unlock(&instance->reset_mutex); 8301 } 8302 8303 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 8304 error = megasas_set_crash_dump_params_ioctl(cmd); 8305 megasas_return_cmd(instance, cmd); 8306 return error; 8307 } 8308 8309 /* 8310 * The management interface between applications and the fw uses 8311 * MFI frames. E.g, RAID configuration changes, LD property changes 8312 * etc are accomplishes through different kinds of MFI frames. The 8313 * driver needs to care only about substituting user buffers with 8314 * kernel buffers in SGLs. The location of SGL is embedded in the 8315 * struct iocpacket itself. 8316 */ 8317 if (instance->consistent_mask_64bit) 8318 kern_sge64 = (struct megasas_sge64 *) 8319 ((unsigned long)cmd->frame + ioc->sgl_off); 8320 else 8321 kern_sge32 = (struct megasas_sge32 *) 8322 ((unsigned long)cmd->frame + ioc->sgl_off); 8323 8324 /* 8325 * For each user buffer, create a mirror buffer and copy in 8326 */ 8327 for (i = 0; i < ioc->sge_count; i++) { 8328 if (!ioc->sgl[i].iov_len) 8329 continue; 8330 8331 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8332 ioc->sgl[i].iov_len, 8333 &buf_handle, GFP_KERNEL); 8334 if (!kbuff_arr[i]) { 8335 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8336 "kernel SGL buffer for IOCTL\n"); 8337 error = -ENOMEM; 8338 goto out; 8339 } 8340 8341 /* 8342 * We don't change the dma_coherent_mask, so 8343 * dma_alloc_coherent only returns 32bit addresses 8344 */ 8345 if (instance->consistent_mask_64bit) { 8346 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8347 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8348 } else { 8349 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8350 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8351 } 8352 8353 /* 8354 * We created a kernel buffer corresponding to the 8355 * user buffer. 
Now copy in from the user buffer 8356 */ 8357 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8358 (u32) (ioc->sgl[i].iov_len))) { 8359 error = -EFAULT; 8360 goto out; 8361 } 8362 } 8363 8364 if (ioc->sense_len) { 8365 /* make sure the pointer is part of the frame */ 8366 if (ioc->sense_off > 8367 (sizeof(union megasas_frame) - sizeof(__le64))) { 8368 error = -EINVAL; 8369 goto out; 8370 } 8371 8372 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8373 &sense_handle, GFP_KERNEL); 8374 if (!sense) { 8375 error = -ENOMEM; 8376 goto out; 8377 } 8378 8379 /* always store 64 bits regardless of addressing */ 8380 sense_ptr = (void *)cmd->frame + ioc->sense_off; 8381 put_unaligned_le64(sense_handle, sense_ptr); 8382 } 8383 8384 /* 8385 * Set the sync_cmd flag so that the ISR knows not to complete this 8386 * cmd to the SCSI mid-layer 8387 */ 8388 cmd->sync_cmd = 1; 8389 8390 ret = megasas_issue_blocked_cmd(instance, cmd, 0); 8391 switch (ret) { 8392 case DCMD_INIT: 8393 case DCMD_BUSY: 8394 cmd->sync_cmd = 0; 8395 dev_err(&instance->pdev->dev, 8396 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8397 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8398 cmd->cmd_status_drv); 8399 error = -EBUSY; 8400 goto out; 8401 } 8402 8403 cmd->sync_cmd = 0; 8404 8405 if (instance->unload == 1) { 8406 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8407 "don't submit data to application\n"); 8408 goto out; 8409 } 8410 /* 8411 * copy out the kernel buffers to user buffers 8412 */ 8413 for (i = 0; i < ioc->sge_count; i++) { 8414 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8415 ioc->sgl[i].iov_len)) { 8416 error = -EFAULT; 8417 goto out; 8418 } 8419 } 8420 8421 /* 8422 * copy out the sense 8423 */ 8424 if (ioc->sense_len) { 8425 void __user *uptr; 8426 /* 8427 * sense_ptr points to the location that has the user 8428 * sense buffer address 8429 */ 8430 sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; 8431 if (in_compat_syscall()) 8432 uptr = compat_ptr(get_unaligned((compat_uptr_t *) 8433 sense_ptr)); 8434 else 8435 uptr = get_unaligned((void __user **)sense_ptr); 8436 8437 if (copy_to_user(uptr, sense, ioc->sense_len)) { 8438 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8439 "sense data\n"); 8440 error = -EFAULT; 8441 goto out; 8442 } 8443 } 8444 8445 /* 8446 * copy the status codes returned by the fw 8447 */ 8448 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8449 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8450 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 8451 error = -EFAULT; 8452 } 8453 8454 out: 8455 if (sense) { 8456 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8457 sense, sense_handle); 8458 } 8459 8460 for (i = 0; i < ioc->sge_count; i++) { 8461 if (kbuff_arr[i]) { 8462 if (instance->consistent_mask_64bit) 8463 dma_free_coherent(&instance->pdev->dev, 8464 le32_to_cpu(kern_sge64[i].length), 8465 kbuff_arr[i], 8466 le64_to_cpu(kern_sge64[i].phys_addr)); 8467 else 8468 dma_free_coherent(&instance->pdev->dev, 8469 le32_to_cpu(kern_sge32[i].length), 8470 kbuff_arr[i], 8471 le32_to_cpu(kern_sge32[i].phys_addr)); 8472 kbuff_arr[i] = NULL; 8473 } 8474 } 8475 8476 megasas_return_cmd(instance, cmd); 8477 return error; 8478 } 8479 8480 static struct megasas_iocpacket * 8481 megasas_compat_iocpacket_get_user(void __user *arg) 8482 { 8483 struct megasas_iocpacket *ioc; 8484 struct compat_megasas_iocpacket __user *cioc = arg; 8485 size_t size; 8486 int err = -EFAULT; 8487 int 
i; 8488 8489 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 8490 if (!ioc) 8491 return ERR_PTR(-ENOMEM); 8492 size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); 8493 if (copy_from_user(ioc, arg, size)) 8494 goto out; 8495 8496 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8497 compat_uptr_t iov_base; 8498 8499 if (get_user(iov_base, &cioc->sgl[i].iov_base) || 8500 get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) 8501 goto out; 8502 8503 ioc->sgl[i].iov_base = compat_ptr(iov_base); 8504 } 8505 8506 return ioc; 8507 out: 8508 kfree(ioc); 8509 return ERR_PTR(err); 8510 } 8511 8512 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8513 { 8514 struct megasas_iocpacket __user *user_ioc = 8515 (struct megasas_iocpacket __user *)arg; 8516 struct megasas_iocpacket *ioc; 8517 struct megasas_instance *instance; 8518 int error; 8519 8520 if (in_compat_syscall()) 8521 ioc = megasas_compat_iocpacket_get_user(user_ioc); 8522 else 8523 ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); 8524 8525 if (IS_ERR(ioc)) 8526 return PTR_ERR(ioc); 8527 8528 instance = megasas_lookup_instance(ioc->host_no); 8529 if (!instance) { 8530 error = -ENODEV; 8531 goto out_kfree_ioc; 8532 } 8533 8534 /* Block ioctls in VF mode */ 8535 if (instance->requestorId && !allow_vf_ioctls) { 8536 error = -ENODEV; 8537 goto out_kfree_ioc; 8538 } 8539 8540 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8541 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8542 error = -ENODEV; 8543 goto out_kfree_ioc; 8544 } 8545 8546 if (instance->unload == 1) { 8547 error = -ENODEV; 8548 goto out_kfree_ioc; 8549 } 8550 8551 if (down_interruptible(&instance->ioctl_sem)) { 8552 error = -ERESTARTSYS; 8553 goto out_kfree_ioc; 8554 } 8555 8556 if (megasas_wait_for_adapter_operational(instance)) { 8557 error = -ENODEV; 8558 goto out_up; 8559 } 8560 8561 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8562 out_up: 8563 up(&instance->ioctl_sem); 8564 8565 out_kfree_ioc: 8566 kfree(ioc); 8567 return error; 8568 } 8569 8570 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8571 { 8572 struct megasas_instance *instance; 8573 struct megasas_aen aen; 8574 int error; 8575 8576 if (file->private_data != file) { 8577 printk(KERN_DEBUG "megasas: fasync_helper was not " 8578 "called first\n"); 8579 return -EINVAL; 8580 } 8581 8582 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8583 return -EFAULT; 8584 8585 instance = megasas_lookup_instance(aen.host_no); 8586 8587 if (!instance) 8588 return -ENODEV; 8589 8590 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8591 return -ENODEV; 8592 } 8593 8594 if (instance->unload == 1) { 8595 return -ENODEV; 8596 } 8597 8598 if (megasas_wait_for_adapter_operational(instance)) 8599 return -ENODEV; 8600 8601 mutex_lock(&instance->reset_mutex); 8602 error = megasas_register_aen(instance, aen.seq_num, 8603 aen.class_locale_word); 8604 mutex_unlock(&instance->reset_mutex); 8605 return error; 8606 } 8607 8608 /** 8609 * megasas_mgmt_ioctl - char node ioctl entry point 8610 * @file: char device file pointer 8611 * @cmd: ioctl command 8612 * @arg: ioctl command arguments address 8613 */ 8614 static long 8615 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8616 { 8617 switch (cmd) { 8618 case MEGASAS_IOC_FIRMWARE: 8619 return megasas_mgmt_ioctl_fw(file, arg); 8620 8621 case MEGASAS_IOC_GET_AEN: 8622 return megasas_mgmt_ioctl_aen(file, arg); 8623 } 8624 8625 return 
-ENOTTY; 8626 } 8627 8628 #ifdef CONFIG_COMPAT 8629 static long 8630 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8631 unsigned long arg) 8632 { 8633 switch (cmd) { 8634 case MEGASAS_IOC_FIRMWARE32: 8635 return megasas_mgmt_ioctl_fw(file, arg); 8636 case MEGASAS_IOC_GET_AEN: 8637 return megasas_mgmt_ioctl_aen(file, arg); 8638 } 8639 8640 return -ENOTTY; 8641 } 8642 #endif 8643 8644 /* 8645 * File operations structure for management interface 8646 */ 8647 static const struct file_operations megasas_mgmt_fops = { 8648 .owner = THIS_MODULE, 8649 .open = megasas_mgmt_open, 8650 .fasync = megasas_mgmt_fasync, 8651 .unlocked_ioctl = megasas_mgmt_ioctl, 8652 .poll = megasas_mgmt_poll, 8653 #ifdef CONFIG_COMPAT 8654 .compat_ioctl = megasas_mgmt_compat_ioctl, 8655 #endif 8656 .llseek = noop_llseek, 8657 }; 8658 8659 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); 8660 8661 /* 8662 * PCI hotplug support registration structure 8663 */ 8664 static struct pci_driver megasas_pci_driver = { 8665 8666 .name = "megaraid_sas", 8667 .id_table = megasas_pci_table, 8668 .probe = megasas_probe_one, 8669 .remove = megasas_detach_one, 8670 .driver.pm = &megasas_pm_ops, 8671 .shutdown = megasas_shutdown, 8672 }; 8673 8674 /* 8675 * Sysfs driver attributes 8676 */ 8677 static ssize_t version_show(struct device_driver *dd, char *buf) 8678 { 8679 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8680 MEGASAS_VERSION); 8681 } 8682 static DRIVER_ATTR_RO(version); 8683 8684 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8685 { 8686 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8687 MEGASAS_RELDATE); 8688 } 8689 static DRIVER_ATTR_RO(release_date); 8690 8691 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8692 { 8693 return sprintf(buf, "%u\n", support_poll_for_event); 8694 } 8695 static DRIVER_ATTR_RO(support_poll_for_event); 8696 8697 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8698 { 8699 return sprintf(buf, "%u\n", support_device_change); 8700 } 8701 static DRIVER_ATTR_RO(support_device_change); 8702 8703 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8704 { 8705 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8706 } 8707 8708 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8709 size_t count) 8710 { 8711 int retval = count; 8712 8713 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8714 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8715 retval = -EINVAL; 8716 } 8717 return retval; 8718 } 8719 static DRIVER_ATTR_RW(dbg_lvl); 8720 8721 static ssize_t 8722 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8723 { 8724 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8725 } 8726 8727 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8728 8729 static ssize_t 8730 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8731 { 8732 return sprintf(buf, "%u\n", support_pci_lane_margining); 8733 } 8734 8735 static DRIVER_ATTR_RO(support_pci_lane_margining); 8736 8737 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8738 { 8739 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8740 scsi_remove_device(sdev); 8741 scsi_device_put(sdev); 8742 } 8743 8744 /** 8745 * megasas_update_device_list - Update the PD and LD device list from FW 8746 * after an AEN event notification 8747 * @instance: Adapter soft state 8748 * @event_type: Indicates type of event (PD or LD 
event) 8749 * 8750 * @return: Success or failure 8751 * 8752 * Issue DCMDs to Firmware to update the internal device list in driver. 8753 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8754 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8755 */ 8756 static 8757 int megasas_update_device_list(struct megasas_instance *instance, 8758 int event_type) 8759 { 8760 int dcmd_ret = DCMD_SUCCESS; 8761 8762 if (instance->enable_fw_dev_list) { 8763 dcmd_ret = megasas_host_device_list_query(instance, false); 8764 if (dcmd_ret != DCMD_SUCCESS) 8765 goto out; 8766 } else { 8767 if (event_type & SCAN_PD_CHANNEL) { 8768 dcmd_ret = megasas_get_pd_list(instance); 8769 8770 if (dcmd_ret != DCMD_SUCCESS) 8771 goto out; 8772 } 8773 8774 if (event_type & SCAN_VD_CHANNEL) { 8775 if (!instance->requestorId || 8776 megasas_get_ld_vf_affiliation(instance, 0)) { 8777 dcmd_ret = megasas_ld_list_query(instance, 8778 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8779 if (dcmd_ret != DCMD_SUCCESS) 8780 goto out; 8781 } 8782 } 8783 } 8784 8785 out: 8786 return dcmd_ret; 8787 } 8788 8789 /** 8790 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8791 * after an AEN event notification 8792 * @instance: Adapter soft state 8793 * @scan_type: Indicates type of devices (PD/LD) to add 8794 * @return void 8795 */ 8796 static 8797 void megasas_add_remove_devices(struct megasas_instance *instance, 8798 int scan_type) 8799 { 8800 int i, j; 8801 u16 pd_index = 0; 8802 u16 ld_index = 0; 8803 u16 channel = 0, id = 0; 8804 struct Scsi_Host *host; 8805 struct scsi_device *sdev1; 8806 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8807 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8808 8809 host = instance->host; 8810 8811 if (instance->enable_fw_dev_list) { 8812 targetid_list = instance->host_device_list_buf; 8813 for (i = 0; i < targetid_list->count; i++) { 8814 targetid_entry = &targetid_list->host_device_list[i]; 8815 if (targetid_entry->flags.u.bits.is_sys_pd) { 8816 channel = le16_to_cpu(targetid_entry->target_id) / 8817 MEGASAS_MAX_DEV_PER_CHANNEL; 8818 id = le16_to_cpu(targetid_entry->target_id) % 8819 MEGASAS_MAX_DEV_PER_CHANNEL; 8820 } else { 8821 channel = MEGASAS_MAX_PD_CHANNELS + 8822 (le16_to_cpu(targetid_entry->target_id) / 8823 MEGASAS_MAX_DEV_PER_CHANNEL); 8824 id = le16_to_cpu(targetid_entry->target_id) % 8825 MEGASAS_MAX_DEV_PER_CHANNEL; 8826 } 8827 sdev1 = scsi_device_lookup(host, channel, id, 0); 8828 if (!sdev1) { 8829 scsi_add_device(host, channel, id, 0); 8830 } else { 8831 scsi_device_put(sdev1); 8832 } 8833 } 8834 } 8835 8836 if (scan_type & SCAN_PD_CHANNEL) { 8837 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8838 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8839 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8840 sdev1 = scsi_device_lookup(host, i, j, 0); 8841 if (instance->pd_list[pd_index].driveState == 8842 MR_PD_STATE_SYSTEM) { 8843 if (!sdev1) 8844 scsi_add_device(host, i, j, 0); 8845 else 8846 scsi_device_put(sdev1); 8847 } else { 8848 if (sdev1) 8849 megasas_remove_scsi_device(sdev1); 8850 } 8851 } 8852 } 8853 } 8854 8855 if (scan_type & SCAN_VD_CHANNEL) { 8856 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8857 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8858 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8859 sdev1 = scsi_device_lookup(host, 8860 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8861 if (instance->ld_ids[ld_index] != 0xff) { 8862 if (!sdev1) 8863 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8864 else 8865 
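					/*
					 * The LD is already known to the SCSI
					 * midlayer; just drop the reference
					 * taken by scsi_device_lookup().
					 */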
scsi_device_put(sdev1); 8866 } else { 8867 if (sdev1) 8868 megasas_remove_scsi_device(sdev1); 8869 } 8870 } 8871 } 8872 } 8873 8874 } 8875 8876 static void 8877 megasas_aen_polling(struct work_struct *work) 8878 { 8879 struct megasas_aen_event *ev = 8880 container_of(work, struct megasas_aen_event, hotplug_work.work); 8881 struct megasas_instance *instance = ev->instance; 8882 union megasas_evt_class_locale class_locale; 8883 int event_type = 0; 8884 u32 seq_num; 8885 u16 ld_target_id; 8886 int error; 8887 u8 dcmd_ret = DCMD_SUCCESS; 8888 struct scsi_device *sdev1; 8889 8890 if (!instance) { 8891 printk(KERN_ERR "invalid instance!\n"); 8892 kfree(ev); 8893 return; 8894 } 8895 8896 /* Don't run the event workqueue thread if OCR is running */ 8897 mutex_lock(&instance->reset_mutex); 8898 8899 instance->ev = NULL; 8900 if (instance->evt_detail) { 8901 megasas_decode_evt(instance); 8902 8903 switch (le32_to_cpu(instance->evt_detail->code)) { 8904 8905 case MR_EVT_PD_INSERTED: 8906 case MR_EVT_PD_REMOVED: 8907 event_type = SCAN_PD_CHANNEL; 8908 break; 8909 8910 case MR_EVT_LD_OFFLINE: 8911 case MR_EVT_LD_DELETED: 8912 ld_target_id = instance->evt_detail->args.ld.target_id; 8913 sdev1 = scsi_device_lookup(instance->host, 8914 MEGASAS_MAX_PD_CHANNELS + 8915 (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), 8916 (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL), 8917 0); 8918 if (sdev1) 8919 megasas_remove_scsi_device(sdev1); 8920 8921 event_type = SCAN_VD_CHANNEL; 8922 break; 8923 case MR_EVT_LD_CREATED: 8924 event_type = SCAN_VD_CHANNEL; 8925 break; 8926 8927 case MR_EVT_CFG_CLEARED: 8928 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8929 case MR_EVT_FOREIGN_CFG_IMPORTED: 8930 case MR_EVT_LD_STATE_CHANGE: 8931 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8932 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8933 instance->host->host_no); 8934 break; 8935 8936 case MR_EVT_CTRL_PROP_CHANGED: 8937 dcmd_ret = megasas_get_ctrl_info(instance); 8938 if (dcmd_ret == DCMD_SUCCESS && 8939 instance->snapdump_wait_time) { 8940 megasas_get_snapdump_properties(instance); 8941 dev_info(&instance->pdev->dev, 8942 "Snap dump wait time\t: %d\n", 8943 instance->snapdump_wait_time); 8944 } 8945 break; 8946 default: 8947 event_type = 0; 8948 break; 8949 } 8950 } else { 8951 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8952 mutex_unlock(&instance->reset_mutex); 8953 kfree(ev); 8954 return; 8955 } 8956 8957 if (event_type) 8958 dcmd_ret = megasas_update_device_list(instance, event_type); 8959 8960 mutex_unlock(&instance->reset_mutex); 8961 8962 if (event_type && dcmd_ret == DCMD_SUCCESS) 8963 megasas_add_remove_devices(instance, event_type); 8964 8965 if (dcmd_ret == DCMD_SUCCESS) 8966 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8967 else 8968 seq_num = instance->last_seq_num; 8969 8970 /* Register AEN with FW for latest sequence number plus 1 */ 8971 class_locale.members.reserved = 0; 8972 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8973 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8974 8975 if (instance->aen_cmd != NULL) { 8976 kfree(ev); 8977 return; 8978 } 8979 8980 mutex_lock(&instance->reset_mutex); 8981 error = megasas_register_aen(instance, seq_num, 8982 class_locale.word); 8983 if (error) 8984 dev_err(&instance->pdev->dev, 8985 "register aen failed error %x\n", error); 8986 8987 mutex_unlock(&instance->reset_mutex); 8988 kfree(ev); 8989 } 8990 8991 /** 8992 * megasas_init - Driver load entry point 8993 */ 8994 static int __init megasas_init(void) 8995 { 8996 int rval; 8997 
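	/*
	 * Load-time setup happens in this order: character device node,
	 * debugfs root, PCI driver registration, then the sysfs driver
	 * attributes; the error labels below and megasas_exit() undo the
	 * same steps in reverse order.
	 */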
8998 /* 8999 * Booted in kdump kernel, minimize memory footprints by 9000 * disabling few features 9001 */ 9002 if (reset_devices) { 9003 msix_vectors = 1; 9004 rdpq_enable = 0; 9005 dual_qdepth_disable = 1; 9006 poll_queues = 0; 9007 } 9008 9009 /* 9010 * Announce driver version and other information 9011 */ 9012 pr_info("megasas: %s\n", MEGASAS_VERSION); 9013 9014 support_poll_for_event = 2; 9015 support_device_change = 1; 9016 support_nvme_encapsulation = true; 9017 support_pci_lane_margining = true; 9018 9019 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 9020 9021 /* 9022 * Register character device node 9023 */ 9024 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 9025 9026 if (rval < 0) { 9027 printk(KERN_DEBUG "megasas: failed to open device node\n"); 9028 return rval; 9029 } 9030 9031 megasas_mgmt_majorno = rval; 9032 9033 megasas_init_debugfs(); 9034 9035 /* 9036 * Register ourselves as PCI hotplug module 9037 */ 9038 rval = pci_register_driver(&megasas_pci_driver); 9039 9040 if (rval) { 9041 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 9042 goto err_pcidrv; 9043 } 9044 9045 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 9046 (event_log_level > MFI_EVT_CLASS_DEAD)) { 9047 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 9048 event_log_level = MFI_EVT_CLASS_CRITICAL; 9049 } 9050 9051 rval = driver_create_file(&megasas_pci_driver.driver, 9052 &driver_attr_version); 9053 if (rval) 9054 goto err_dcf_attr_ver; 9055 9056 rval = driver_create_file(&megasas_pci_driver.driver, 9057 &driver_attr_release_date); 9058 if (rval) 9059 goto err_dcf_rel_date; 9060 9061 rval = driver_create_file(&megasas_pci_driver.driver, 9062 &driver_attr_support_poll_for_event); 9063 if (rval) 9064 goto err_dcf_support_poll_for_event; 9065 9066 rval = driver_create_file(&megasas_pci_driver.driver, 9067 &driver_attr_dbg_lvl); 9068 if (rval) 9069 goto err_dcf_dbg_lvl; 9070 rval = driver_create_file(&megasas_pci_driver.driver, 9071 &driver_attr_support_device_change); 9072 if (rval) 9073 goto err_dcf_support_device_change; 9074 9075 rval = driver_create_file(&megasas_pci_driver.driver, 9076 &driver_attr_support_nvme_encapsulation); 9077 if (rval) 9078 goto err_dcf_support_nvme_encapsulation; 9079 9080 rval = driver_create_file(&megasas_pci_driver.driver, 9081 &driver_attr_support_pci_lane_margining); 9082 if (rval) 9083 goto err_dcf_support_pci_lane_margining; 9084 9085 return rval; 9086 9087 err_dcf_support_pci_lane_margining: 9088 driver_remove_file(&megasas_pci_driver.driver, 9089 &driver_attr_support_nvme_encapsulation); 9090 9091 err_dcf_support_nvme_encapsulation: 9092 driver_remove_file(&megasas_pci_driver.driver, 9093 &driver_attr_support_device_change); 9094 9095 err_dcf_support_device_change: 9096 driver_remove_file(&megasas_pci_driver.driver, 9097 &driver_attr_dbg_lvl); 9098 err_dcf_dbg_lvl: 9099 driver_remove_file(&megasas_pci_driver.driver, 9100 &driver_attr_support_poll_for_event); 9101 err_dcf_support_poll_for_event: 9102 driver_remove_file(&megasas_pci_driver.driver, 9103 &driver_attr_release_date); 9104 err_dcf_rel_date: 9105 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9106 err_dcf_attr_ver: 9107 pci_unregister_driver(&megasas_pci_driver); 9108 err_pcidrv: 9109 megasas_exit_debugfs(); 9110 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9111 return rval; 9112 } 9113 9114 /** 9115 * megasas_exit - Driver unload entry 
point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);
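
/*
 * Illustrative userspace view of the management interfaces set up above,
 * not part of the driver.  The device node name is an assumption (the
 * node for the "megaraid_sas_ioctl" character major is created by udev or
 * management tooling); driver attributes appear under the PCI driver name
 * registered in megasas_pci_driver ("megaraid_sas").
 *
 *	// Raise the driver debug level through the dbg_lvl attribute:
 *	int fd = open("/sys/bus/pci/drivers/megaraid_sas/dbg_lvl", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 *
 *	// Issue a firmware passthrough, sketched with the fields this file
 *	// actually consumes (host_no, frame, sgl_off, sge_count, sgl[],
 *	// sense_off/sense_len); fd_mgmt is the opened ioctl node:
 *	struct megasas_iocpacket ioc = { 0 };
 *	ioc.host_no = 0;		// target Scsi_Host number
 *	// ... fill ioc.frame.raw with an MFI frame, point ioc.sgl[] at
 *	// user buffers and set ioc.sge_count/ioc.sgl_off accordingly ...
 *	ioctl(fd_mgmt, MEGASAS_IOC_FIRMWARE, &ioc);
 */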