/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2003-2013  LSI Corporation
 *  Copyright (c) 2013-2014  Avago Technologies
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Authors: Avago Technologies
 *           Sreenivas Bagalkote
 *           Sumant Patro
 *           Bo Yang
 *           Adam Radford
 *           Kashyap Desai <kashyap.desai@avagotech.com>
 *           Sumit Saxena <sumit.saxena@avagotech.com>
 *
 *  Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 *  San Jose, California 95131
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, S_IRUGO);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
	"before resetting adapter. Default: 180");

int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");

int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, S_IRUGO);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);
static int megasas_get_target_prop(struct megasas_instance *instance,
				   struct scsi_device *sdev);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2 */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2 */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, 167 {} 168 }; 169 170 MODULE_DEVICE_TABLE(pci, megasas_pci_table); 171 172 static int megasas_mgmt_majorno; 173 struct megasas_mgmt_info megasas_mgmt_info; 174 static struct fasync_struct *megasas_async_queue; 175 static DEFINE_MUTEX(megasas_async_queue_mutex); 176 177 static int megasas_poll_wait_aen; 178 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 179 static u32 support_poll_for_event; 180 u32 megasas_dbg_lvl; 181 static u32 support_device_change; 182 183 /* define lock for aen poll */ 184 spinlock_t poll_aen_lock; 185 186 void 187 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 188 u8 alt_status); 189 static u32 190 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); 191 static int 192 megasas_adp_reset_gen2(struct megasas_instance *instance, 193 struct megasas_register_set __iomem *reg_set); 194 static irqreturn_t megasas_isr(int irq, void *devp); 195 static u32 196 megasas_init_adapter_mfi(struct megasas_instance *instance); 197 u32 198 megasas_build_and_issue_cmd(struct megasas_instance *instance, 199 struct scsi_cmnd *scmd); 200 static void megasas_complete_cmd_dpc(unsigned long instance_addr); 201 int 202 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 203 int seconds); 204 void megasas_fusion_ocr_wq(struct work_struct *work); 205 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 206 int initial); 207 208 void 209 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 210 { 211 instance->instancet->fire_cmd(instance, 212 cmd->frame_phys_addr, 0, instance->reg_set); 213 return; 214 } 215 216 /** 217 * megasas_get_cmd - Get a command from the free pool 218 * @instance: Adapter soft state 219 * 220 * Returns a free command from the pool 221 */ 222 struct megasas_cmd *megasas_get_cmd(struct megasas_instance 223 *instance) 224 { 225 unsigned long flags; 226 struct megasas_cmd *cmd = NULL; 227 228 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 229 230 if (!list_empty(&instance->cmd_pool)) { 231 cmd = list_entry((&instance->cmd_pool)->next, 232 struct megasas_cmd, list); 233 list_del_init(&cmd->list); 234 } else { 235 dev_err(&instance->pdev->dev, "Command pool empty!\n"); 236 } 237 238 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 239 return cmd; 240 } 241 242 /** 243 * megasas_return_cmd - Return a cmd to free command pool 244 * @instance: Adapter soft state 245 * @cmd: Command packet to be returned to free command pool 246 */ 247 void 248 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 249 { 250 unsigned long flags; 251 u32 blk_tags; 252 struct megasas_cmd_fusion *cmd_fusion; 253 struct fusion_context *fusion = instance->ctrl_context; 254 255 /* This flag is used only for fusion adapter. 
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @regs:	MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs:	MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs:	MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
{
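	/*
	 * Note added for clarity: unlike the ppc/skinny/gen2 variants further
	 * below, which read outbound_scratch_pad, the xscale firmware reports
	 * its state through outbound_msg_0.
	 */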
return readl(&(regs)->outbound_msg_0); 386 } 387 /** 388 * megasas_clear_interrupt_xscale - Check & clear interrupt 389 * @regs: MFI register set 390 */ 391 static int 392 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) 393 { 394 u32 status; 395 u32 mfiStatus = 0; 396 397 /* 398 * Check if it is our interrupt 399 */ 400 status = readl(®s->outbound_intr_status); 401 402 if (status & MFI_OB_INTR_STATUS_MASK) 403 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 404 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) 405 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 406 407 /* 408 * Clear the interrupt by writing back the same value 409 */ 410 if (mfiStatus) 411 writel(status, ®s->outbound_intr_status); 412 413 /* Dummy readl to force pci flush */ 414 readl(®s->outbound_intr_status); 415 416 return mfiStatus; 417 } 418 419 /** 420 * megasas_fire_cmd_xscale - Sends command to the FW 421 * @frame_phys_addr : Physical address of cmd 422 * @frame_count : Number of frames for the command 423 * @regs : MFI register set 424 */ 425 static inline void 426 megasas_fire_cmd_xscale(struct megasas_instance *instance, 427 dma_addr_t frame_phys_addr, 428 u32 frame_count, 429 struct megasas_register_set __iomem *regs) 430 { 431 unsigned long flags; 432 433 spin_lock_irqsave(&instance->hba_lock, flags); 434 writel((frame_phys_addr >> 3)|(frame_count), 435 &(regs)->inbound_queue_port); 436 spin_unlock_irqrestore(&instance->hba_lock, flags); 437 } 438 439 /** 440 * megasas_adp_reset_xscale - For controller reset 441 * @regs: MFI register set 442 */ 443 static int 444 megasas_adp_reset_xscale(struct megasas_instance *instance, 445 struct megasas_register_set __iomem *regs) 446 { 447 u32 i; 448 u32 pcidata; 449 450 writel(MFI_ADP_RESET, ®s->inbound_doorbell); 451 452 for (i = 0; i < 3; i++) 453 msleep(1000); /* sleep for 3 secs */ 454 pcidata = 0; 455 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); 456 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata); 457 if (pcidata & 0x2) { 458 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata); 459 pcidata &= ~0x2; 460 pci_write_config_dword(instance->pdev, 461 MFI_1068_PCSR_OFFSET, pcidata); 462 463 for (i = 0; i < 2; i++) 464 msleep(1000); /* need to wait 2 secs again */ 465 466 pcidata = 0; 467 pci_read_config_dword(instance->pdev, 468 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); 469 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata); 470 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { 471 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata); 472 pcidata = 0; 473 pci_write_config_dword(instance->pdev, 474 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); 475 } 476 } 477 return 0; 478 } 479 480 /** 481 * megasas_check_reset_xscale - For controller reset check 482 * @regs: MFI register set 483 */ 484 static int 485 megasas_check_reset_xscale(struct megasas_instance *instance, 486 struct megasas_register_set __iomem *regs) 487 { 488 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && 489 (le32_to_cpu(*instance->consumer) == 490 MEGASAS_ADPRESET_INPROG_SIGN)) 491 return 1; 492 return 0; 493 } 494 495 static struct megasas_instance_template megasas_instance_template_xscale = { 496 497 .fire_cmd = megasas_fire_cmd_xscale, 498 .enable_intr = megasas_enable_intr_xscale, 499 .disable_intr = megasas_disable_intr_xscale, 500 .clear_intr = megasas_clear_intr_xscale, 501 .read_fw_status_reg = megasas_read_fw_status_reg_xscale, 502 .adp_reset = megasas_adp_reset_xscale, 503 
.check_reset = megasas_check_reset_xscale, 504 .service_isr = megasas_isr, 505 .tasklet = megasas_complete_cmd_dpc, 506 .init_adapter = megasas_init_adapter_mfi, 507 .build_and_issue_cmd = megasas_build_and_issue_cmd, 508 .issue_dcmd = megasas_issue_dcmd, 509 }; 510 511 /** 512 * This is the end of set of functions & definitions specific 513 * to xscale (deviceid : 1064R, PERC5) controllers 514 */ 515 516 /** 517 * The following functions are defined for ppc (deviceid : 0x60) 518 * controllers 519 */ 520 521 /** 522 * megasas_enable_intr_ppc - Enables interrupts 523 * @regs: MFI register set 524 */ 525 static inline void 526 megasas_enable_intr_ppc(struct megasas_instance *instance) 527 { 528 struct megasas_register_set __iomem *regs; 529 530 regs = instance->reg_set; 531 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 532 533 writel(~0x80000000, &(regs)->outbound_intr_mask); 534 535 /* Dummy readl to force pci flush */ 536 readl(®s->outbound_intr_mask); 537 } 538 539 /** 540 * megasas_disable_intr_ppc - Disable interrupt 541 * @regs: MFI register set 542 */ 543 static inline void 544 megasas_disable_intr_ppc(struct megasas_instance *instance) 545 { 546 struct megasas_register_set __iomem *regs; 547 u32 mask = 0xFFFFFFFF; 548 549 regs = instance->reg_set; 550 writel(mask, ®s->outbound_intr_mask); 551 /* Dummy readl to force pci flush */ 552 readl(®s->outbound_intr_mask); 553 } 554 555 /** 556 * megasas_read_fw_status_reg_ppc - returns the current FW status value 557 * @regs: MFI register set 558 */ 559 static u32 560 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) 561 { 562 return readl(&(regs)->outbound_scratch_pad); 563 } 564 565 /** 566 * megasas_clear_interrupt_ppc - Check & clear interrupt 567 * @regs: MFI register set 568 */ 569 static int 570 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 571 { 572 u32 status, mfiStatus = 0; 573 574 /* 575 * Check if it is our interrupt 576 */ 577 status = readl(®s->outbound_intr_status); 578 579 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) 580 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 581 582 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) 583 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 584 585 /* 586 * Clear the interrupt by writing back the same value 587 */ 588 writel(status, ®s->outbound_doorbell_clear); 589 590 /* Dummy readl to force pci flush */ 591 readl(®s->outbound_doorbell_clear); 592 593 return mfiStatus; 594 } 595 596 /** 597 * megasas_fire_cmd_ppc - Sends command to the FW 598 * @frame_phys_addr : Physical address of cmd 599 * @frame_count : Number of frames for the command 600 * @regs : MFI register set 601 */ 602 static inline void 603 megasas_fire_cmd_ppc(struct megasas_instance *instance, 604 dma_addr_t frame_phys_addr, 605 u32 frame_count, 606 struct megasas_register_set __iomem *regs) 607 { 608 unsigned long flags; 609 610 spin_lock_irqsave(&instance->hba_lock, flags); 611 writel((frame_phys_addr | (frame_count<<1))|1, 612 &(regs)->inbound_queue_port); 613 spin_unlock_irqrestore(&instance->hba_lock, flags); 614 } 615 616 /** 617 * megasas_check_reset_ppc - For controller reset check 618 * @regs: MFI register set 619 */ 620 static int 621 megasas_check_reset_ppc(struct megasas_instance *instance, 622 struct megasas_register_set __iomem *regs) 623 { 624 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 625 return 1; 626 627 return 0; 628 } 629 630 static struct megasas_instance_template megasas_instance_template_ppc = { 631 632 .fire_cmd = 
megasas_fire_cmd_ppc, 633 .enable_intr = megasas_enable_intr_ppc, 634 .disable_intr = megasas_disable_intr_ppc, 635 .clear_intr = megasas_clear_intr_ppc, 636 .read_fw_status_reg = megasas_read_fw_status_reg_ppc, 637 .adp_reset = megasas_adp_reset_xscale, 638 .check_reset = megasas_check_reset_ppc, 639 .service_isr = megasas_isr, 640 .tasklet = megasas_complete_cmd_dpc, 641 .init_adapter = megasas_init_adapter_mfi, 642 .build_and_issue_cmd = megasas_build_and_issue_cmd, 643 .issue_dcmd = megasas_issue_dcmd, 644 }; 645 646 /** 647 * megasas_enable_intr_skinny - Enables interrupts 648 * @regs: MFI register set 649 */ 650 static inline void 651 megasas_enable_intr_skinny(struct megasas_instance *instance) 652 { 653 struct megasas_register_set __iomem *regs; 654 655 regs = instance->reg_set; 656 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); 657 658 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 659 660 /* Dummy readl to force pci flush */ 661 readl(®s->outbound_intr_mask); 662 } 663 664 /** 665 * megasas_disable_intr_skinny - Disables interrupt 666 * @regs: MFI register set 667 */ 668 static inline void 669 megasas_disable_intr_skinny(struct megasas_instance *instance) 670 { 671 struct megasas_register_set __iomem *regs; 672 u32 mask = 0xFFFFFFFF; 673 674 regs = instance->reg_set; 675 writel(mask, ®s->outbound_intr_mask); 676 /* Dummy readl to force pci flush */ 677 readl(®s->outbound_intr_mask); 678 } 679 680 /** 681 * megasas_read_fw_status_reg_skinny - returns the current FW status value 682 * @regs: MFI register set 683 */ 684 static u32 685 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) 686 { 687 return readl(&(regs)->outbound_scratch_pad); 688 } 689 690 /** 691 * megasas_clear_interrupt_skinny - Check & clear interrupt 692 * @regs: MFI register set 693 */ 694 static int 695 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) 696 { 697 u32 status; 698 u32 mfiStatus = 0; 699 700 /* 701 * Check if it is our interrupt 702 */ 703 status = readl(®s->outbound_intr_status); 704 705 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { 706 return 0; 707 } 708 709 /* 710 * Check if it is our interrupt 711 */ 712 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) == 713 MFI_STATE_FAULT) { 714 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 715 } else 716 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 717 718 /* 719 * Clear the interrupt by writing back the same value 720 */ 721 writel(status, ®s->outbound_intr_status); 722 723 /* 724 * dummy read to flush PCI 725 */ 726 readl(®s->outbound_intr_status); 727 728 return mfiStatus; 729 } 730 731 /** 732 * megasas_fire_cmd_skinny - Sends command to the FW 733 * @frame_phys_addr : Physical address of cmd 734 * @frame_count : Number of frames for the command 735 * @regs : MFI register set 736 */ 737 static inline void 738 megasas_fire_cmd_skinny(struct megasas_instance *instance, 739 dma_addr_t frame_phys_addr, 740 u32 frame_count, 741 struct megasas_register_set __iomem *regs) 742 { 743 unsigned long flags; 744 745 spin_lock_irqsave(&instance->hba_lock, flags); 746 writel(upper_32_bits(frame_phys_addr), 747 &(regs)->inbound_high_queue_port); 748 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, 749 &(regs)->inbound_low_queue_port); 750 mmiowb(); 751 spin_unlock_irqrestore(&instance->hba_lock, flags); 752 } 753 754 /** 755 * megasas_check_reset_skinny - For controller reset check 756 * @regs: MFI register set 757 */ 758 static int 759 
megasas_check_reset_skinny(struct megasas_instance *instance, 760 struct megasas_register_set __iomem *regs) 761 { 762 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 763 return 1; 764 765 return 0; 766 } 767 768 static struct megasas_instance_template megasas_instance_template_skinny = { 769 770 .fire_cmd = megasas_fire_cmd_skinny, 771 .enable_intr = megasas_enable_intr_skinny, 772 .disable_intr = megasas_disable_intr_skinny, 773 .clear_intr = megasas_clear_intr_skinny, 774 .read_fw_status_reg = megasas_read_fw_status_reg_skinny, 775 .adp_reset = megasas_adp_reset_gen2, 776 .check_reset = megasas_check_reset_skinny, 777 .service_isr = megasas_isr, 778 .tasklet = megasas_complete_cmd_dpc, 779 .init_adapter = megasas_init_adapter_mfi, 780 .build_and_issue_cmd = megasas_build_and_issue_cmd, 781 .issue_dcmd = megasas_issue_dcmd, 782 }; 783 784 785 /** 786 * The following functions are defined for gen2 (deviceid : 0x78 0x79) 787 * controllers 788 */ 789 790 /** 791 * megasas_enable_intr_gen2 - Enables interrupts 792 * @regs: MFI register set 793 */ 794 static inline void 795 megasas_enable_intr_gen2(struct megasas_instance *instance) 796 { 797 struct megasas_register_set __iomem *regs; 798 799 regs = instance->reg_set; 800 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 801 802 /* write ~0x00000005 (4 & 1) to the intr mask*/ 803 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 804 805 /* Dummy readl to force pci flush */ 806 readl(®s->outbound_intr_mask); 807 } 808 809 /** 810 * megasas_disable_intr_gen2 - Disables interrupt 811 * @regs: MFI register set 812 */ 813 static inline void 814 megasas_disable_intr_gen2(struct megasas_instance *instance) 815 { 816 struct megasas_register_set __iomem *regs; 817 u32 mask = 0xFFFFFFFF; 818 819 regs = instance->reg_set; 820 writel(mask, ®s->outbound_intr_mask); 821 /* Dummy readl to force pci flush */ 822 readl(®s->outbound_intr_mask); 823 } 824 825 /** 826 * megasas_read_fw_status_reg_gen2 - returns the current FW status value 827 * @regs: MFI register set 828 */ 829 static u32 830 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) 831 { 832 return readl(&(regs)->outbound_scratch_pad); 833 } 834 835 /** 836 * megasas_clear_interrupt_gen2 - Check & clear interrupt 837 * @regs: MFI register set 838 */ 839 static int 840 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) 841 { 842 u32 status; 843 u32 mfiStatus = 0; 844 845 /* 846 * Check if it is our interrupt 847 */ 848 status = readl(®s->outbound_intr_status); 849 850 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) { 851 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; 852 } 853 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { 854 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 855 } 856 857 /* 858 * Clear the interrupt by writing back the same value 859 */ 860 if (mfiStatus) 861 writel(status, ®s->outbound_doorbell_clear); 862 863 /* Dummy readl to force pci flush */ 864 readl(®s->outbound_intr_status); 865 866 return mfiStatus; 867 } 868 /** 869 * megasas_fire_cmd_gen2 - Sends command to the FW 870 * @frame_phys_addr : Physical address of cmd 871 * @frame_count : Number of frames for the command 872 * @regs : MFI register set 873 */ 874 static inline void 875 megasas_fire_cmd_gen2(struct megasas_instance *instance, 876 dma_addr_t frame_phys_addr, 877 u32 frame_count, 878 struct megasas_register_set __iomem *regs) 879 { 880 unsigned long flags; 881 882 spin_lock_irqsave(&instance->hba_lock, flags); 883 
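	/*
	 * Note added for clarity: as in the ppc variant above, the value
	 * written to inbound_queue_port packs the frame physical address
	 * with the frame count shifted left by one and sets bit 0.
	 */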
writel((frame_phys_addr | (frame_count<<1))|1, 884 &(regs)->inbound_queue_port); 885 spin_unlock_irqrestore(&instance->hba_lock, flags); 886 } 887 888 /** 889 * megasas_adp_reset_gen2 - For controller reset 890 * @regs: MFI register set 891 */ 892 static int 893 megasas_adp_reset_gen2(struct megasas_instance *instance, 894 struct megasas_register_set __iomem *reg_set) 895 { 896 u32 retry = 0 ; 897 u32 HostDiag; 898 u32 __iomem *seq_offset = ®_set->seq_offset; 899 u32 __iomem *hostdiag_offset = ®_set->host_diag; 900 901 if (instance->instancet == &megasas_instance_template_skinny) { 902 seq_offset = ®_set->fusion_seq_offset; 903 hostdiag_offset = ®_set->fusion_host_diag; 904 } 905 906 writel(0, seq_offset); 907 writel(4, seq_offset); 908 writel(0xb, seq_offset); 909 writel(2, seq_offset); 910 writel(7, seq_offset); 911 writel(0xd, seq_offset); 912 913 msleep(1000); 914 915 HostDiag = (u32)readl(hostdiag_offset); 916 917 while (!(HostDiag & DIAG_WRITE_ENABLE)) { 918 msleep(100); 919 HostDiag = (u32)readl(hostdiag_offset); 920 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n", 921 retry, HostDiag); 922 923 if (retry++ >= 100) 924 return 1; 925 926 } 927 928 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); 929 930 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); 931 932 ssleep(10); 933 934 HostDiag = (u32)readl(hostdiag_offset); 935 while (HostDiag & DIAG_RESET_ADAPTER) { 936 msleep(100); 937 HostDiag = (u32)readl(hostdiag_offset); 938 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n", 939 retry, HostDiag); 940 941 if (retry++ >= 1000) 942 return 1; 943 944 } 945 return 0; 946 } 947 948 /** 949 * megasas_check_reset_gen2 - For controller reset check 950 * @regs: MFI register set 951 */ 952 static int 953 megasas_check_reset_gen2(struct megasas_instance *instance, 954 struct megasas_register_set __iomem *regs) 955 { 956 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 957 return 1; 958 959 return 0; 960 } 961 962 static struct megasas_instance_template megasas_instance_template_gen2 = { 963 964 .fire_cmd = megasas_fire_cmd_gen2, 965 .enable_intr = megasas_enable_intr_gen2, 966 .disable_intr = megasas_disable_intr_gen2, 967 .clear_intr = megasas_clear_intr_gen2, 968 .read_fw_status_reg = megasas_read_fw_status_reg_gen2, 969 .adp_reset = megasas_adp_reset_gen2, 970 .check_reset = megasas_check_reset_gen2, 971 .service_isr = megasas_isr, 972 .tasklet = megasas_complete_cmd_dpc, 973 .init_adapter = megasas_init_adapter_mfi, 974 .build_and_issue_cmd = megasas_build_and_issue_cmd, 975 .issue_dcmd = megasas_issue_dcmd, 976 }; 977 978 /** 979 * This is the end of set of functions & definitions 980 * specific to gen2 (deviceid : 0x78, 0x79) controllers 981 */ 982 983 /* 984 * Template added for TB (Fusion) 985 */ 986 extern struct megasas_instance_template megasas_instance_template_fusion; 987 988 /** 989 * megasas_issue_polled - Issues a polling command 990 * @instance: Adapter soft state 991 * @cmd: Command packet to be issued 992 * 993 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. 
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * @instance:	Adapter soft state
 * @cmd:	Command to be issued
 * @timeout:	Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
 * @instance:		Adapter soft state
 * @cmd_to_abort:	Previously issued cmd to be aborted
 * @timeout:		Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
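 * (Note added for clarity, based on the code below: this helper returns
 * DCMD_SUCCESS, DCMD_FAILED, DCMD_TIMEOUT or DCMD_NOT_FIRED, and -1 if no
 * free MFI command could be obtained from the pool.)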
1065 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 1066 */ 1067 static int 1068 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, 1069 struct megasas_cmd *cmd_to_abort, int timeout) 1070 { 1071 struct megasas_cmd *cmd; 1072 struct megasas_abort_frame *abort_fr; 1073 int ret = 0; 1074 1075 cmd = megasas_get_cmd(instance); 1076 1077 if (!cmd) 1078 return -1; 1079 1080 abort_fr = &cmd->frame->abort; 1081 1082 /* 1083 * Prepare and issue the abort frame 1084 */ 1085 abort_fr->cmd = MFI_CMD_ABORT; 1086 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; 1087 abort_fr->flags = cpu_to_le16(0); 1088 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); 1089 abort_fr->abort_mfi_phys_addr_lo = 1090 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); 1091 abort_fr->abort_mfi_phys_addr_hi = 1092 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 1093 1094 cmd->sync_cmd = 1; 1095 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 1096 1097 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1098 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1099 __func__, __LINE__); 1100 return DCMD_NOT_FIRED; 1101 } 1102 1103 instance->instancet->issue_dcmd(instance, cmd); 1104 1105 if (timeout) { 1106 ret = wait_event_timeout(instance->abort_cmd_wait_q, 1107 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); 1108 if (!ret) { 1109 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n", 1110 __func__, __LINE__); 1111 return DCMD_TIMEOUT; 1112 } 1113 } else 1114 wait_event(instance->abort_cmd_wait_q, 1115 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); 1116 1117 cmd->sync_cmd = 0; 1118 1119 megasas_return_cmd(instance, cmd); 1120 return (cmd->cmd_status_drv == MFI_STAT_OK) ? 1121 DCMD_SUCCESS : DCMD_FAILED; 1122 } 1123 1124 /** 1125 * megasas_make_sgl32 - Prepares 32-bit SGL 1126 * @instance: Adapter soft state 1127 * @scp: SCSI command from the mid-layer 1128 * @mfi_sgl: SGL to be filled in 1129 * 1130 * If successful, this function returns the number of SG elements. Otherwise, 1131 * it returnes -1. 1132 */ 1133 static int 1134 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, 1135 union megasas_sgl *mfi_sgl) 1136 { 1137 int i; 1138 int sge_count; 1139 struct scatterlist *os_sgl; 1140 1141 sge_count = scsi_dma_map(scp); 1142 BUG_ON(sge_count < 0); 1143 1144 if (sge_count) { 1145 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1146 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1147 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); 1148 } 1149 } 1150 return sge_count; 1151 } 1152 1153 /** 1154 * megasas_make_sgl64 - Prepares 64-bit SGL 1155 * @instance: Adapter soft state 1156 * @scp: SCSI command from the mid-layer 1157 * @mfi_sgl: SGL to be filled in 1158 * 1159 * If successful, this function returns the number of SG elements. Otherwise, 1160 * it returnes -1. 
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @mfi_sgl:	SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
			struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type:	type of frame - io or pthru frame
 * @sge_count:	number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
				   u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
		sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
			((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

/**
 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
 * @instance:	Adapter soft state
 * @scp:	SCSI command
 * @cmd:	Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
1280 */ 1281 static int 1282 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, 1283 struct megasas_cmd *cmd) 1284 { 1285 u32 is_logical; 1286 u32 device_id; 1287 u16 flags = 0; 1288 struct megasas_pthru_frame *pthru; 1289 1290 is_logical = MEGASAS_IS_LOGICAL(scp->device); 1291 device_id = MEGASAS_DEV_INDEX(scp); 1292 pthru = (struct megasas_pthru_frame *)cmd->frame; 1293 1294 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1295 flags = MFI_FRAME_DIR_WRITE; 1296 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1297 flags = MFI_FRAME_DIR_READ; 1298 else if (scp->sc_data_direction == PCI_DMA_NONE) 1299 flags = MFI_FRAME_DIR_NONE; 1300 1301 if (instance->flag_ieee == 1) { 1302 flags |= MFI_FRAME_IEEE; 1303 } 1304 1305 /* 1306 * Prepare the DCDB frame 1307 */ 1308 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; 1309 pthru->cmd_status = 0x0; 1310 pthru->scsi_status = 0x0; 1311 pthru->target_id = device_id; 1312 pthru->lun = scp->device->lun; 1313 pthru->cdb_len = scp->cmd_len; 1314 pthru->timeout = 0; 1315 pthru->pad_0 = 0; 1316 pthru->flags = cpu_to_le16(flags); 1317 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1318 1319 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1320 1321 /* 1322 * If the command is for the tape device, set the 1323 * pthru timeout to the os layer timeout value. 1324 */ 1325 if (scp->device->type == TYPE_TAPE) { 1326 if ((scp->request->timeout / HZ) > 0xFFFF) 1327 pthru->timeout = cpu_to_le16(0xFFFF); 1328 else 1329 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1330 } 1331 1332 /* 1333 * Construct SGL 1334 */ 1335 if (instance->flag_ieee == 1) { 1336 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1337 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1338 &pthru->sgl); 1339 } else if (IS_DMA64) { 1340 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1341 pthru->sge_count = megasas_make_sgl64(instance, scp, 1342 &pthru->sgl); 1343 } else 1344 pthru->sge_count = megasas_make_sgl32(instance, scp, 1345 &pthru->sgl); 1346 1347 if (pthru->sge_count > instance->max_num_sge) { 1348 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n", 1349 pthru->sge_count); 1350 return 0; 1351 } 1352 1353 /* 1354 * Sense info specific 1355 */ 1356 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1357 pthru->sense_buf_phys_addr_hi = 1358 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1359 pthru->sense_buf_phys_addr_lo = 1360 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1361 1362 /* 1363 * Compute the total number of frames this command consumes. FW uses 1364 * this number to pull sufficient number of frames from host memory. 1365 */ 1366 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, 1367 PTHRU_FRAME); 1368 1369 return cmd->frame_count; 1370 } 1371 1372 /** 1373 * megasas_build_ldio - Prepares IOs to logical devices 1374 * @instance: Adapter soft state 1375 * @scp: SCSI command 1376 * @cmd: Command to be prepared 1377 * 1378 * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
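 * Note added for clarity: bit 1 of the CDB opcode distinguishes writes from
 * reads, which is why the code below selects MFI_CMD_LD_WRITE when
 * (scp->cmnd[0] & 0x02) is set and MFI_CMD_LD_READ otherwise.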
1379 */ 1380 static int 1381 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, 1382 struct megasas_cmd *cmd) 1383 { 1384 u32 device_id; 1385 u8 sc = scp->cmnd[0]; 1386 u16 flags = 0; 1387 struct megasas_io_frame *ldio; 1388 1389 device_id = MEGASAS_DEV_INDEX(scp); 1390 ldio = (struct megasas_io_frame *)cmd->frame; 1391 1392 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1393 flags = MFI_FRAME_DIR_WRITE; 1394 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1395 flags = MFI_FRAME_DIR_READ; 1396 1397 if (instance->flag_ieee == 1) { 1398 flags |= MFI_FRAME_IEEE; 1399 } 1400 1401 /* 1402 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds 1403 */ 1404 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; 1405 ldio->cmd_status = 0x0; 1406 ldio->scsi_status = 0x0; 1407 ldio->target_id = device_id; 1408 ldio->timeout = 0; 1409 ldio->reserved_0 = 0; 1410 ldio->pad_0 = 0; 1411 ldio->flags = cpu_to_le16(flags); 1412 ldio->start_lba_hi = 0; 1413 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1414 1415 /* 1416 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1417 */ 1418 if (scp->cmd_len == 6) { 1419 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1420 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1421 ((u32) scp->cmnd[2] << 8) | 1422 (u32) scp->cmnd[3]); 1423 1424 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1425 } 1426 1427 /* 1428 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1429 */ 1430 else if (scp->cmd_len == 10) { 1431 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1432 ((u32) scp->cmnd[7] << 8)); 1433 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1434 ((u32) scp->cmnd[3] << 16) | 1435 ((u32) scp->cmnd[4] << 8) | 1436 (u32) scp->cmnd[5]); 1437 } 1438 1439 /* 1440 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1441 */ 1442 else if (scp->cmd_len == 12) { 1443 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1444 ((u32) scp->cmnd[7] << 16) | 1445 ((u32) scp->cmnd[8] << 8) | 1446 (u32) scp->cmnd[9]); 1447 1448 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1449 ((u32) scp->cmnd[3] << 16) | 1450 ((u32) scp->cmnd[4] << 8) | 1451 (u32) scp->cmnd[5]); 1452 } 1453 1454 /* 1455 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1456 */ 1457 else if (scp->cmd_len == 16) { 1458 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1459 ((u32) scp->cmnd[11] << 16) | 1460 ((u32) scp->cmnd[12] << 8) | 1461 (u32) scp->cmnd[13]); 1462 1463 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1464 ((u32) scp->cmnd[7] << 16) | 1465 ((u32) scp->cmnd[8] << 8) | 1466 (u32) scp->cmnd[9]); 1467 1468 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1469 ((u32) scp->cmnd[3] << 16) | 1470 ((u32) scp->cmnd[4] << 8) | 1471 (u32) scp->cmnd[5]); 1472 1473 } 1474 1475 /* 1476 * Construct SGL 1477 */ 1478 if (instance->flag_ieee) { 1479 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1480 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1481 &ldio->sgl); 1482 } else if (IS_DMA64) { 1483 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1484 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1485 } else 1486 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1487 1488 if (ldio->sge_count > instance->max_num_sge) { 1489 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n", 1490 ldio->sge_count); 1491 return 0; 1492 } 1493 1494 /* 1495 * Sense info specific 1496 */ 1497 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1498 ldio->sense_buf_phys_addr_hi = 0; 1499 
ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1500 1501 /* 1502 * Compute the total number of frames this command consumes. FW uses 1503 * this number to pull sufficient number of frames from host memory. 1504 */ 1505 cmd->frame_count = megasas_get_frame_count(instance, 1506 ldio->sge_count, IO_FRAME); 1507 1508 return cmd->frame_count; 1509 } 1510 1511 /** 1512 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD 1513 * and whether it's RW or non RW 1514 * @scmd: SCSI command 1515 * 1516 */ 1517 inline int megasas_cmd_type(struct scsi_cmnd *cmd) 1518 { 1519 int ret; 1520 1521 switch (cmd->cmnd[0]) { 1522 case READ_10: 1523 case WRITE_10: 1524 case READ_12: 1525 case WRITE_12: 1526 case READ_6: 1527 case WRITE_6: 1528 case READ_16: 1529 case WRITE_16: 1530 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1531 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1532 break; 1533 default: 1534 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 1535 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1536 } 1537 return ret; 1538 } 1539 1540 /** 1541 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1542 * in FW 1543 * @instance: Adapter soft state 1544 */ 1545 static inline void 1546 megasas_dump_pending_frames(struct megasas_instance *instance) 1547 { 1548 struct megasas_cmd *cmd; 1549 int i,n; 1550 union megasas_sgl *mfi_sgl; 1551 struct megasas_io_frame *ldio; 1552 struct megasas_pthru_frame *pthru; 1553 u32 sgcount; 1554 u16 max_cmd = instance->max_fw_cmds; 1555 1556 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1557 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1558 if (IS_DMA64) 1559 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no); 1560 else 1561 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no); 1562 1563 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no); 1564 for (i = 0; i < max_cmd; i++) { 1565 cmd = instance->cmd_list[i]; 1566 if (!cmd->scmd) 1567 continue; 1568 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); 1569 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { 1570 ldio = (struct megasas_io_frame *)cmd->frame; 1571 mfi_sgl = &ldio->sgl; 1572 sgcount = ldio->sge_count; 1573 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1574 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1575 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1576 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1577 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1578 } else { 1579 pthru = (struct megasas_pthru_frame *) cmd->frame; 1580 mfi_sgl = &pthru->sgl; 1581 sgcount = pthru->sge_count; 1582 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1583 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1584 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1585 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1586 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1587 } 1588 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) { 1589 for (n = 0; n < sgcount; n++) { 1590 if (IS_DMA64) 1591 dev_err(&instance->pdev->dev, "sgl 
len : 0x%x, sgl addr : 0x%llx\n", 1592 le32_to_cpu(mfi_sgl->sge64[n].length), 1593 le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1594 else 1595 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n", 1596 le32_to_cpu(mfi_sgl->sge32[n].length), 1597 le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1598 } 1599 } 1600 } /*for max_cmd*/ 1601 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no); 1602 for (i = 0; i < max_cmd; i++) { 1603 1604 cmd = instance->cmd_list[i]; 1605 1606 if (cmd->sync_cmd == 1) 1607 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr); 1608 } 1609 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no); 1610 } 1611 1612 u32 1613 megasas_build_and_issue_cmd(struct megasas_instance *instance, 1614 struct scsi_cmnd *scmd) 1615 { 1616 struct megasas_cmd *cmd; 1617 u32 frame_count; 1618 1619 cmd = megasas_get_cmd(instance); 1620 if (!cmd) 1621 return SCSI_MLQUEUE_HOST_BUSY; 1622 1623 /* 1624 * Logical drive command 1625 */ 1626 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) 1627 frame_count = megasas_build_ldio(instance, scmd, cmd); 1628 else 1629 frame_count = megasas_build_dcdb(instance, scmd, cmd); 1630 1631 if (!frame_count) 1632 goto out_return_cmd; 1633 1634 cmd->scmd = scmd; 1635 scmd->SCp.ptr = (char *)cmd; 1636 1637 /* 1638 * Issue the command to the FW 1639 */ 1640 atomic_inc(&instance->fw_outstanding); 1641 1642 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1643 cmd->frame_count-1, instance->reg_set); 1644 1645 return 0; 1646 out_return_cmd: 1647 megasas_return_cmd(instance, cmd); 1648 return SCSI_MLQUEUE_HOST_BUSY; 1649 } 1650 1651 1652 /** 1653 * megasas_queue_command - Queue entry point 1654 * @scmd: SCSI command to be queued 1655 * @done: Callback entry point 1656 */ 1657 static int 1658 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 1659 { 1660 struct megasas_instance *instance; 1661 struct MR_PRIV_DEVICE *mr_device_priv_data; 1662 1663 instance = (struct megasas_instance *) 1664 scmd->device->host->hostdata; 1665 1666 if (instance->unload == 1) { 1667 scmd->result = DID_NO_CONNECT << 16; 1668 scmd->scsi_done(scmd); 1669 return 0; 1670 } 1671 1672 if (instance->issuepend_done == 0) 1673 return SCSI_MLQUEUE_HOST_BUSY; 1674 1675 1676 /* Check for an mpio path and adjust behavior */ 1677 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1678 if (megasas_check_mpio_paths(instance, scmd) == 1679 (DID_REQUEUE << 16)) { 1680 return SCSI_MLQUEUE_HOST_BUSY; 1681 } else { 1682 scmd->result = DID_NO_CONNECT << 16; 1683 scmd->scsi_done(scmd); 1684 return 0; 1685 } 1686 } 1687 1688 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 1689 scmd->result = DID_NO_CONNECT << 16; 1690 scmd->scsi_done(scmd); 1691 return 0; 1692 } 1693 1694 mr_device_priv_data = scmd->device->hostdata; 1695 if (!mr_device_priv_data) { 1696 scmd->result = DID_NO_CONNECT << 16; 1697 scmd->scsi_done(scmd); 1698 return 0; 1699 } 1700 1701 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) 1702 return SCSI_MLQUEUE_HOST_BUSY; 1703 1704 if (mr_device_priv_data->tm_busy) 1705 return SCSI_MLQUEUE_DEVICE_BUSY; 1706 1707 1708 scmd->result = 0; 1709 1710 if (MEGASAS_IS_LOGICAL(scmd->device) && 1711 (scmd->device->id >= instance->fw_supported_vd_count || 1712 scmd->device->lun)) { 1713 scmd->result = DID_BAD_TARGET << 16; 1714 goto out_done; 1715 } 1716 1717 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1718 
	    MEGASAS_IS_LOGICAL(scmd->device) &&
	    (!instance->fw_sync_cache_support)) {
		scmd->result = DID_OK << 16;
		goto out_done;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

 out_done:
	scmd->scsi_done(scmd);
	return 0;
}

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
 * megasas_set_dynamic_target_properties -
 * Device property set by driver may not be static and it is required to be
 * updated after OCR
 *
 * set tm_capable.
 * set dma alignment (only for eedp protection enable vd).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
{
	u16 pd_index = 0, ld;
	u32 device_id;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	if (!fusion || !mr_device_priv_data)
		return;

	if (MEGASAS_IS_LOGICAL(sdev)) {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		if (ld >= instance->fw_supported_vd_count)
			return;
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	} else if (instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	}
}

/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
 *
 * MR firmware provides value in KB. Caller of this function converts
 * KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
 * MR firmware provides value 128 as (32 * 4K) = 128K.)
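 * (Worked example added for clarity, values illustrative: for
 * max_io_size_kb = 128 the caller below passes 128 << 10 = 131072 bytes,
 * so blk_queue_max_hw_sectors() is programmed with 131072 / 512 = 256.)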
1810 * 1811 * @sdev: scsi device 1812 * @max_io_size: maximum io transfer size 1813 * 1814 */ 1815 static inline void 1816 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) 1817 { 1818 struct megasas_instance *instance; 1819 u32 mr_nvme_pg_size; 1820 1821 instance = (struct megasas_instance *)sdev->host->hostdata; 1822 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1823 MR_DEFAULT_NVME_PAGE_SIZE); 1824 1825 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); 1826 1827 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1828 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1829 } 1830 1831 1832 /* 1833 * megasas_set_static_target_properties - 1834 * Device property set by driver are static and it is not required to be 1835 * updated after OCR. 1836 * 1837 * set io timeout 1838 * set device queue depth 1839 * set nvme device properties. see - megasas_set_nvme_device_properties 1840 * 1841 * @sdev: scsi device 1842 * @is_target_prop true, if fw provided target properties. 1843 */ 1844 static void megasas_set_static_target_properties(struct scsi_device *sdev, 1845 bool is_target_prop) 1846 { 1847 u16 target_index = 0; 1848 u8 interface_type; 1849 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1850 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 1851 u32 tgt_device_qd; 1852 struct megasas_instance *instance; 1853 struct MR_PRIV_DEVICE *mr_device_priv_data; 1854 1855 instance = megasas_lookup_instance(sdev->host->host_no); 1856 mr_device_priv_data = sdev->hostdata; 1857 interface_type = mr_device_priv_data->interface_type; 1858 1859 /* 1860 * The RAID firmware may require extended timeouts. 1861 */ 1862 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 1863 1864 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 1865 1866 switch (interface_type) { 1867 case SAS_PD: 1868 device_qd = MEGASAS_SAS_QD; 1869 break; 1870 case SATA_PD: 1871 device_qd = MEGASAS_SATA_QD; 1872 break; 1873 case NVME_PD: 1874 device_qd = MEGASAS_NVME_QD; 1875 break; 1876 } 1877 1878 if (is_target_prop) { 1879 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 1880 if (tgt_device_qd && 1881 (tgt_device_qd <= instance->host->can_queue)) 1882 device_qd = tgt_device_qd; 1883 1884 /* max_io_size_kb will be set to non zero for 1885 * nvme based vd and syspd. 1886 */ 1887 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 1888 } 1889 1890 if (instance->nvme_page_size && max_io_size_kb) 1891 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 1892 1893 scsi_change_queue_depth(sdev, device_qd); 1894 1895 } 1896 1897 1898 static int megasas_slave_configure(struct scsi_device *sdev) 1899 { 1900 u16 pd_index = 0; 1901 struct megasas_instance *instance; 1902 int ret_target_prop = DCMD_FAILED; 1903 bool is_target_prop = false; 1904 1905 instance = megasas_lookup_instance(sdev->host->host_no); 1906 if (instance->pd_list_not_supported) { 1907 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 1908 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1909 sdev->id; 1910 if (instance->pd_list[pd_index].driveState != 1911 MR_PD_STATE_SYSTEM) 1912 return -ENXIO; 1913 } 1914 } 1915 1916 mutex_lock(&instance->hba_mutex); 1917 /* Send DCMD to Firmware and cache the information */ 1918 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 1919 megasas_get_pd_info(instance, sdev); 1920 1921 /* Some ventura firmware may not have instance->nvme_page_size set. 
1922 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 1923 */ 1924 if ((instance->tgt_prop) && (instance->nvme_page_size)) 1925 ret_target_prop = megasas_get_target_prop(instance, sdev); 1926 1927 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 1928 megasas_set_static_target_properties(sdev, is_target_prop); 1929 1930 mutex_unlock(&instance->hba_mutex); 1931 1932 /* This sdev property may change post OCR */ 1933 megasas_set_dynamic_target_properties(sdev); 1934 1935 return 0; 1936 } 1937 1938 static int megasas_slave_alloc(struct scsi_device *sdev) 1939 { 1940 u16 pd_index = 0; 1941 struct megasas_instance *instance ; 1942 struct MR_PRIV_DEVICE *mr_device_priv_data; 1943 1944 instance = megasas_lookup_instance(sdev->host->host_no); 1945 if (!MEGASAS_IS_LOGICAL(sdev)) { 1946 /* 1947 * Open the OS scan to the SYSTEM PD 1948 */ 1949 pd_index = 1950 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1951 sdev->id; 1952 if ((instance->pd_list_not_supported || 1953 instance->pd_list[pd_index].driveState == 1954 MR_PD_STATE_SYSTEM)) { 1955 goto scan_target; 1956 } 1957 return -ENXIO; 1958 } 1959 1960 scan_target: 1961 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 1962 GFP_KERNEL); 1963 if (!mr_device_priv_data) 1964 return -ENOMEM; 1965 sdev->hostdata = mr_device_priv_data; 1966 1967 atomic_set(&mr_device_priv_data->r1_ldio_hint, 1968 instance->r1_ldio_hint_default); 1969 return 0; 1970 } 1971 1972 static void megasas_slave_destroy(struct scsi_device *sdev) 1973 { 1974 kfree(sdev->hostdata); 1975 sdev->hostdata = NULL; 1976 } 1977 1978 /* 1979 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 1980 * kill adapter 1981 * @instance: Adapter soft state 1982 * 1983 */ 1984 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 1985 { 1986 int i; 1987 struct megasas_cmd *cmd_mfi; 1988 struct megasas_cmd_fusion *cmd_fusion; 1989 struct fusion_context *fusion = instance->ctrl_context; 1990 1991 /* Find all outstanding ioctls */ 1992 if (fusion) { 1993 for (i = 0; i < instance->max_fw_cmds; i++) { 1994 cmd_fusion = fusion->cmd_list[i]; 1995 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 1996 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 1997 if (cmd_mfi->sync_cmd && 1998 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) 1999 megasas_complete_cmd(instance, 2000 cmd_mfi, DID_OK); 2001 } 2002 } 2003 } else { 2004 for (i = 0; i < instance->max_fw_cmds; i++) { 2005 cmd_mfi = instance->cmd_list[i]; 2006 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2007 MFI_CMD_ABORT) 2008 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2009 } 2010 } 2011 } 2012 2013 2014 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2015 { 2016 /* Set critical error to block I/O & ioctls in case caller didn't */ 2017 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2018 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2019 msleep(1000); 2020 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2021 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2022 (instance->ctrl_context)) { 2023 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2024 /* Flush */ 2025 readl(&instance->reg_set->doorbell); 2026 if (instance->requestorId && instance->peerIsPresent) 2027 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2028 } else { 2029 writel(MFI_STOP_ADP, 2030 &instance->reg_set->inbound_doorbell); 2031 } 2032 /* Complete outstanding ioctls when adapter is killed */ 2033 
megasas_complete_outstanding_ioctls(instance); 2034 } 2035 2036 /** 2037 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2038 * restored to max value 2039 * @instance: Adapter soft state 2040 * 2041 */ 2042 void 2043 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2044 { 2045 unsigned long flags; 2046 2047 if (instance->flag & MEGASAS_FW_BUSY 2048 && time_after(jiffies, instance->last_time + 5 * HZ) 2049 && atomic_read(&instance->fw_outstanding) < 2050 instance->throttlequeuedepth + 1) { 2051 2052 spin_lock_irqsave(instance->host->host_lock, flags); 2053 instance->flag &= ~MEGASAS_FW_BUSY; 2054 2055 instance->host->can_queue = instance->cur_can_queue; 2056 spin_unlock_irqrestore(instance->host->host_lock, flags); 2057 } 2058 } 2059 2060 /** 2061 * megasas_complete_cmd_dpc - Returns FW's controller structure 2062 * @instance_addr: Address of adapter soft state 2063 * 2064 * Tasklet to complete cmds 2065 */ 2066 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 2067 { 2068 u32 producer; 2069 u32 consumer; 2070 u32 context; 2071 struct megasas_cmd *cmd; 2072 struct megasas_instance *instance = 2073 (struct megasas_instance *)instance_addr; 2074 unsigned long flags; 2075 2076 /* If we have already declared adapter dead, donot complete cmds */ 2077 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2078 return; 2079 2080 spin_lock_irqsave(&instance->completion_lock, flags); 2081 2082 producer = le32_to_cpu(*instance->producer); 2083 consumer = le32_to_cpu(*instance->consumer); 2084 2085 while (consumer != producer) { 2086 context = le32_to_cpu(instance->reply_queue[consumer]); 2087 if (context >= instance->max_fw_cmds) { 2088 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2089 context); 2090 BUG(); 2091 } 2092 2093 cmd = instance->cmd_list[context]; 2094 2095 megasas_complete_cmd(instance, cmd, DID_OK); 2096 2097 consumer++; 2098 if (consumer == (instance->max_fw_cmds + 1)) { 2099 consumer = 0; 2100 } 2101 } 2102 2103 *instance->consumer = cpu_to_le32(producer); 2104 2105 spin_unlock_irqrestore(&instance->completion_lock, flags); 2106 2107 /* 2108 * Check if we can restore can_queue 2109 */ 2110 megasas_check_and_restore_queue_depth(instance); 2111 } 2112 2113 /** 2114 * megasas_start_timer - Initializes a timer object 2115 * @instance: Adapter soft state 2116 * @timer: timer object to be initialized 2117 * @fn: timer function 2118 * @interval: time interval between timer function call 2119 * 2120 */ 2121 void megasas_start_timer(struct megasas_instance *instance, 2122 struct timer_list *timer, 2123 void *fn, unsigned long interval) 2124 { 2125 init_timer(timer); 2126 timer->expires = jiffies + interval; 2127 timer->data = (unsigned long)instance; 2128 timer->function = fn; 2129 add_timer(timer); 2130 } 2131 2132 static void 2133 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2134 2135 static void 2136 process_fw_state_change_wq(struct work_struct *work); 2137 2138 void megasas_do_ocr(struct megasas_instance *instance) 2139 { 2140 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2141 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2142 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2143 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2144 } 2145 instance->instancet->disable_intr(instance); 2146 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2147 instance->issuepend_done = 0; 2148 2149 
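	/*
	 * Zero the outstanding-command count, park all pending commands on the
	 * internal reset queue and run the FW state-change work synchronously
	 * to complete the OCR.
	 */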
atomic_set(&instance->fw_outstanding, 0); 2150 megasas_internal_reset_defer_cmds(instance); 2151 process_fw_state_change_wq(&instance->work_init); 2152 } 2153 2154 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2155 int initial) 2156 { 2157 struct megasas_cmd *cmd; 2158 struct megasas_dcmd_frame *dcmd; 2159 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2160 dma_addr_t new_affiliation_111_h; 2161 int ld, retval = 0; 2162 u8 thisVf; 2163 2164 cmd = megasas_get_cmd(instance); 2165 2166 if (!cmd) { 2167 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2168 "Failed to get cmd for scsi%d\n", 2169 instance->host->host_no); 2170 return -ENOMEM; 2171 } 2172 2173 dcmd = &cmd->frame->dcmd; 2174 2175 if (!instance->vf_affiliation_111) { 2176 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2177 "affiliation for scsi%d\n", instance->host->host_no); 2178 megasas_return_cmd(instance, cmd); 2179 return -ENOMEM; 2180 } 2181 2182 if (initial) 2183 memset(instance->vf_affiliation_111, 0, 2184 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2185 else { 2186 new_affiliation_111 = 2187 pci_alloc_consistent(instance->pdev, 2188 sizeof(struct MR_LD_VF_AFFILIATION_111), 2189 &new_affiliation_111_h); 2190 if (!new_affiliation_111) { 2191 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2192 "memory for new affiliation for scsi%d\n", 2193 instance->host->host_no); 2194 megasas_return_cmd(instance, cmd); 2195 return -ENOMEM; 2196 } 2197 memset(new_affiliation_111, 0, 2198 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2199 } 2200 2201 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2202 2203 dcmd->cmd = MFI_CMD_DCMD; 2204 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2205 dcmd->sge_count = 1; 2206 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2207 dcmd->timeout = 0; 2208 dcmd->pad_0 = 0; 2209 dcmd->data_xfer_len = 2210 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2211 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2212 2213 if (initial) 2214 dcmd->sgl.sge32[0].phys_addr = 2215 cpu_to_le32(instance->vf_affiliation_111_h); 2216 else 2217 dcmd->sgl.sge32[0].phys_addr = 2218 cpu_to_le32(new_affiliation_111_h); 2219 2220 dcmd->sgl.sge32[0].length = cpu_to_le32( 2221 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2222 2223 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2224 "scsi%d\n", instance->host->host_no); 2225 2226 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2227 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2228 " failed with status 0x%x for scsi%d\n", 2229 dcmd->cmd_status, instance->host->host_no); 2230 retval = 1; /* Do a scan if we couldn't get affiliation */ 2231 goto out; 2232 } 2233 2234 if (!initial) { 2235 thisVf = new_affiliation_111->thisVf; 2236 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2237 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2238 new_affiliation_111->map[ld].policy[thisVf]) { 2239 dev_warn(&instance->pdev->dev, "SR-IOV: " 2240 "Got new LD/VF affiliation for scsi%d\n", 2241 instance->host->host_no); 2242 memcpy(instance->vf_affiliation_111, 2243 new_affiliation_111, 2244 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2245 retval = 1; 2246 goto out; 2247 } 2248 } 2249 out: 2250 if (new_affiliation_111) { 2251 pci_free_consistent(instance->pdev, 2252 sizeof(struct MR_LD_VF_AFFILIATION_111), 2253 new_affiliation_111, 2254 new_affiliation_111_h); 2255 } 2256 2257 megasas_return_cmd(instance, 
cmd); 2258 2259 return retval; 2260 } 2261 2262 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2263 int initial) 2264 { 2265 struct megasas_cmd *cmd; 2266 struct megasas_dcmd_frame *dcmd; 2267 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2268 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2269 dma_addr_t new_affiliation_h; 2270 int i, j, retval = 0, found = 0, doscan = 0; 2271 u8 thisVf; 2272 2273 cmd = megasas_get_cmd(instance); 2274 2275 if (!cmd) { 2276 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2277 "Failed to get cmd for scsi%d\n", 2278 instance->host->host_no); 2279 return -ENOMEM; 2280 } 2281 2282 dcmd = &cmd->frame->dcmd; 2283 2284 if (!instance->vf_affiliation) { 2285 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2286 "affiliation for scsi%d\n", instance->host->host_no); 2287 megasas_return_cmd(instance, cmd); 2288 return -ENOMEM; 2289 } 2290 2291 if (initial) 2292 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2293 sizeof(struct MR_LD_VF_AFFILIATION)); 2294 else { 2295 new_affiliation = 2296 pci_alloc_consistent(instance->pdev, 2297 (MAX_LOGICAL_DRIVES + 1) * 2298 sizeof(struct MR_LD_VF_AFFILIATION), 2299 &new_affiliation_h); 2300 if (!new_affiliation) { 2301 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2302 "memory for new affiliation for scsi%d\n", 2303 instance->host->host_no); 2304 megasas_return_cmd(instance, cmd); 2305 return -ENOMEM; 2306 } 2307 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2308 sizeof(struct MR_LD_VF_AFFILIATION)); 2309 } 2310 2311 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2312 2313 dcmd->cmd = MFI_CMD_DCMD; 2314 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2315 dcmd->sge_count = 1; 2316 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2317 dcmd->timeout = 0; 2318 dcmd->pad_0 = 0; 2319 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2320 sizeof(struct MR_LD_VF_AFFILIATION)); 2321 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2322 2323 if (initial) 2324 dcmd->sgl.sge32[0].phys_addr = 2325 cpu_to_le32(instance->vf_affiliation_h); 2326 else 2327 dcmd->sgl.sge32[0].phys_addr = 2328 cpu_to_le32(new_affiliation_h); 2329 2330 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2331 sizeof(struct MR_LD_VF_AFFILIATION)); 2332 2333 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2334 "scsi%d\n", instance->host->host_no); 2335 2336 2337 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2338 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2339 " failed with status 0x%x for scsi%d\n", 2340 dcmd->cmd_status, instance->host->host_no); 2341 retval = 1; /* Do a scan if we couldn't get affiliation */ 2342 goto out; 2343 } 2344 2345 if (!initial) { 2346 if (!new_affiliation->ldCount) { 2347 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2348 "affiliation for passive path for scsi%d\n", 2349 instance->host->host_no); 2350 retval = 1; 2351 goto out; 2352 } 2353 newmap = new_affiliation->map; 2354 savedmap = instance->vf_affiliation->map; 2355 thisVf = new_affiliation->thisVf; 2356 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2357 found = 0; 2358 for (j = 0; j < instance->vf_affiliation->ldCount; 2359 j++) { 2360 if (newmap->ref.targetId == 2361 savedmap->ref.targetId) { 2362 found = 1; 2363 if (newmap->policy[thisVf] != 2364 savedmap->policy[thisVf]) { 2365 doscan = 1; 2366 goto out; 2367 } 2368 } 2369 savedmap = (struct 
MR_LD_VF_MAP *) 2370 ((unsigned char *)savedmap + 2371 savedmap->size); 2372 } 2373 if (!found && newmap->policy[thisVf] != 2374 MR_LD_ACCESS_HIDDEN) { 2375 doscan = 1; 2376 goto out; 2377 } 2378 newmap = (struct MR_LD_VF_MAP *) 2379 ((unsigned char *)newmap + newmap->size); 2380 } 2381 2382 newmap = new_affiliation->map; 2383 savedmap = instance->vf_affiliation->map; 2384 2385 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2386 found = 0; 2387 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2388 if (savedmap->ref.targetId == 2389 newmap->ref.targetId) { 2390 found = 1; 2391 if (savedmap->policy[thisVf] != 2392 newmap->policy[thisVf]) { 2393 doscan = 1; 2394 goto out; 2395 } 2396 } 2397 newmap = (struct MR_LD_VF_MAP *) 2398 ((unsigned char *)newmap + 2399 newmap->size); 2400 } 2401 if (!found && savedmap->policy[thisVf] != 2402 MR_LD_ACCESS_HIDDEN) { 2403 doscan = 1; 2404 goto out; 2405 } 2406 savedmap = (struct MR_LD_VF_MAP *) 2407 ((unsigned char *)savedmap + 2408 savedmap->size); 2409 } 2410 } 2411 out: 2412 if (doscan) { 2413 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2414 "affiliation for scsi%d\n", instance->host->host_no); 2415 memcpy(instance->vf_affiliation, new_affiliation, 2416 new_affiliation->size); 2417 retval = 1; 2418 } 2419 2420 if (new_affiliation) 2421 pci_free_consistent(instance->pdev, 2422 (MAX_LOGICAL_DRIVES + 1) * 2423 sizeof(struct MR_LD_VF_AFFILIATION), 2424 new_affiliation, new_affiliation_h); 2425 megasas_return_cmd(instance, cmd); 2426 2427 return retval; 2428 } 2429 2430 /* This function will get the current SR-IOV LD/VF affiliation */ 2431 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2432 int initial) 2433 { 2434 int retval; 2435 2436 if (instance->PlasmaFW111) 2437 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2438 else 2439 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2440 return retval; 2441 } 2442 2443 /* This function will tell FW to start the SR-IOV heartbeat */ 2444 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2445 int initial) 2446 { 2447 struct megasas_cmd *cmd; 2448 struct megasas_dcmd_frame *dcmd; 2449 int retval = 0; 2450 2451 cmd = megasas_get_cmd(instance); 2452 2453 if (!cmd) { 2454 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2455 "Failed to get cmd for scsi%d\n", 2456 instance->host->host_no); 2457 return -ENOMEM; 2458 } 2459 2460 dcmd = &cmd->frame->dcmd; 2461 2462 if (initial) { 2463 instance->hb_host_mem = 2464 pci_zalloc_consistent(instance->pdev, 2465 sizeof(struct MR_CTRL_HB_HOST_MEM), 2466 &instance->hb_host_mem_h); 2467 if (!instance->hb_host_mem) { 2468 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2469 " memory for heartbeat host memory for scsi%d\n", 2470 instance->host->host_no); 2471 retval = -ENOMEM; 2472 goto out; 2473 } 2474 } 2475 2476 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2477 2478 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2479 dcmd->cmd = MFI_CMD_DCMD; 2480 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2481 dcmd->sge_count = 1; 2482 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2483 dcmd->timeout = 0; 2484 dcmd->pad_0 = 0; 2485 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2486 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2487 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h); 2488 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2489 
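	/*
	 * Issue the DCMD: wait for completion on Fusion adapters with
	 * interrupts enabled, otherwise fall back to polled mode.
	 */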
2490 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2491 instance->host->host_no); 2492 2493 if (instance->ctrl_context && !instance->mask_interrupts) 2494 retval = megasas_issue_blocked_cmd(instance, cmd, 2495 MEGASAS_ROUTINE_WAIT_TIME_VF); 2496 else 2497 retval = megasas_issue_polled(instance, cmd); 2498 2499 if (retval) { 2500 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2501 "_MEM_ALLOC DCMD %s for scsi%d\n", 2502 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2503 "timed out" : "failed", instance->host->host_no); 2504 retval = 1; 2505 } 2506 2507 out: 2508 megasas_return_cmd(instance, cmd); 2509 2510 return retval; 2511 } 2512 2513 /* Handler for SR-IOV heartbeat */ 2514 void megasas_sriov_heartbeat_handler(unsigned long instance_addr) 2515 { 2516 struct megasas_instance *instance = 2517 (struct megasas_instance *)instance_addr; 2518 2519 if (instance->hb_host_mem->HB.fwCounter != 2520 instance->hb_host_mem->HB.driverCounter) { 2521 instance->hb_host_mem->HB.driverCounter = 2522 instance->hb_host_mem->HB.fwCounter; 2523 mod_timer(&instance->sriov_heartbeat_timer, 2524 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2525 } else { 2526 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2527 "completed for scsi%d\n", instance->host->host_no); 2528 schedule_work(&instance->work_init); 2529 } 2530 } 2531 2532 /** 2533 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2534 * @instance: Adapter soft state 2535 * 2536 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2537 * complete all its outstanding commands. Returns error if one or more IOs 2538 * are pending after this time period. It also marks the controller dead. 2539 */ 2540 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2541 { 2542 int i, sl, outstanding; 2543 u32 reset_index; 2544 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2545 unsigned long flags; 2546 struct list_head clist_local; 2547 struct megasas_cmd *reset_cmd; 2548 u32 fw_state; 2549 2550 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2551 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2552 __func__, __LINE__); 2553 return FAILED; 2554 } 2555 2556 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2557 2558 INIT_LIST_HEAD(&clist_local); 2559 spin_lock_irqsave(&instance->hba_lock, flags); 2560 list_splice_init(&instance->internal_reset_pending_q, 2561 &clist_local); 2562 spin_unlock_irqrestore(&instance->hba_lock, flags); 2563 2564 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2565 for (i = 0; i < wait_time; i++) { 2566 msleep(1000); 2567 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2568 break; 2569 } 2570 2571 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2572 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2573 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2574 return FAILED; 2575 } 2576 2577 reset_index = 0; 2578 while (!list_empty(&clist_local)) { 2579 reset_cmd = list_entry((&clist_local)->next, 2580 struct megasas_cmd, list); 2581 list_del_init(&reset_cmd->list); 2582 if (reset_cmd->scmd) { 2583 reset_cmd->scmd->result = DID_REQUEUE << 16; 2584 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2585 reset_index, reset_cmd, 2586 reset_cmd->scmd->cmnd[0]); 2587 2588 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2589 megasas_return_cmd(instance, reset_cmd); 2590 } else if (reset_cmd->sync_cmd) { 2591 
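				/* Internal (sync) command was pending at reset time: re-fire it to the FW. */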
dev_notice(&instance->pdev->dev, "%p synch cmds" 2592 "reset queue\n", 2593 reset_cmd); 2594 2595 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2596 instance->instancet->fire_cmd(instance, 2597 reset_cmd->frame_phys_addr, 2598 0, instance->reg_set); 2599 } else { 2600 dev_notice(&instance->pdev->dev, "%p unexpected" 2601 "cmds lst\n", 2602 reset_cmd); 2603 } 2604 reset_index++; 2605 } 2606 2607 return SUCCESS; 2608 } 2609 2610 for (i = 0; i < resetwaittime; i++) { 2611 outstanding = atomic_read(&instance->fw_outstanding); 2612 2613 if (!outstanding) 2614 break; 2615 2616 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2617 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2618 "commands to complete\n",i,outstanding); 2619 /* 2620 * Call cmd completion routine. Cmd to be 2621 * be completed directly without depending on isr. 2622 */ 2623 megasas_complete_cmd_dpc((unsigned long)instance); 2624 } 2625 2626 msleep(1000); 2627 } 2628 2629 i = 0; 2630 outstanding = atomic_read(&instance->fw_outstanding); 2631 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2632 2633 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2634 goto no_outstanding; 2635 2636 if (instance->disableOnlineCtrlReset) 2637 goto kill_hba_and_failed; 2638 do { 2639 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2640 dev_info(&instance->pdev->dev, 2641 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n", 2642 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2643 if (i == 3) 2644 goto kill_hba_and_failed; 2645 megasas_do_ocr(instance); 2646 2647 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2648 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2649 __func__, __LINE__); 2650 return FAILED; 2651 } 2652 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2653 __func__, __LINE__); 2654 2655 for (sl = 0; sl < 10; sl++) 2656 msleep(500); 2657 2658 outstanding = atomic_read(&instance->fw_outstanding); 2659 2660 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2661 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2662 goto no_outstanding; 2663 } 2664 i++; 2665 } while (i <= 3); 2666 2667 no_outstanding: 2668 2669 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2670 __func__, __LINE__); 2671 return SUCCESS; 2672 2673 kill_hba_and_failed: 2674 2675 /* Reset not supported, kill adapter */ 2676 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2677 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2678 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2679 atomic_read(&instance->fw_outstanding)); 2680 megasas_dump_pending_frames(instance); 2681 megaraid_sas_kill_hba(instance); 2682 2683 return FAILED; 2684 } 2685 2686 /** 2687 * megasas_generic_reset - Generic reset routine 2688 * @scmd: Mid-layer SCSI command 2689 * 2690 * This routine implements a generic reset handler for device, bus and host 2691 * reset requests. Device, bus and host specific reset handlers can use this 2692 * function after they do their specific tasks. 
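 * In this driver that amounts to waiting for outstanding FW commands (see
 * megasas_wait_for_outstanding) and returning SUCCESS or FAILED to the midlayer.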
2693 */ 2694 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2695 { 2696 int ret_val; 2697 struct megasas_instance *instance; 2698 2699 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2700 2701 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2702 scmd->cmnd[0], scmd->retries); 2703 2704 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2705 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2706 return FAILED; 2707 } 2708 2709 ret_val = megasas_wait_for_outstanding(instance); 2710 if (ret_val == SUCCESS) 2711 dev_notice(&instance->pdev->dev, "reset successful\n"); 2712 else 2713 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2714 2715 return ret_val; 2716 } 2717 2718 /** 2719 * megasas_reset_timer - quiesce the adapter if required 2720 * @scmd: scsi cmnd 2721 * 2722 * Sets the FW busy flag and reduces the host->can_queue if the 2723 * cmd has not been completed within the timeout period. 2724 */ 2725 static enum 2726 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2727 { 2728 struct megasas_instance *instance; 2729 unsigned long flags; 2730 2731 if (time_after(jiffies, scmd->jiffies_at_alloc + 2732 (scmd_timeout * 2) * HZ)) { 2733 return BLK_EH_NOT_HANDLED; 2734 } 2735 2736 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2737 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2738 /* FW is busy, throttle IO */ 2739 spin_lock_irqsave(instance->host->host_lock, flags); 2740 2741 instance->host->can_queue = instance->throttlequeuedepth; 2742 instance->last_time = jiffies; 2743 instance->flag |= MEGASAS_FW_BUSY; 2744 2745 spin_unlock_irqrestore(instance->host->host_lock, flags); 2746 } 2747 return BLK_EH_RESET_TIMER; 2748 } 2749 2750 /** 2751 * megasas_dump_frame - This function will dump MPT/MFI frame 2752 */ 2753 static inline void 2754 megasas_dump_frame(void *mpi_request, int sz) 2755 { 2756 int i; 2757 __le32 *mfp = (__le32 *)mpi_request; 2758 2759 printk(KERN_INFO "IO request frame:\n\t"); 2760 for (i = 0; i < sz / sizeof(__le32); i++) { 2761 if (i && ((i % 8) == 0)) 2762 printk("\n\t"); 2763 printk("%08x ", le32_to_cpu(mfp[i])); 2764 } 2765 printk("\n"); 2766 } 2767 2768 /** 2769 * megasas_reset_bus_host - Bus & host reset handler entry point 2770 */ 2771 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2772 { 2773 int ret; 2774 struct megasas_instance *instance; 2775 2776 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2777 2778 scmd_printk(KERN_INFO, scmd, 2779 "Controller reset is requested due to IO timeout\n" 2780 "SCSI command pointer: (%p)\t SCSI host state: %d\t" 2781 " SCSI host busy: %d\t FW outstanding: %d\n", 2782 scmd, scmd->device->host->shost_state, 2783 atomic_read((atomic_t *)&scmd->device->host->host_busy), 2784 atomic_read(&instance->fw_outstanding)); 2785 2786 /* 2787 * First wait for all commands to complete 2788 */ 2789 if (instance->ctrl_context) { 2790 struct megasas_cmd_fusion *cmd; 2791 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2792 if (cmd) 2793 megasas_dump_frame(cmd->io_request, 2794 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 2795 ret = megasas_reset_fusion(scmd->device->host, 2796 SCSIIO_TIMEOUT_OCR); 2797 } else 2798 ret = megasas_generic_reset(scmd); 2799 2800 return ret; 2801 } 2802 2803 /** 2804 * megasas_task_abort - Issues task abort request to firmware 2805 * (supported only for fusion adapters) 2806 * @scmd: SCSI command pointer 2807 */ 2808 static int megasas_task_abort(struct 
scsi_cmnd *scmd) 2809 { 2810 int ret; 2811 struct megasas_instance *instance; 2812 2813 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2814 2815 if (instance->ctrl_context) 2816 ret = megasas_task_abort_fusion(scmd); 2817 else { 2818 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2819 ret = FAILED; 2820 } 2821 2822 return ret; 2823 } 2824 2825 /** 2826 * megasas_reset_target: Issues target reset request to firmware 2827 * (supported only for fusion adapters) 2828 * @scmd: SCSI command pointer 2829 */ 2830 static int megasas_reset_target(struct scsi_cmnd *scmd) 2831 { 2832 int ret; 2833 struct megasas_instance *instance; 2834 2835 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2836 2837 if (instance->ctrl_context) 2838 ret = megasas_reset_target_fusion(scmd); 2839 else { 2840 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2841 ret = FAILED; 2842 } 2843 2844 return ret; 2845 } 2846 2847 /** 2848 * megasas_bios_param - Returns disk geometry for a disk 2849 * @sdev: device handle 2850 * @bdev: block device 2851 * @capacity: drive capacity 2852 * @geom: geometry parameters 2853 */ 2854 static int 2855 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2856 sector_t capacity, int geom[]) 2857 { 2858 int heads; 2859 int sectors; 2860 sector_t cylinders; 2861 unsigned long tmp; 2862 2863 /* Default heads (64) & sectors (32) */ 2864 heads = 64; 2865 sectors = 32; 2866 2867 tmp = heads * sectors; 2868 cylinders = capacity; 2869 2870 sector_div(cylinders, tmp); 2871 2872 /* 2873 * Handle extended translation size for logical drives > 1Gb 2874 */ 2875 2876 if (capacity >= 0x200000) { 2877 heads = 255; 2878 sectors = 63; 2879 tmp = heads*sectors; 2880 cylinders = capacity; 2881 sector_div(cylinders, tmp); 2882 } 2883 2884 geom[0] = heads; 2885 geom[1] = sectors; 2886 geom[2] = cylinders; 2887 2888 return 0; 2889 } 2890 2891 static void megasas_aen_polling(struct work_struct *work); 2892 2893 /** 2894 * megasas_service_aen - Processes an event notification 2895 * @instance: Adapter soft state 2896 * @cmd: AEN command completed by the ISR 2897 * 2898 * For AEN, driver sends a command down to FW that is held by the FW till an 2899 * event occurs. When an event of interest occurs, FW completes the command 2900 * that it was previously holding. 2901 * 2902 * This routines sends SIGIO signal to processes that have registered with the 2903 * driver for AEN. 
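 * It also wakes up poll(2) waiters on megasas_poll_wait and schedules the
 * hotplug AEN worker (megasas_aen_polling).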
2904 */ 2905 static void 2906 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2907 { 2908 unsigned long flags; 2909 2910 /* 2911 * Don't signal app if it is just an aborted previously registered aen 2912 */ 2913 if ((!cmd->abort_aen) && (instance->unload == 0)) { 2914 spin_lock_irqsave(&poll_aen_lock, flags); 2915 megasas_poll_wait_aen = 1; 2916 spin_unlock_irqrestore(&poll_aen_lock, flags); 2917 wake_up(&megasas_poll_wait); 2918 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 2919 } 2920 else 2921 cmd->abort_aen = 0; 2922 2923 instance->aen_cmd = NULL; 2924 2925 megasas_return_cmd(instance, cmd); 2926 2927 if ((instance->unload == 0) && 2928 ((instance->issuepend_done == 1))) { 2929 struct megasas_aen_event *ev; 2930 2931 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2932 if (!ev) { 2933 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 2934 } else { 2935 ev->instance = instance; 2936 instance->ev = ev; 2937 INIT_DELAYED_WORK(&ev->hotplug_work, 2938 megasas_aen_polling); 2939 schedule_delayed_work(&ev->hotplug_work, 0); 2940 } 2941 } 2942 } 2943 2944 static ssize_t 2945 megasas_fw_crash_buffer_store(struct device *cdev, 2946 struct device_attribute *attr, const char *buf, size_t count) 2947 { 2948 struct Scsi_Host *shost = class_to_shost(cdev); 2949 struct megasas_instance *instance = 2950 (struct megasas_instance *) shost->hostdata; 2951 int val = 0; 2952 unsigned long flags; 2953 2954 if (kstrtoint(buf, 0, &val) != 0) 2955 return -EINVAL; 2956 2957 spin_lock_irqsave(&instance->crashdump_lock, flags); 2958 instance->fw_crash_buffer_offset = val; 2959 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2960 return strlen(buf); 2961 } 2962 2963 static ssize_t 2964 megasas_fw_crash_buffer_show(struct device *cdev, 2965 struct device_attribute *attr, char *buf) 2966 { 2967 struct Scsi_Host *shost = class_to_shost(cdev); 2968 struct megasas_instance *instance = 2969 (struct megasas_instance *) shost->hostdata; 2970 u32 size; 2971 unsigned long buff_addr; 2972 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 2973 unsigned long src_addr; 2974 unsigned long flags; 2975 u32 buff_offset; 2976 2977 spin_lock_irqsave(&instance->crashdump_lock, flags); 2978 buff_offset = instance->fw_crash_buffer_offset; 2979 if (!instance->crash_dump_buf && 2980 !((instance->fw_crash_state == AVAILABLE) || 2981 (instance->fw_crash_state == COPYING))) { 2982 dev_err(&instance->pdev->dev, 2983 "Firmware crash dump is not available\n"); 2984 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2985 return -EINVAL; 2986 } 2987 2988 buff_addr = (unsigned long) buf; 2989 2990 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 2991 dev_err(&instance->pdev->dev, 2992 "Firmware crash dump offset is out of range\n"); 2993 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2994 return 0; 2995 } 2996 2997 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 2998 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 2999 3000 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3001 (buff_offset % dmachunk); 3002 memcpy(buf, (void *)src_addr, size); 3003 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3004 3005 return size; 3006 } 3007 3008 static ssize_t 3009 megasas_fw_crash_buffer_size_show(struct device *cdev, 3010 struct device_attribute *attr, char *buf) 3011 { 3012 struct Scsi_Host *shost = class_to_shost(cdev); 3013 struct megasas_instance *instance = 3014 (struct megasas_instance *) shost->hostdata; 3015 3016 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3017 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3018 } 3019 3020 static ssize_t 3021 megasas_fw_crash_state_store(struct device *cdev, 3022 struct device_attribute *attr, const char *buf, size_t count) 3023 { 3024 struct Scsi_Host *shost = class_to_shost(cdev); 3025 struct megasas_instance *instance = 3026 (struct megasas_instance *) shost->hostdata; 3027 int val = 0; 3028 unsigned long flags; 3029 3030 if (kstrtoint(buf, 0, &val) != 0) 3031 return -EINVAL; 3032 3033 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3034 dev_err(&instance->pdev->dev, "application updates invalid " 3035 "firmware crash state\n"); 3036 return -EINVAL; 3037 } 3038 3039 instance->fw_crash_state = val; 3040 3041 if ((val == COPIED) || (val == COPY_ERROR)) { 3042 spin_lock_irqsave(&instance->crashdump_lock, flags); 3043 megasas_free_host_crash_buffer(instance); 3044 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 3045 if (val == COPY_ERROR) 3046 dev_info(&instance->pdev->dev, "application failed to " 3047 "copy Firmware crash dump\n"); 3048 else 3049 dev_info(&instance->pdev->dev, "Firmware crash dump " 3050 "copied successfully\n"); 3051 } 3052 return strlen(buf); 3053 } 3054 3055 static ssize_t 3056 megasas_fw_crash_state_show(struct device *cdev, 3057 struct device_attribute *attr, char *buf) 3058 { 3059 struct Scsi_Host *shost = class_to_shost(cdev); 3060 struct megasas_instance *instance = 3061 (struct megasas_instance *) shost->hostdata; 3062 3063 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3064 } 3065 3066 static ssize_t 3067 megasas_page_size_show(struct device *cdev, 3068 struct device_attribute *attr, char *buf) 3069 { 3070 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3071 } 3072 3073 static ssize_t 3074 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3075 char *buf) 3076 { 3077 struct Scsi_Host *shost = class_to_shost(cdev); 3078 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3079 3080 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3081 } 3082 3083 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3084 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3085 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3086 megasas_fw_crash_buffer_size_show, NULL); 3087 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 3088 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 3089 static DEVICE_ATTR(page_size, S_IRUGO, 3090 megasas_page_size_show, NULL); 3091 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3092 megasas_ldio_outstanding_show, NULL); 3093 3094 struct device_attribute *megaraid_host_attrs[] = { 3095 &dev_attr_fw_crash_buffer_size, 3096 &dev_attr_fw_crash_buffer, 3097 &dev_attr_fw_crash_state, 3098 &dev_attr_page_size, 3099 &dev_attr_ldio_outstanding, 3100 NULL, 3101 }; 3102 3103 /* 3104 * 
Scsi host template for megaraid_sas driver 3105 */ 3106 static struct scsi_host_template megasas_template = { 3107 3108 .module = THIS_MODULE, 3109 .name = "Avago SAS based MegaRAID driver", 3110 .proc_name = "megaraid_sas", 3111 .slave_configure = megasas_slave_configure, 3112 .slave_alloc = megasas_slave_alloc, 3113 .slave_destroy = megasas_slave_destroy, 3114 .queuecommand = megasas_queue_command, 3115 .eh_target_reset_handler = megasas_reset_target, 3116 .eh_abort_handler = megasas_task_abort, 3117 .eh_host_reset_handler = megasas_reset_bus_host, 3118 .eh_timed_out = megasas_reset_timer, 3119 .shost_attrs = megaraid_host_attrs, 3120 .bios_param = megasas_bios_param, 3121 .use_clustering = ENABLE_CLUSTERING, 3122 .change_queue_depth = scsi_change_queue_depth, 3123 .no_write_same = 1, 3124 }; 3125 3126 /** 3127 * megasas_complete_int_cmd - Completes an internal command 3128 * @instance: Adapter soft state 3129 * @cmd: Command to be completed 3130 * 3131 * The megasas_issue_blocked_cmd() function waits for a command to complete 3132 * after it issues a command. This function wakes up that waiting routine by 3133 * calling wake_up() on the wait queue. 3134 */ 3135 static void 3136 megasas_complete_int_cmd(struct megasas_instance *instance, 3137 struct megasas_cmd *cmd) 3138 { 3139 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3140 wake_up(&instance->int_cmd_wait_q); 3141 } 3142 3143 /** 3144 * megasas_complete_abort - Completes aborting a command 3145 * @instance: Adapter soft state 3146 * @cmd: Cmd that was issued to abort another cmd 3147 * 3148 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3149 * after it issues an abort on a previously issued command. This function 3150 * wakes up all functions waiting on the same wait queue. 3151 */ 3152 static void 3153 megasas_complete_abort(struct megasas_instance *instance, 3154 struct megasas_cmd *cmd) 3155 { 3156 if (cmd->sync_cmd) { 3157 cmd->sync_cmd = 0; 3158 cmd->cmd_status_drv = 0; 3159 wake_up(&instance->abort_cmd_wait_q); 3160 } 3161 } 3162 3163 /** 3164 * megasas_complete_cmd - Completes a command 3165 * @instance: Adapter soft state 3166 * @cmd: Command to be completed 3167 * @alt_status: If non-zero, use this value as status to 3168 * SCSI mid-layer instead of the value returned 3169 * by the FW. This should be used if caller wants 3170 * an alternate status (as in the case of aborted 3171 * commands) 3172 */ 3173 void 3174 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3175 u8 alt_status) 3176 { 3177 int exception = 0; 3178 struct megasas_header *hdr = &cmd->frame->hdr; 3179 unsigned long flags; 3180 struct fusion_context *fusion = instance->ctrl_context; 3181 u32 opcode, status; 3182 3183 /* flag for the retry reset */ 3184 cmd->retry_for_fw_reset = 0; 3185 3186 if (cmd->scmd) 3187 cmd->scmd->SCp.ptr = NULL; 3188 3189 switch (hdr->cmd) { 3190 case MFI_CMD_INVALID: 3191 /* Some older 1068 controller FW may keep a pended 3192 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3193 when booting the kdump kernel. Ignore this command to 3194 prevent a kernel panic on shutdown of the kdump kernel. 
*/ 3195 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3196 "completed\n"); 3197 dev_warn(&instance->pdev->dev, "If you have a controller " 3198 "other than PERC5, please upgrade your firmware\n"); 3199 break; 3200 case MFI_CMD_PD_SCSI_IO: 3201 case MFI_CMD_LD_SCSI_IO: 3202 3203 /* 3204 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3205 * issued either through an IO path or an IOCTL path. If it 3206 * was via IOCTL, we will send it to internal completion. 3207 */ 3208 if (cmd->sync_cmd) { 3209 cmd->sync_cmd = 0; 3210 megasas_complete_int_cmd(instance, cmd); 3211 break; 3212 } 3213 3214 case MFI_CMD_LD_READ: 3215 case MFI_CMD_LD_WRITE: 3216 3217 if (alt_status) { 3218 cmd->scmd->result = alt_status << 16; 3219 exception = 1; 3220 } 3221 3222 if (exception) { 3223 3224 atomic_dec(&instance->fw_outstanding); 3225 3226 scsi_dma_unmap(cmd->scmd); 3227 cmd->scmd->scsi_done(cmd->scmd); 3228 megasas_return_cmd(instance, cmd); 3229 3230 break; 3231 } 3232 3233 switch (hdr->cmd_status) { 3234 3235 case MFI_STAT_OK: 3236 cmd->scmd->result = DID_OK << 16; 3237 break; 3238 3239 case MFI_STAT_SCSI_IO_FAILED: 3240 case MFI_STAT_LD_INIT_IN_PROGRESS: 3241 cmd->scmd->result = 3242 (DID_ERROR << 16) | hdr->scsi_status; 3243 break; 3244 3245 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3246 3247 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3248 3249 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3250 memset(cmd->scmd->sense_buffer, 0, 3251 SCSI_SENSE_BUFFERSIZE); 3252 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3253 hdr->sense_len); 3254 3255 cmd->scmd->result |= DRIVER_SENSE << 24; 3256 } 3257 3258 break; 3259 3260 case MFI_STAT_LD_OFFLINE: 3261 case MFI_STAT_DEVICE_NOT_FOUND: 3262 cmd->scmd->result = DID_BAD_TARGET << 16; 3263 break; 3264 3265 default: 3266 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3267 hdr->cmd_status); 3268 cmd->scmd->result = DID_ERROR << 16; 3269 break; 3270 } 3271 3272 atomic_dec(&instance->fw_outstanding); 3273 3274 scsi_dma_unmap(cmd->scmd); 3275 cmd->scmd->scsi_done(cmd->scmd); 3276 megasas_return_cmd(instance, cmd); 3277 3278 break; 3279 3280 case MFI_CMD_SMP: 3281 case MFI_CMD_STP: 3282 case MFI_CMD_DCMD: 3283 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3284 /* Check for LD map update */ 3285 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3286 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3287 fusion->fast_path_io = 0; 3288 spin_lock_irqsave(instance->host->host_lock, flags); 3289 instance->map_update_cmd = NULL; 3290 if (cmd->frame->hdr.cmd_status != 0) { 3291 if (cmd->frame->hdr.cmd_status != 3292 MFI_STAT_NOT_FOUND) 3293 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3294 cmd->frame->hdr.cmd_status); 3295 else { 3296 megasas_return_cmd(instance, cmd); 3297 spin_unlock_irqrestore( 3298 instance->host->host_lock, 3299 flags); 3300 break; 3301 } 3302 } else 3303 instance->map_id++; 3304 megasas_return_cmd(instance, cmd); 3305 3306 /* 3307 * Set fast path IO to ZERO. 3308 * Validate Map will set proper value. 3309 * Meanwhile all IOs will go as LD IO. 
3310 */ 3311 if (MR_ValidateMapInfo(instance)) 3312 fusion->fast_path_io = 1; 3313 else 3314 fusion->fast_path_io = 0; 3315 megasas_sync_map_info(instance); 3316 spin_unlock_irqrestore(instance->host->host_lock, 3317 flags); 3318 break; 3319 } 3320 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3321 opcode == MR_DCMD_CTRL_EVENT_GET) { 3322 spin_lock_irqsave(&poll_aen_lock, flags); 3323 megasas_poll_wait_aen = 0; 3324 spin_unlock_irqrestore(&poll_aen_lock, flags); 3325 } 3326 3327 /* FW has an updated PD sequence */ 3328 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3329 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3330 3331 spin_lock_irqsave(instance->host->host_lock, flags); 3332 status = cmd->frame->hdr.cmd_status; 3333 instance->jbod_seq_cmd = NULL; 3334 megasas_return_cmd(instance, cmd); 3335 3336 if (status == MFI_STAT_OK) { 3337 instance->pd_seq_map_id++; 3338 /* Re-register a pd sync seq num cmd */ 3339 if (megasas_sync_pd_seq_num(instance, true)) 3340 instance->use_seqnum_jbod_fp = false; 3341 } else 3342 instance->use_seqnum_jbod_fp = false; 3343 3344 spin_unlock_irqrestore(instance->host->host_lock, flags); 3345 break; 3346 } 3347 3348 /* 3349 * See if got an event notification 3350 */ 3351 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3352 megasas_service_aen(instance, cmd); 3353 else 3354 megasas_complete_int_cmd(instance, cmd); 3355 3356 break; 3357 3358 case MFI_CMD_ABORT: 3359 /* 3360 * Cmd issued to abort another cmd returned 3361 */ 3362 megasas_complete_abort(instance, cmd); 3363 break; 3364 3365 default: 3366 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3367 hdr->cmd); 3368 break; 3369 } 3370 } 3371 3372 /** 3373 * megasas_issue_pending_cmds_again - issue all pending cmds 3374 * in FW again because of the fw reset 3375 * @instance: Adapter soft state 3376 */ 3377 static inline void 3378 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3379 { 3380 struct megasas_cmd *cmd; 3381 struct list_head clist_local; 3382 union megasas_evt_class_locale class_locale; 3383 unsigned long flags; 3384 u32 seq_num; 3385 3386 INIT_LIST_HEAD(&clist_local); 3387 spin_lock_irqsave(&instance->hba_lock, flags); 3388 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3389 spin_unlock_irqrestore(&instance->hba_lock, flags); 3390 3391 while (!list_empty(&clist_local)) { 3392 cmd = list_entry((&clist_local)->next, 3393 struct megasas_cmd, list); 3394 list_del_init(&cmd->list); 3395 3396 if (cmd->sync_cmd || cmd->scmd) { 3397 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3398 "detected to be pending while HBA reset\n", 3399 cmd, cmd->scmd, cmd->sync_cmd); 3400 3401 cmd->retry_for_fw_reset++; 3402 3403 if (cmd->retry_for_fw_reset == 3) { 3404 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3405 "was tried multiple times during reset." 
3406 "Shutting down the HBA\n", 3407 cmd, cmd->scmd, cmd->sync_cmd); 3408 instance->instancet->disable_intr(instance); 3409 atomic_set(&instance->fw_reset_no_pci_access, 1); 3410 megaraid_sas_kill_hba(instance); 3411 return; 3412 } 3413 } 3414 3415 if (cmd->sync_cmd == 1) { 3416 if (cmd->scmd) { 3417 dev_notice(&instance->pdev->dev, "unexpected" 3418 "cmd attached to internal command!\n"); 3419 } 3420 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3421 "on the internal reset queue," 3422 "issue it again.\n", cmd); 3423 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3424 instance->instancet->fire_cmd(instance, 3425 cmd->frame_phys_addr, 3426 0, instance->reg_set); 3427 } else if (cmd->scmd) { 3428 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3429 "detected on the internal queue, issue again.\n", 3430 cmd, cmd->scmd->cmnd[0]); 3431 3432 atomic_inc(&instance->fw_outstanding); 3433 instance->instancet->fire_cmd(instance, 3434 cmd->frame_phys_addr, 3435 cmd->frame_count-1, instance->reg_set); 3436 } else { 3437 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3438 "internal reset defer list while re-issue!!\n", 3439 cmd); 3440 } 3441 } 3442 3443 if (instance->aen_cmd) { 3444 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3445 megasas_return_cmd(instance, instance->aen_cmd); 3446 3447 instance->aen_cmd = NULL; 3448 } 3449 3450 /* 3451 * Initiate AEN (Asynchronous Event Notification) 3452 */ 3453 seq_num = instance->last_seq_num; 3454 class_locale.members.reserved = 0; 3455 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3456 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3457 3458 megasas_register_aen(instance, seq_num, class_locale.word); 3459 } 3460 3461 /** 3462 * Move the internal reset pending commands to a deferred queue. 3463 * 3464 * We move the commands pending at internal reset time to a 3465 * pending queue. This queue would be flushed after successful 3466 * completion of the internal reset sequence. if the internal reset 3467 * did not complete in time, the kernel reset handler would flush 3468 * these commands. 
3469 **/ 3470 static void 3471 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3472 { 3473 struct megasas_cmd *cmd; 3474 int i; 3475 u16 max_cmd = instance->max_fw_cmds; 3476 u32 defer_index; 3477 unsigned long flags; 3478 3479 defer_index = 0; 3480 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3481 for (i = 0; i < max_cmd; i++) { 3482 cmd = instance->cmd_list[i]; 3483 if (cmd->sync_cmd == 1 || cmd->scmd) { 3484 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3485 "on the defer queue as internal\n", 3486 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3487 3488 if (!list_empty(&cmd->list)) { 3489 dev_notice(&instance->pdev->dev, "ERROR while" 3490 " moving this cmd:%p, %d %p, it was" 3491 "discovered on some list?\n", 3492 cmd, cmd->sync_cmd, cmd->scmd); 3493 3494 list_del_init(&cmd->list); 3495 } 3496 defer_index++; 3497 list_add_tail(&cmd->list, 3498 &instance->internal_reset_pending_q); 3499 } 3500 } 3501 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3502 } 3503 3504 3505 static void 3506 process_fw_state_change_wq(struct work_struct *work) 3507 { 3508 struct megasas_instance *instance = 3509 container_of(work, struct megasas_instance, work_init); 3510 u32 wait; 3511 unsigned long flags; 3512 3513 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3514 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3515 atomic_read(&instance->adprecovery)); 3516 return ; 3517 } 3518 3519 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3520 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3521 "state, restarting it...\n"); 3522 3523 instance->instancet->disable_intr(instance); 3524 atomic_set(&instance->fw_outstanding, 0); 3525 3526 atomic_set(&instance->fw_reset_no_pci_access, 1); 3527 instance->instancet->adp_reset(instance, instance->reg_set); 3528 atomic_set(&instance->fw_reset_no_pci_access, 0); 3529 3530 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3531 "initiating next stage...\n"); 3532 3533 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3534 "state 2 starting...\n"); 3535 3536 /* waiting for about 20 second before start the second init */ 3537 for (wait = 0; wait < 30; wait++) { 3538 msleep(1000); 3539 } 3540 3541 if (megasas_transition_to_ready(instance, 1)) { 3542 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3543 3544 atomic_set(&instance->fw_reset_no_pci_access, 1); 3545 megaraid_sas_kill_hba(instance); 3546 return ; 3547 } 3548 3549 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3550 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3551 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3552 ) { 3553 *instance->consumer = *instance->producer; 3554 } else { 3555 *instance->consumer = 0; 3556 *instance->producer = 0; 3557 } 3558 3559 megasas_issue_init_mfi(instance); 3560 3561 spin_lock_irqsave(&instance->hba_lock, flags); 3562 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3563 spin_unlock_irqrestore(&instance->hba_lock, flags); 3564 instance->instancet->enable_intr(instance); 3565 3566 megasas_issue_pending_cmds_again(instance); 3567 instance->issuepend_done = 1; 3568 } 3569 } 3570 3571 /** 3572 * megasas_deplete_reply_queue - Processes all completed commands 3573 * @instance: Adapter soft state 3574 * @alt_status: Alternate status to be returned to 3575 * SCSI mid-layer instead of the status 3576 * returned by the FW 3577 * Note: this must be called with hba lock held 3578 */ 3579 static 
int 3580 megasas_deplete_reply_queue(struct megasas_instance *instance, 3581 u8 alt_status) 3582 { 3583 u32 mfiStatus; 3584 u32 fw_state; 3585 3586 if ((mfiStatus = instance->instancet->check_reset(instance, 3587 instance->reg_set)) == 1) { 3588 return IRQ_HANDLED; 3589 } 3590 3591 if ((mfiStatus = instance->instancet->clear_intr( 3592 instance->reg_set) 3593 ) == 0) { 3594 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3595 if (!instance->msix_vectors) 3596 return IRQ_NONE; 3597 } 3598 3599 instance->mfiStatus = mfiStatus; 3600 3601 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3602 fw_state = instance->instancet->read_fw_status_reg( 3603 instance->reg_set) & MFI_STATE_MASK; 3604 3605 if (fw_state != MFI_STATE_FAULT) { 3606 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3607 fw_state); 3608 } 3609 3610 if ((fw_state == MFI_STATE_FAULT) && 3611 (instance->disableOnlineCtrlReset == 0)) { 3612 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3613 3614 if ((instance->pdev->device == 3615 PCI_DEVICE_ID_LSI_SAS1064R) || 3616 (instance->pdev->device == 3617 PCI_DEVICE_ID_DELL_PERC5) || 3618 (instance->pdev->device == 3619 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3620 3621 *instance->consumer = 3622 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3623 } 3624 3625 3626 instance->instancet->disable_intr(instance); 3627 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3628 instance->issuepend_done = 0; 3629 3630 atomic_set(&instance->fw_outstanding, 0); 3631 megasas_internal_reset_defer_cmds(instance); 3632 3633 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3634 fw_state, atomic_read(&instance->adprecovery)); 3635 3636 schedule_work(&instance->work_init); 3637 return IRQ_HANDLED; 3638 3639 } else { 3640 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3641 fw_state, instance->disableOnlineCtrlReset); 3642 } 3643 } 3644 3645 tasklet_schedule(&instance->isr_tasklet); 3646 return IRQ_HANDLED; 3647 } 3648 /** 3649 * megasas_isr - isr entry point 3650 */ 3651 static irqreturn_t megasas_isr(int irq, void *devp) 3652 { 3653 struct megasas_irq_context *irq_context = devp; 3654 struct megasas_instance *instance = irq_context->instance; 3655 unsigned long flags; 3656 irqreturn_t rc; 3657 3658 if (atomic_read(&instance->fw_reset_no_pci_access)) 3659 return IRQ_HANDLED; 3660 3661 spin_lock_irqsave(&instance->hba_lock, flags); 3662 rc = megasas_deplete_reply_queue(instance, DID_OK); 3663 spin_unlock_irqrestore(&instance->hba_lock, flags); 3664 3665 return rc; 3666 } 3667 3668 /** 3669 * megasas_transition_to_ready - Move the FW to READY state 3670 * @instance: Adapter soft state 3671 * 3672 * During the initialization, FW passes can potentially be in any one of 3673 * several possible states. If the FW in operational, waiting-for-handshake 3674 * states, driver must take steps to bring it to ready state. Otherwise, it 3675 * has to wait for the ready state. 
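 * Returns 0 once the FW reaches READY, or -ENODEV if the state does not
 * change within the per-state wait time or FAULT is seen with OCR disallowed.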
3676 */ 3677 int 3678 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3679 { 3680 int i; 3681 u8 max_wait; 3682 u32 fw_state; 3683 u32 cur_state; 3684 u32 abs_state, curr_abs_state; 3685 3686 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); 3687 fw_state = abs_state & MFI_STATE_MASK; 3688 3689 if (fw_state != MFI_STATE_READY) 3690 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3691 " state\n"); 3692 3693 while (fw_state != MFI_STATE_READY) { 3694 3695 switch (fw_state) { 3696 3697 case MFI_STATE_FAULT: 3698 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3699 if (ocr) { 3700 max_wait = MEGASAS_RESET_WAIT_TIME; 3701 cur_state = MFI_STATE_FAULT; 3702 break; 3703 } else 3704 return -ENODEV; 3705 3706 case MFI_STATE_WAIT_HANDSHAKE: 3707 /* 3708 * Set the CLR bit in inbound doorbell 3709 */ 3710 if ((instance->pdev->device == 3711 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3712 (instance->pdev->device == 3713 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3714 (instance->ctrl_context)) 3715 writel( 3716 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3717 &instance->reg_set->doorbell); 3718 else 3719 writel( 3720 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3721 &instance->reg_set->inbound_doorbell); 3722 3723 max_wait = MEGASAS_RESET_WAIT_TIME; 3724 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3725 break; 3726 3727 case MFI_STATE_BOOT_MESSAGE_PENDING: 3728 if ((instance->pdev->device == 3729 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3730 (instance->pdev->device == 3731 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3732 (instance->ctrl_context)) 3733 writel(MFI_INIT_HOTPLUG, 3734 &instance->reg_set->doorbell); 3735 else 3736 writel(MFI_INIT_HOTPLUG, 3737 &instance->reg_set->inbound_doorbell); 3738 3739 max_wait = MEGASAS_RESET_WAIT_TIME; 3740 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3741 break; 3742 3743 case MFI_STATE_OPERATIONAL: 3744 /* 3745 * Bring it to READY state; assuming max wait 10 secs 3746 */ 3747 instance->instancet->disable_intr(instance); 3748 if ((instance->pdev->device == 3749 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3750 (instance->pdev->device == 3751 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3752 (instance->ctrl_context)) { 3753 writel(MFI_RESET_FLAGS, 3754 &instance->reg_set->doorbell); 3755 3756 if (instance->ctrl_context) { 3757 for (i = 0; i < (10 * 1000); i += 20) { 3758 if (readl( 3759 &instance-> 3760 reg_set-> 3761 doorbell) & 1) 3762 msleep(20); 3763 else 3764 break; 3765 } 3766 } 3767 } else 3768 writel(MFI_RESET_FLAGS, 3769 &instance->reg_set->inbound_doorbell); 3770 3771 max_wait = MEGASAS_RESET_WAIT_TIME; 3772 cur_state = MFI_STATE_OPERATIONAL; 3773 break; 3774 3775 case MFI_STATE_UNDEFINED: 3776 /* 3777 * This state should not last for more than 2 seconds 3778 */ 3779 max_wait = MEGASAS_RESET_WAIT_TIME; 3780 cur_state = MFI_STATE_UNDEFINED; 3781 break; 3782 3783 case MFI_STATE_BB_INIT: 3784 max_wait = MEGASAS_RESET_WAIT_TIME; 3785 cur_state = MFI_STATE_BB_INIT; 3786 break; 3787 3788 case MFI_STATE_FW_INIT: 3789 max_wait = MEGASAS_RESET_WAIT_TIME; 3790 cur_state = MFI_STATE_FW_INIT; 3791 break; 3792 3793 case MFI_STATE_FW_INIT_2: 3794 max_wait = MEGASAS_RESET_WAIT_TIME; 3795 cur_state = MFI_STATE_FW_INIT_2; 3796 break; 3797 3798 case MFI_STATE_DEVICE_SCAN: 3799 max_wait = MEGASAS_RESET_WAIT_TIME; 3800 cur_state = MFI_STATE_DEVICE_SCAN; 3801 break; 3802 3803 case MFI_STATE_FLUSH_CACHE: 3804 max_wait = MEGASAS_RESET_WAIT_TIME; 3805 cur_state = MFI_STATE_FLUSH_CACHE; 3806 break; 3807 3808 default: 3809 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "Unknown state 0x%x\n", 3810 fw_state); 3811 return -ENODEV; 3812 } 3813 3814 /* 3815 * The cur_state should not last for more than max_wait secs 3816 */ 3817 for (i = 0; i < (max_wait * 1000); i++) { 3818 curr_abs_state = instance->instancet-> 3819 read_fw_status_reg(instance->reg_set); 3820 3821 if (abs_state == curr_abs_state) { 3822 msleep(1); 3823 } else 3824 break; 3825 } 3826 3827 /* 3828 * Return error if fw_state hasn't changed after max_wait 3829 */ 3830 if (curr_abs_state == abs_state) { 3831 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3832 "in %d secs\n", fw_state, max_wait); 3833 return -ENODEV; 3834 } 3835 3836 abs_state = curr_abs_state; 3837 fw_state = curr_abs_state & MFI_STATE_MASK; 3838 } 3839 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3840 3841 return 0; 3842 } 3843 3844 /** 3845 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3846 * @instance: Adapter soft state 3847 */ 3848 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3849 { 3850 int i; 3851 u16 max_cmd = instance->max_mfi_cmds; 3852 struct megasas_cmd *cmd; 3853 3854 if (!instance->frame_dma_pool) 3855 return; 3856 3857 /* 3858 * Return all frames to pool 3859 */ 3860 for (i = 0; i < max_cmd; i++) { 3861 3862 cmd = instance->cmd_list[i]; 3863 3864 if (cmd->frame) 3865 pci_pool_free(instance->frame_dma_pool, cmd->frame, 3866 cmd->frame_phys_addr); 3867 3868 if (cmd->sense) 3869 pci_pool_free(instance->sense_dma_pool, cmd->sense, 3870 cmd->sense_phys_addr); 3871 } 3872 3873 /* 3874 * Now destroy the pool itself 3875 */ 3876 pci_pool_destroy(instance->frame_dma_pool); 3877 pci_pool_destroy(instance->sense_dma_pool); 3878 3879 instance->frame_dma_pool = NULL; 3880 instance->sense_dma_pool = NULL; 3881 } 3882 3883 /** 3884 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3885 * @instance: Adapter soft state 3886 * 3887 * Each command packet has an embedded DMA memory buffer that is used for 3888 * filling MFI frame and the SG list that immediately follows the frame. This 3889 * function creates those DMA memory buffers for each command packet by using 3890 * PCI pool facility. 3891 */ 3892 static int megasas_create_frame_pool(struct megasas_instance *instance) 3893 { 3894 int i; 3895 u16 max_cmd; 3896 u32 sge_sz; 3897 u32 frame_count; 3898 struct megasas_cmd *cmd; 3899 3900 max_cmd = instance->max_mfi_cmds; 3901 3902 /* 3903 * Size of our frame is 64 bytes for MFI frame, followed by max SG 3904 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 3905 */ 3906 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 3907 sizeof(struct megasas_sge32); 3908 3909 if (instance->flag_ieee) 3910 sge_sz = sizeof(struct megasas_sge_skinny); 3911 3912 /* 3913 * For MFI controllers. 3914 * max_num_sge = 60 3915 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 3916 * Total 960 byte (15 MFI frame of 64 byte) 3917 * 3918 * Fusion adapter require only 3 extra frame. 3919 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 3920 * max_sge_sz = 12 byte (sizeof megasas_sge64) 3921 * Total 192 byte (3 MFI frame of 64 byte) 3922 */ 3923 frame_count = instance->ctrl_context ? 
(3 + 1) : (15 + 1); 3924 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 3925 /* 3926 * Use DMA pool facility provided by PCI layer 3927 */ 3928 instance->frame_dma_pool = pci_pool_create("megasas frame pool", 3929 instance->pdev, instance->mfi_frame_size, 3930 256, 0); 3931 3932 if (!instance->frame_dma_pool) { 3933 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 3934 return -ENOMEM; 3935 } 3936 3937 instance->sense_dma_pool = pci_pool_create("megasas sense pool", 3938 instance->pdev, 128, 4, 0); 3939 3940 if (!instance->sense_dma_pool) { 3941 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 3942 3943 pci_pool_destroy(instance->frame_dma_pool); 3944 instance->frame_dma_pool = NULL; 3945 3946 return -ENOMEM; 3947 } 3948 3949 /* 3950 * Allocate and attach a frame to each of the commands in cmd_list. 3951 * By making cmd->index as the context instead of the &cmd, we can 3952 * always use 32bit context regardless of the architecture 3953 */ 3954 for (i = 0; i < max_cmd; i++) { 3955 3956 cmd = instance->cmd_list[i]; 3957 3958 cmd->frame = pci_pool_alloc(instance->frame_dma_pool, 3959 GFP_KERNEL, &cmd->frame_phys_addr); 3960 3961 cmd->sense = pci_pool_alloc(instance->sense_dma_pool, 3962 GFP_KERNEL, &cmd->sense_phys_addr); 3963 3964 /* 3965 * megasas_teardown_frame_pool() takes care of freeing 3966 * whatever has been allocated 3967 */ 3968 if (!cmd->frame || !cmd->sense) { 3969 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); 3970 megasas_teardown_frame_pool(instance); 3971 return -ENOMEM; 3972 } 3973 3974 memset(cmd->frame, 0, instance->mfi_frame_size); 3975 cmd->frame->io.context = cpu_to_le32(cmd->index); 3976 cmd->frame->io.pad_0 = 0; 3977 if (!instance->ctrl_context && reset_devices) 3978 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 3979 } 3980 3981 return 0; 3982 } 3983 3984 /** 3985 * megasas_free_cmds - Free all the cmds in the free cmd pool 3986 * @instance: Adapter soft state 3987 */ 3988 void megasas_free_cmds(struct megasas_instance *instance) 3989 { 3990 int i; 3991 3992 /* First free the MFI frame pool */ 3993 megasas_teardown_frame_pool(instance); 3994 3995 /* Free all the commands in the cmd_list */ 3996 for (i = 0; i < instance->max_mfi_cmds; i++) 3997 3998 kfree(instance->cmd_list[i]); 3999 4000 /* Free the cmd_list buffer itself */ 4001 kfree(instance->cmd_list); 4002 instance->cmd_list = NULL; 4003 4004 INIT_LIST_HEAD(&instance->cmd_pool); 4005 } 4006 4007 /** 4008 * megasas_alloc_cmds - Allocates the command packets 4009 * @instance: Adapter soft state 4010 * 4011 * Each command that is issued to the FW, whether IO commands from the OS or 4012 * internal commands like IOCTLs, are wrapped in local data structure called 4013 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4014 * the FW. 4015 * 4016 * Each frame has a 32-bit field called context (tag). This context is used 4017 * to get back the megasas_cmd from the frame when a frame gets completed in 4018 * the ISR. Typically the address of the megasas_cmd itself would be used as 4019 * the context. But we wanted to keep the differences between 32 and 64 bit 4020 * systems to the mininum. We always use 32 bit integers for the context. In 4021 * this driver, the 32 bit values are the indices into an array cmd_list. 4022 * This array is used only to look up the megasas_cmd given the context. The 4023 * free commands themselves are maintained in a linked list called cmd_pool. 
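 *
 * A minimal lookup sketch (documentation only), assuming a completed frame
 * whose context field carries the command index as described above:
 *
 *	u32 context = le32_to_cpu(frame->context);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 *
 * Free commands move on and off instance->cmd_pool with the usual
 * list_add_tail()/list_del() pairing under the driver's pool spinlock.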
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u16 max_cmd;
	struct megasas_cmd *cmd;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd.
	 * Propagate a frame pool failure so that callers can unwind.
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}
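/*
 * Usage sketch (documentation only, not driver logic): the command pool is
 * normally created once during adapter init and torn down symmetrically.
 * This mirrors how megasas_init_adapter_mfi() further down calls these
 * helpers; error handling is abbreviated here.
 *
 *	if (megasas_alloc_cmds(instance))
 *		goto fail_alloc_cmds;
 *	...
 *	megasas_free_cmds(instance);
 */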
/*
 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
 * @instance: Adapter soft state
 *
 * Returns KILL_ADAPTER for MFI (non-Fusion) adapters, IGNORE_TIMEOUT when a
 * driver load/unload or an OCR is already in progress, and INITIATE_OCR
 * otherwise.
 */
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
	if (!instance->ctrl_context)
		return KILL_ADAPTER;
	else if (instance->unload ||
		 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
		return IGNORE_TIMEOUT;
	else
		return INITIATE_OCR;
}

static void
megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
{
	int ret;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	struct MR_PRIV_DEVICE *mr_device_priv_data;
	u16 device_id = 0;

	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
		return;
	}

	dcmd = &cmd->frame->dcmd;

	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.s[0] = cpu_to_le16(device_id);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_SUCCESS:
		mr_device_priv_data = sdev->hostdata;
		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
		mr_device_priv_data->interface_type =
			instance->pd_info->state.ddf.pdType.intf;
		break;

	case DCMD_TIMEOUT:

		switch (dcmd_timeout_ocr_possible(instance)) {
		case INITIATE_OCR:
			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			megasas_reset_fusion(instance->host,
				MFI_IO_TIMEOUT_OCR);
			break;
		case KILL_ADAPTER:
			megaraid_sas_kill_hba(instance);
			break;
		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				 __func__, __LINE__);
			break;
		}

		break;
	}

	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
}

/*
 * megasas_get_pd_list - Returns FW's pd_list structure
 * @instance: Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure. This information is mainly used to find out which
 * system PDs are exposed by the FW.
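 *
 * Sketch of how the returned list is consumed (documentation only): each
 * MR_PD_ADDRESS entry is copied into instance->local_pd_list, indexed by
 * device ID, and the whole array is then mirrored into instance->pd_list:
 *
 *	for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
 *		instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
 *			le16_to_cpu(pd_addr->deviceId);
 *		pd_addr++;
 *	}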
4187 */ 4188 static int 4189 megasas_get_pd_list(struct megasas_instance *instance) 4190 { 4191 int ret = 0, pd_index = 0; 4192 struct megasas_cmd *cmd; 4193 struct megasas_dcmd_frame *dcmd; 4194 struct MR_PD_LIST *ci; 4195 struct MR_PD_ADDRESS *pd_addr; 4196 dma_addr_t ci_h = 0; 4197 4198 if (instance->pd_list_not_supported) { 4199 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4200 "not supported by firmware\n"); 4201 return ret; 4202 } 4203 4204 cmd = megasas_get_cmd(instance); 4205 4206 if (!cmd) { 4207 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4208 return -ENOMEM; 4209 } 4210 4211 dcmd = &cmd->frame->dcmd; 4212 4213 ci = pci_alloc_consistent(instance->pdev, 4214 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h); 4215 4216 if (!ci) { 4217 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n"); 4218 megasas_return_cmd(instance, cmd); 4219 return -ENOMEM; 4220 } 4221 4222 memset(ci, 0, sizeof(*ci)); 4223 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4224 4225 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4226 dcmd->mbox.b[1] = 0; 4227 dcmd->cmd = MFI_CMD_DCMD; 4228 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4229 dcmd->sge_count = 1; 4230 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4231 dcmd->timeout = 0; 4232 dcmd->pad_0 = 0; 4233 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4234 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4235 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4236 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4237 4238 if (instance->ctrl_context && !instance->mask_interrupts) 4239 ret = megasas_issue_blocked_cmd(instance, cmd, 4240 MFI_IO_TIMEOUT_SECS); 4241 else 4242 ret = megasas_issue_polled(instance, cmd); 4243 4244 switch (ret) { 4245 case DCMD_FAILED: 4246 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4247 "failed/not supported by firmware\n"); 4248 4249 if (instance->ctrl_context) 4250 megaraid_sas_kill_hba(instance); 4251 else 4252 instance->pd_list_not_supported = 1; 4253 break; 4254 case DCMD_TIMEOUT: 4255 4256 switch (dcmd_timeout_ocr_possible(instance)) { 4257 case INITIATE_OCR: 4258 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4259 /* 4260 * DCMD failed from AEN path. 4261 * AEN path already hold reset_mutex to avoid PCI access 4262 * while OCR is in progress. 
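 * The lock is therefore dropped across the reset call and re-taken
 * afterwards (the reset path manages reset_mutex on its own), mirroring
 * the code that follows (documentation only):
 *
 *	mutex_unlock(&instance->reset_mutex);
 *	megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *	mutex_lock(&instance->reset_mutex);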
4263 */ 4264 mutex_unlock(&instance->reset_mutex); 4265 megasas_reset_fusion(instance->host, 4266 MFI_IO_TIMEOUT_OCR); 4267 mutex_lock(&instance->reset_mutex); 4268 break; 4269 case KILL_ADAPTER: 4270 megaraid_sas_kill_hba(instance); 4271 break; 4272 case IGNORE_TIMEOUT: 4273 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4274 __func__, __LINE__); 4275 break; 4276 } 4277 4278 break; 4279 4280 case DCMD_SUCCESS: 4281 pd_addr = ci->addr; 4282 4283 if ((le32_to_cpu(ci->count) > 4284 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4285 break; 4286 4287 memset(instance->local_pd_list, 0, 4288 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4289 4290 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4291 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4292 le16_to_cpu(pd_addr->deviceId); 4293 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4294 pd_addr->scsiDevType; 4295 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4296 MR_PD_STATE_SYSTEM; 4297 pd_addr++; 4298 } 4299 4300 memcpy(instance->pd_list, instance->local_pd_list, 4301 sizeof(instance->pd_list)); 4302 break; 4303 4304 } 4305 4306 pci_free_consistent(instance->pdev, 4307 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 4308 ci, ci_h); 4309 4310 if (ret != DCMD_TIMEOUT) 4311 megasas_return_cmd(instance, cmd); 4312 4313 return ret; 4314 } 4315 4316 /* 4317 * megasas_get_ld_list_info - Returns FW's ld_list structure 4318 * @instance: Adapter soft state 4319 * @ld_list: ld_list structure 4320 * 4321 * Issues an internal command (DCMD) to get the FW's controller PD 4322 * list structure. This information is mainly used to find out SYSTEM 4323 * supported by the FW. 4324 */ 4325 static int 4326 megasas_get_ld_list(struct megasas_instance *instance) 4327 { 4328 int ret = 0, ld_index = 0, ids = 0; 4329 struct megasas_cmd *cmd; 4330 struct megasas_dcmd_frame *dcmd; 4331 struct MR_LD_LIST *ci; 4332 dma_addr_t ci_h = 0; 4333 u32 ld_count; 4334 4335 cmd = megasas_get_cmd(instance); 4336 4337 if (!cmd) { 4338 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4339 return -ENOMEM; 4340 } 4341 4342 dcmd = &cmd->frame->dcmd; 4343 4344 ci = pci_alloc_consistent(instance->pdev, 4345 sizeof(struct MR_LD_LIST), 4346 &ci_h); 4347 4348 if (!ci) { 4349 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n"); 4350 megasas_return_cmd(instance, cmd); 4351 return -ENOMEM; 4352 } 4353 4354 memset(ci, 0, sizeof(*ci)); 4355 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4356 4357 if (instance->supportmax256vd) 4358 dcmd->mbox.b[0] = 1; 4359 dcmd->cmd = MFI_CMD_DCMD; 4360 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4361 dcmd->sge_count = 1; 4362 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4363 dcmd->timeout = 0; 4364 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4365 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4366 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4367 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4368 dcmd->pad_0 = 0; 4369 4370 if (instance->ctrl_context && !instance->mask_interrupts) 4371 ret = megasas_issue_blocked_cmd(instance, cmd, 4372 MFI_IO_TIMEOUT_SECS); 4373 else 4374 ret = megasas_issue_polled(instance, cmd); 4375 4376 ld_count = le32_to_cpu(ci->ldCount); 4377 4378 switch (ret) { 4379 case DCMD_FAILED: 4380 megaraid_sas_kill_hba(instance); 4381 break; 4382 case DCMD_TIMEOUT: 4383 4384 switch (dcmd_timeout_ocr_possible(instance)) { 4385 
case INITIATE_OCR: 4386 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4387 /* 4388 * DCMD failed from AEN path. 4389 * AEN path already hold reset_mutex to avoid PCI access 4390 * while OCR is in progress. 4391 */ 4392 mutex_unlock(&instance->reset_mutex); 4393 megasas_reset_fusion(instance->host, 4394 MFI_IO_TIMEOUT_OCR); 4395 mutex_lock(&instance->reset_mutex); 4396 break; 4397 case KILL_ADAPTER: 4398 megaraid_sas_kill_hba(instance); 4399 break; 4400 case IGNORE_TIMEOUT: 4401 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4402 __func__, __LINE__); 4403 break; 4404 } 4405 4406 break; 4407 4408 case DCMD_SUCCESS: 4409 if (ld_count > instance->fw_supported_vd_count) 4410 break; 4411 4412 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4413 4414 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4415 if (ci->ldList[ld_index].state != 0) { 4416 ids = ci->ldList[ld_index].ref.targetId; 4417 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4418 } 4419 } 4420 4421 break; 4422 } 4423 4424 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); 4425 4426 if (ret != DCMD_TIMEOUT) 4427 megasas_return_cmd(instance, cmd); 4428 4429 return ret; 4430 } 4431 4432 /** 4433 * megasas_ld_list_query - Returns FW's ld_list structure 4434 * @instance: Adapter soft state 4435 * @ld_list: ld_list structure 4436 * 4437 * Issues an internal command (DCMD) to get the FW's controller PD 4438 * list structure. This information is mainly used to find out SYSTEM 4439 * supported by the FW. 4440 */ 4441 static int 4442 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4443 { 4444 int ret = 0, ld_index = 0, ids = 0; 4445 struct megasas_cmd *cmd; 4446 struct megasas_dcmd_frame *dcmd; 4447 struct MR_LD_TARGETID_LIST *ci; 4448 dma_addr_t ci_h = 0; 4449 u32 tgtid_count; 4450 4451 cmd = megasas_get_cmd(instance); 4452 4453 if (!cmd) { 4454 dev_warn(&instance->pdev->dev, 4455 "megasas_ld_list_query: Failed to get cmd\n"); 4456 return -ENOMEM; 4457 } 4458 4459 dcmd = &cmd->frame->dcmd; 4460 4461 ci = pci_alloc_consistent(instance->pdev, 4462 sizeof(struct MR_LD_TARGETID_LIST), &ci_h); 4463 4464 if (!ci) { 4465 dev_warn(&instance->pdev->dev, 4466 "Failed to alloc mem for ld_list_query\n"); 4467 megasas_return_cmd(instance, cmd); 4468 return -ENOMEM; 4469 } 4470 4471 memset(ci, 0, sizeof(*ci)); 4472 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4473 4474 dcmd->mbox.b[0] = query_type; 4475 if (instance->supportmax256vd) 4476 dcmd->mbox.b[2] = 1; 4477 4478 dcmd->cmd = MFI_CMD_DCMD; 4479 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4480 dcmd->sge_count = 1; 4481 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4482 dcmd->timeout = 0; 4483 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4484 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4485 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4486 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4487 dcmd->pad_0 = 0; 4488 4489 if (instance->ctrl_context && !instance->mask_interrupts) 4490 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4491 else 4492 ret = megasas_issue_polled(instance, cmd); 4493 4494 switch (ret) { 4495 case DCMD_FAILED: 4496 dev_info(&instance->pdev->dev, 4497 "DCMD not supported by firmware - %s %d\n", 4498 __func__, __LINE__); 4499 ret = megasas_get_ld_list(instance); 4500 break; 4501 case DCMD_TIMEOUT: 4502 switch (dcmd_timeout_ocr_possible(instance)) { 4503 case INITIATE_OCR: 4504 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4505 /* 4506 
* DCMD failed from AEN path. 4507 * AEN path already hold reset_mutex to avoid PCI access 4508 * while OCR is in progress. 4509 */ 4510 mutex_unlock(&instance->reset_mutex); 4511 megasas_reset_fusion(instance->host, 4512 MFI_IO_TIMEOUT_OCR); 4513 mutex_lock(&instance->reset_mutex); 4514 break; 4515 case KILL_ADAPTER: 4516 megaraid_sas_kill_hba(instance); 4517 break; 4518 case IGNORE_TIMEOUT: 4519 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4520 __func__, __LINE__); 4521 break; 4522 } 4523 4524 break; 4525 case DCMD_SUCCESS: 4526 tgtid_count = le32_to_cpu(ci->count); 4527 4528 if ((tgtid_count > (instance->fw_supported_vd_count))) 4529 break; 4530 4531 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4532 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4533 ids = ci->targetId[ld_index]; 4534 instance->ld_ids[ids] = ci->targetId[ld_index]; 4535 } 4536 4537 break; 4538 } 4539 4540 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 4541 ci, ci_h); 4542 4543 if (ret != DCMD_TIMEOUT) 4544 megasas_return_cmd(instance, cmd); 4545 4546 return ret; 4547 } 4548 4549 /* 4550 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4551 * instance : Controller's instance 4552 */ 4553 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4554 { 4555 struct fusion_context *fusion; 4556 u32 ventura_map_sz = 0; 4557 4558 fusion = instance->ctrl_context; 4559 /* For MFI based controllers return dummy success */ 4560 if (!fusion) 4561 return; 4562 4563 instance->supportmax256vd = 4564 instance->ctrl_info->adapterOperations3.supportMaxExtLDs; 4565 /* Below is additional check to address future FW enhancement */ 4566 if (instance->ctrl_info->max_lds > 64) 4567 instance->supportmax256vd = 1; 4568 4569 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4570 * MEGASAS_MAX_DEV_PER_CHANNEL; 4571 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4572 * MEGASAS_MAX_DEV_PER_CHANNEL; 4573 if (instance->supportmax256vd) { 4574 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4575 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4576 } else { 4577 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4578 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4579 } 4580 4581 dev_info(&instance->pdev->dev, 4582 "firmware type\t: %s\n", 4583 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4584 "Legacy(64 VD) firmware"); 4585 4586 if (instance->max_raid_mapsize) { 4587 ventura_map_sz = instance->max_raid_mapsize * 4588 MR_MIN_MAP_SIZE; /* 64k */ 4589 fusion->current_map_sz = ventura_map_sz; 4590 fusion->max_map_sz = ventura_map_sz; 4591 } else { 4592 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4593 (sizeof(struct MR_LD_SPAN_MAP) * 4594 (instance->fw_supported_vd_count - 1)); 4595 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4596 4597 fusion->max_map_sz = 4598 max(fusion->old_map_sz, fusion->new_map_sz); 4599 4600 if (instance->supportmax256vd) 4601 fusion->current_map_sz = fusion->new_map_sz; 4602 else 4603 fusion->current_map_sz = fusion->old_map_sz; 4604 } 4605 /* irrespective of FW raid maps, driver raid map is constant */ 4606 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4607 } 4608 4609 /** 4610 * megasas_get_controller_info - Returns FW's controller structure 4611 * @instance: Adapter soft state 4612 * 4613 * Issues an internal command (DCMD) to get the FW's controller structure. 
4614 * This information is mainly used to find out the maximum IO transfer per 4615 * command supported by the FW. 4616 */ 4617 int 4618 megasas_get_ctrl_info(struct megasas_instance *instance) 4619 { 4620 int ret = 0; 4621 struct megasas_cmd *cmd; 4622 struct megasas_dcmd_frame *dcmd; 4623 struct megasas_ctrl_info *ci; 4624 struct megasas_ctrl_info *ctrl_info; 4625 dma_addr_t ci_h = 0; 4626 4627 ctrl_info = instance->ctrl_info; 4628 4629 cmd = megasas_get_cmd(instance); 4630 4631 if (!cmd) { 4632 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4633 return -ENOMEM; 4634 } 4635 4636 dcmd = &cmd->frame->dcmd; 4637 4638 ci = pci_alloc_consistent(instance->pdev, 4639 sizeof(struct megasas_ctrl_info), &ci_h); 4640 4641 if (!ci) { 4642 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n"); 4643 megasas_return_cmd(instance, cmd); 4644 return -ENOMEM; 4645 } 4646 4647 memset(ci, 0, sizeof(*ci)); 4648 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4649 4650 dcmd->cmd = MFI_CMD_DCMD; 4651 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4652 dcmd->sge_count = 1; 4653 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4654 dcmd->timeout = 0; 4655 dcmd->pad_0 = 0; 4656 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4657 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4658 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4659 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4660 dcmd->mbox.b[0] = 1; 4661 4662 if (instance->ctrl_context && !instance->mask_interrupts) 4663 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4664 else 4665 ret = megasas_issue_polled(instance, cmd); 4666 4667 switch (ret) { 4668 case DCMD_SUCCESS: 4669 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4670 /* Save required controller information in 4671 * CPU endianness format. 4672 */ 4673 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4674 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4675 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4676 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4); 4677 4678 /* Update the latest Ext VD info. 4679 * From Init path, store current firmware details. 4680 * From OCR path, detect any firmware properties changes. 4681 * in case of Firmware upgrade without system reboot. 4682 */ 4683 megasas_update_ext_vd_details(instance); 4684 instance->use_seqnum_jbod_fp = 4685 ctrl_info->adapterOperations3.useSeqNumJbodFP; 4686 instance->support_morethan256jbod = 4687 ctrl_info->adapter_operations4.support_pd_map_target_id; 4688 4689 /*Check whether controller is iMR or MR */ 4690 instance->is_imr = (ctrl_info->memory_size ? 0 : 1); 4691 dev_info(&instance->pdev->dev, 4692 "controller type\t: %s(%dMB)\n", 4693 instance->is_imr ? "iMR" : "MR", 4694 le16_to_cpu(ctrl_info->memory_size)); 4695 4696 instance->disableOnlineCtrlReset = 4697 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4698 instance->secure_jbod_support = 4699 ctrl_info->adapterOperations3.supportSecurityonJBOD; 4700 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4701 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4702 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4703 instance->secure_jbod_support ? 
"Yes" : "No"); 4704 break; 4705 4706 case DCMD_TIMEOUT: 4707 switch (dcmd_timeout_ocr_possible(instance)) { 4708 case INITIATE_OCR: 4709 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4710 megasas_reset_fusion(instance->host, 4711 MFI_IO_TIMEOUT_OCR); 4712 break; 4713 case KILL_ADAPTER: 4714 megaraid_sas_kill_hba(instance); 4715 break; 4716 case IGNORE_TIMEOUT: 4717 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4718 __func__, __LINE__); 4719 break; 4720 } 4721 case DCMD_FAILED: 4722 megaraid_sas_kill_hba(instance); 4723 break; 4724 4725 } 4726 4727 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4728 ci, ci_h); 4729 4730 megasas_return_cmd(instance, cmd); 4731 4732 4733 return ret; 4734 } 4735 4736 /* 4737 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 4738 * to firmware 4739 * 4740 * @instance: Adapter soft state 4741 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 4742 MR_CRASH_BUF_TURN_OFF = 0 4743 MR_CRASH_BUF_TURN_ON = 1 4744 * @return 0 on success non-zero on failure. 4745 * Issues an internal command (DCMD) to set parameters for crash dump feature. 4746 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 4747 * that driver supports crash dump feature. This DCMD will be sent only if 4748 * crash dump feature is supported by the FW. 4749 * 4750 */ 4751 int megasas_set_crash_dump_params(struct megasas_instance *instance, 4752 u8 crash_buf_state) 4753 { 4754 int ret = 0; 4755 struct megasas_cmd *cmd; 4756 struct megasas_dcmd_frame *dcmd; 4757 4758 cmd = megasas_get_cmd(instance); 4759 4760 if (!cmd) { 4761 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 4762 return -ENOMEM; 4763 } 4764 4765 4766 dcmd = &cmd->frame->dcmd; 4767 4768 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4769 dcmd->mbox.b[0] = crash_buf_state; 4770 dcmd->cmd = MFI_CMD_DCMD; 4771 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4772 dcmd->sge_count = 1; 4773 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4774 dcmd->timeout = 0; 4775 dcmd->pad_0 = 0; 4776 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4777 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 4778 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); 4779 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4780 4781 if (instance->ctrl_context && !instance->mask_interrupts) 4782 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4783 else 4784 ret = megasas_issue_polled(instance, cmd); 4785 4786 if (ret == DCMD_TIMEOUT) { 4787 switch (dcmd_timeout_ocr_possible(instance)) { 4788 case INITIATE_OCR: 4789 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4790 megasas_reset_fusion(instance->host, 4791 MFI_IO_TIMEOUT_OCR); 4792 break; 4793 case KILL_ADAPTER: 4794 megaraid_sas_kill_hba(instance); 4795 break; 4796 case IGNORE_TIMEOUT: 4797 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4798 __func__, __LINE__); 4799 break; 4800 } 4801 } else 4802 megasas_return_cmd(instance, cmd); 4803 4804 return ret; 4805 } 4806 4807 /** 4808 * megasas_issue_init_mfi - Initializes the FW 4809 * @instance: Adapter soft state 4810 * 4811 * Issues the INIT MFI cmd 4812 */ 4813 static int 4814 megasas_issue_init_mfi(struct megasas_instance *instance) 4815 { 4816 __le32 context; 4817 struct megasas_cmd *cmd; 4818 struct megasas_init_frame *init_frame; 4819 struct megasas_init_queue_info *initq_info; 4820 dma_addr_t init_frame_h; 4821 dma_addr_t initq_info_h; 4822 4823 /* 4824 * Prepare a init frame. 
Note the init frame points to queue info 4825 * structure. Each frame has SGL allocated after first 64 bytes. For 4826 * this frame - since we don't need any SGL - we use SGL's space as 4827 * queue info structure 4828 * 4829 * We will not get a NULL command below. We just created the pool. 4830 */ 4831 cmd = megasas_get_cmd(instance); 4832 4833 init_frame = (struct megasas_init_frame *)cmd->frame; 4834 initq_info = (struct megasas_init_queue_info *) 4835 ((unsigned long)init_frame + 64); 4836 4837 init_frame_h = cmd->frame_phys_addr; 4838 initq_info_h = init_frame_h + 64; 4839 4840 context = init_frame->context; 4841 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 4842 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 4843 init_frame->context = context; 4844 4845 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 4846 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 4847 4848 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 4849 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 4850 4851 init_frame->cmd = MFI_CMD_INIT; 4852 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 4853 init_frame->queue_info_new_phys_addr_lo = 4854 cpu_to_le32(lower_32_bits(initq_info_h)); 4855 init_frame->queue_info_new_phys_addr_hi = 4856 cpu_to_le32(upper_32_bits(initq_info_h)); 4857 4858 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 4859 4860 /* 4861 * disable the intr before firing the init frame to FW 4862 */ 4863 instance->instancet->disable_intr(instance); 4864 4865 /* 4866 * Issue the init frame in polled mode 4867 */ 4868 4869 if (megasas_issue_polled(instance, cmd)) { 4870 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 4871 megasas_return_cmd(instance, cmd); 4872 goto fail_fw_init; 4873 } 4874 4875 megasas_return_cmd(instance, cmd); 4876 4877 return 0; 4878 4879 fail_fw_init: 4880 return -EINVAL; 4881 } 4882 4883 static u32 4884 megasas_init_adapter_mfi(struct megasas_instance *instance) 4885 { 4886 struct megasas_register_set __iomem *reg_set; 4887 u32 context_sz; 4888 u32 reply_q_sz; 4889 4890 reg_set = instance->reg_set; 4891 4892 /* 4893 * Get various operational parameters from status register 4894 */ 4895 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 4896 /* 4897 * Reduce the max supported cmds by 1. This is to ensure that the 4898 * reply_q_sz (1 more than the max cmd that driver may send) 4899 * does not exceed max cmds that the FW can support 4900 */ 4901 instance->max_fw_cmds = instance->max_fw_cmds-1; 4902 instance->max_mfi_cmds = instance->max_fw_cmds; 4903 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 4904 0x10; 4905 /* 4906 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 4907 * are reserved for IOCTL + driver's internal DCMDs. 
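 *
 * Illustrative arithmetic (documentation only; the figure is made up): if
 * the FW reported 1008 usable commands, a skinny adapter would expose
 * max_scsi_cmds = 1008 - MEGASAS_SKINNY_INT_CMDS to the midlayer and size
 * ioctl_sem with MEGASAS_SKINNY_INT_CMDS slots, while other MFI adapters
 * reserve MEGASAS_INT_CMDS and size ioctl_sem with MEGASAS_MFI_IOCTL_CMDS.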
4908 */ 4909 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4910 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 4911 instance->max_scsi_cmds = (instance->max_fw_cmds - 4912 MEGASAS_SKINNY_INT_CMDS); 4913 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 4914 } else { 4915 instance->max_scsi_cmds = (instance->max_fw_cmds - 4916 MEGASAS_INT_CMDS); 4917 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 4918 } 4919 4920 instance->cur_can_queue = instance->max_scsi_cmds; 4921 /* 4922 * Create a pool of commands 4923 */ 4924 if (megasas_alloc_cmds(instance)) 4925 goto fail_alloc_cmds; 4926 4927 /* 4928 * Allocate memory for reply queue. Length of reply queue should 4929 * be _one_ more than the maximum commands handled by the firmware. 4930 * 4931 * Note: When FW completes commands, it places corresponding contex 4932 * values in this circular reply queue. This circular queue is a fairly 4933 * typical producer-consumer queue. FW is the producer (of completed 4934 * commands) and the driver is the consumer. 4935 */ 4936 context_sz = sizeof(u32); 4937 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 4938 4939 instance->reply_queue = pci_alloc_consistent(instance->pdev, 4940 reply_q_sz, 4941 &instance->reply_queue_h); 4942 4943 if (!instance->reply_queue) { 4944 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 4945 goto fail_reply_queue; 4946 } 4947 4948 if (megasas_issue_init_mfi(instance)) 4949 goto fail_fw_init; 4950 4951 if (megasas_get_ctrl_info(instance)) { 4952 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4953 "Fail from %s %d\n", instance->unique_id, 4954 __func__, __LINE__); 4955 goto fail_fw_init; 4956 } 4957 4958 instance->fw_support_ieee = 0; 4959 instance->fw_support_ieee = 4960 (instance->instancet->read_fw_status_reg(reg_set) & 4961 0x04000000); 4962 4963 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 4964 instance->fw_support_ieee); 4965 4966 if (instance->fw_support_ieee) 4967 instance->flag_ieee = 1; 4968 4969 return 0; 4970 4971 fail_fw_init: 4972 4973 pci_free_consistent(instance->pdev, reply_q_sz, 4974 instance->reply_queue, instance->reply_queue_h); 4975 fail_reply_queue: 4976 megasas_free_cmds(instance); 4977 4978 fail_alloc_cmds: 4979 return 1; 4980 } 4981 4982 /* 4983 * megasas_setup_irqs_ioapic - register legacy interrupts. 4984 * @instance: Adapter soft state 4985 * 4986 * Do not enable interrupt, only setup ISRs. 4987 * 4988 * Return 0 on success. 4989 */ 4990 static int 4991 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 4992 { 4993 struct pci_dev *pdev; 4994 4995 pdev = instance->pdev; 4996 instance->irq_context[0].instance = instance; 4997 instance->irq_context[0].MSIxIndex = 0; 4998 if (request_irq(pci_irq_vector(pdev, 0), 4999 instance->instancet->service_isr, IRQF_SHARED, 5000 "megasas", &instance->irq_context[0])) { 5001 dev_err(&instance->pdev->dev, 5002 "Failed to register IRQ from %s %d\n", 5003 __func__, __LINE__); 5004 return -1; 5005 } 5006 return 0; 5007 } 5008 5009 /** 5010 * megasas_setup_irqs_msix - register MSI-x interrupts. 5011 * @instance: Adapter soft state 5012 * @is_probe: Driver probe check 5013 * 5014 * Do not enable interrupt, only setup ISRs. 5015 * 5016 * Return 0 on success. 
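 *
 * Rough shape of the registration loop (documentation only): one
 * megasas_irq_context is filled per vector and handed to request_irq();
 * if any vector fails, the already registered ones are freed and, when
 * called from probe, the driver falls back to the legacy IO-APIC setup:
 *
 *	for (i = 0; i < instance->msix_vectors; i++)
 *		request_irq(pci_irq_vector(pdev, i),
 *			    instance->instancet->service_isr, 0,
 *			    "megasas", &instance->irq_context[i]);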
5017 */ 5018 static int 5019 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5020 { 5021 int i, j; 5022 struct pci_dev *pdev; 5023 5024 pdev = instance->pdev; 5025 5026 /* Try MSI-x */ 5027 for (i = 0; i < instance->msix_vectors; i++) { 5028 instance->irq_context[i].instance = instance; 5029 instance->irq_context[i].MSIxIndex = i; 5030 if (request_irq(pci_irq_vector(pdev, i), 5031 instance->instancet->service_isr, 0, "megasas", 5032 &instance->irq_context[i])) { 5033 dev_err(&instance->pdev->dev, 5034 "Failed to register IRQ for vector %d.\n", i); 5035 for (j = 0; j < i; j++) 5036 free_irq(pci_irq_vector(pdev, j), 5037 &instance->irq_context[j]); 5038 /* Retry irq register for IO_APIC*/ 5039 instance->msix_vectors = 0; 5040 if (is_probe) { 5041 pci_free_irq_vectors(instance->pdev); 5042 return megasas_setup_irqs_ioapic(instance); 5043 } else { 5044 return -1; 5045 } 5046 } 5047 } 5048 return 0; 5049 } 5050 5051 /* 5052 * megasas_destroy_irqs- unregister interrupts. 5053 * @instance: Adapter soft state 5054 * return: void 5055 */ 5056 static void 5057 megasas_destroy_irqs(struct megasas_instance *instance) { 5058 5059 int i; 5060 5061 if (instance->msix_vectors) 5062 for (i = 0; i < instance->msix_vectors; i++) { 5063 free_irq(pci_irq_vector(instance->pdev, i), 5064 &instance->irq_context[i]); 5065 } 5066 else 5067 free_irq(pci_irq_vector(instance->pdev, 0), 5068 &instance->irq_context[0]); 5069 } 5070 5071 /** 5072 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5073 * @instance: Adapter soft state 5074 * @is_probe: Driver probe check 5075 * 5076 * Return 0 on success. 5077 */ 5078 void 5079 megasas_setup_jbod_map(struct megasas_instance *instance) 5080 { 5081 int i; 5082 struct fusion_context *fusion = instance->ctrl_context; 5083 u32 pd_seq_map_sz; 5084 5085 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 5086 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5087 5088 if (reset_devices || !fusion || 5089 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 5090 dev_info(&instance->pdev->dev, 5091 "Jbod map is not supported %s %d\n", 5092 __func__, __LINE__); 5093 instance->use_seqnum_jbod_fp = false; 5094 return; 5095 } 5096 5097 if (fusion->pd_seq_sync[0]) 5098 goto skip_alloc; 5099 5100 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5101 fusion->pd_seq_sync[i] = dma_alloc_coherent 5102 (&instance->pdev->dev, pd_seq_map_sz, 5103 &fusion->pd_seq_phys[i], GFP_KERNEL); 5104 if (!fusion->pd_seq_sync[i]) { 5105 dev_err(&instance->pdev->dev, 5106 "Failed to allocate memory from %s %d\n", 5107 __func__, __LINE__); 5108 if (i == 1) { 5109 dma_free_coherent(&instance->pdev->dev, 5110 pd_seq_map_sz, fusion->pd_seq_sync[0], 5111 fusion->pd_seq_phys[0]); 5112 fusion->pd_seq_sync[0] = NULL; 5113 } 5114 instance->use_seqnum_jbod_fp = false; 5115 return; 5116 } 5117 } 5118 5119 skip_alloc: 5120 if (!megasas_sync_pd_seq_num(instance, false) && 5121 !megasas_sync_pd_seq_num(instance, true)) 5122 instance->use_seqnum_jbod_fp = true; 5123 else 5124 instance->use_seqnum_jbod_fp = false; 5125 } 5126 5127 /** 5128 * megasas_init_fw - Initializes the FW 5129 * @instance: Adapter soft state 5130 * 5131 * This is the main function for initializing firmware 5132 */ 5133 5134 static int megasas_init_fw(struct megasas_instance *instance) 5135 { 5136 u32 max_sectors_1; 5137 u32 max_sectors_2, tmp_sectors, msix_enable; 5138 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; 5139 resource_size_t base_addr; 5140 struct megasas_register_set __iomem *reg_set; 5141 
struct megasas_ctrl_info *ctrl_info = NULL; 5142 unsigned long bar_list; 5143 int i, j, loop, fw_msix_count = 0; 5144 struct IOV_111 *iovPtr; 5145 struct fusion_context *fusion; 5146 5147 fusion = instance->ctrl_context; 5148 5149 /* Find first memory bar */ 5150 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5151 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5152 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5153 "megasas: LSI")) { 5154 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5155 return -EBUSY; 5156 } 5157 5158 base_addr = pci_resource_start(instance->pdev, instance->bar); 5159 instance->reg_set = ioremap_nocache(base_addr, 8192); 5160 5161 if (!instance->reg_set) { 5162 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5163 goto fail_ioremap; 5164 } 5165 5166 reg_set = instance->reg_set; 5167 5168 if (fusion) 5169 instance->instancet = &megasas_instance_template_fusion; 5170 else { 5171 switch (instance->pdev->device) { 5172 case PCI_DEVICE_ID_LSI_SAS1078R: 5173 case PCI_DEVICE_ID_LSI_SAS1078DE: 5174 instance->instancet = &megasas_instance_template_ppc; 5175 break; 5176 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5177 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5178 instance->instancet = &megasas_instance_template_gen2; 5179 break; 5180 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5181 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5182 instance->instancet = &megasas_instance_template_skinny; 5183 break; 5184 case PCI_DEVICE_ID_LSI_SAS1064R: 5185 case PCI_DEVICE_ID_DELL_PERC5: 5186 default: 5187 instance->instancet = &megasas_instance_template_xscale; 5188 instance->pd_list_not_supported = 1; 5189 break; 5190 } 5191 } 5192 5193 if (megasas_transition_to_ready(instance, 0)) { 5194 atomic_set(&instance->fw_reset_no_pci_access, 1); 5195 instance->instancet->adp_reset 5196 (instance, instance->reg_set); 5197 atomic_set(&instance->fw_reset_no_pci_access, 0); 5198 dev_info(&instance->pdev->dev, 5199 "FW restarted successfully from %s!\n", 5200 __func__); 5201 5202 /*waitting for about 30 second before retry*/ 5203 ssleep(30); 5204 5205 if (megasas_transition_to_ready(instance, 0)) 5206 goto fail_ready_state; 5207 } 5208 5209 if (instance->is_ventura) { 5210 scratch_pad_3 = 5211 readl(&instance->reg_set->outbound_scratch_pad_3); 5212 instance->max_raid_mapsize = ((scratch_pad_3 >> 5213 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5214 MR_MAX_RAID_MAP_SIZE_MASK); 5215 } 5216 5217 /* Check if MSI-X is supported while in ready state */ 5218 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5219 0x4000000) >> 0x1a; 5220 if (msix_enable && !msix_disable) { 5221 int irq_flags = PCI_IRQ_MSIX; 5222 5223 scratch_pad_2 = readl 5224 (&instance->reg_set->outbound_scratch_pad_2); 5225 /* Check max MSI-X vectors */ 5226 if (fusion) { 5227 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ 5228 instance->msix_vectors = (scratch_pad_2 5229 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5230 fw_msix_count = instance->msix_vectors; 5231 } else { /* Invader series supports more than 8 MSI-x vectors*/ 5232 instance->msix_vectors = ((scratch_pad_2 5233 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5234 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5235 if (instance->msix_vectors > 16) 5236 instance->msix_combined = true; 5237 5238 if (rdpq_enable) 5239 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 
5240 1 : 0; 5241 fw_msix_count = instance->msix_vectors; 5242 /* Save 1-15 reply post index address to local memory 5243 * Index 0 is already saved from reg offset 5244 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5245 */ 5246 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5247 instance->reply_post_host_index_addr[loop] = 5248 (u32 __iomem *) 5249 ((u8 __iomem *)instance->reg_set + 5250 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5251 + (loop * 0x10)); 5252 } 5253 } 5254 if (msix_vectors) 5255 instance->msix_vectors = min(msix_vectors, 5256 instance->msix_vectors); 5257 } else /* MFI adapters */ 5258 instance->msix_vectors = 1; 5259 /* Don't bother allocating more MSI-X vectors than cpus */ 5260 instance->msix_vectors = min(instance->msix_vectors, 5261 (unsigned int)num_online_cpus()); 5262 if (smp_affinity_enable) 5263 irq_flags |= PCI_IRQ_AFFINITY; 5264 i = pci_alloc_irq_vectors(instance->pdev, 1, 5265 instance->msix_vectors, irq_flags); 5266 if (i > 0) 5267 instance->msix_vectors = i; 5268 else 5269 instance->msix_vectors = 0; 5270 } 5271 /* 5272 * MSI-X host index 0 is common for all adapter. 5273 * It is used for all MPT based Adapters. 5274 */ 5275 if (instance->msix_combined) { 5276 instance->reply_post_host_index_addr[0] = 5277 (u32 *)((u8 *)instance->reg_set + 5278 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 5279 } else { 5280 instance->reply_post_host_index_addr[0] = 5281 (u32 *)((u8 *)instance->reg_set + 5282 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5283 } 5284 5285 if (!instance->msix_vectors) { 5286 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5287 if (i < 0) 5288 goto fail_setup_irqs; 5289 } 5290 5291 dev_info(&instance->pdev->dev, 5292 "firmware supports msix\t: (%d)", fw_msix_count); 5293 dev_info(&instance->pdev->dev, 5294 "current msix/online cpus\t: (%d/%d)\n", 5295 instance->msix_vectors, (unsigned int)num_online_cpus()); 5296 dev_info(&instance->pdev->dev, 5297 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5298 5299 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5300 (unsigned long)instance); 5301 5302 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), 5303 GFP_KERNEL); 5304 if (instance->ctrl_info == NULL) 5305 goto fail_init_adapter; 5306 5307 /* 5308 * Below are default value for legacy Firmware. 5309 * non-fusion based controllers 5310 */ 5311 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5312 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5313 /* Get operational params, sge flags, send init cmd to controller */ 5314 if (instance->instancet->init_adapter(instance)) 5315 goto fail_init_adapter; 5316 5317 if (instance->is_ventura) { 5318 scratch_pad_4 = 5319 readl(&instance->reg_set->outbound_scratch_pad_4); 5320 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= 5321 MR_DEFAULT_NVME_PAGE_SHIFT) 5322 instance->nvme_page_size = 5323 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK)); 5324 5325 dev_info(&instance->pdev->dev, 5326 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5327 } 5328 5329 if (instance->msix_vectors ? 5330 megasas_setup_irqs_msix(instance, 1) : 5331 megasas_setup_irqs_ioapic(instance)) 5332 goto fail_init_adapter; 5333 5334 instance->instancet->enable_intr(instance); 5335 5336 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5337 5338 megasas_setup_jbod_map(instance); 5339 5340 /** for passthrough 5341 * the following function will get the PD LIST. 
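 * A failure here is treated as fatal: the init path below jumps to
 * fail_get_ld_pd_list and unwinds interrupt enabling and IRQ registration.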
5342 */ 5343 memset(instance->pd_list, 0, 5344 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5345 if (megasas_get_pd_list(instance) < 0) { 5346 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5347 goto fail_get_ld_pd_list; 5348 } 5349 5350 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5351 5352 /* stream detection initialization */ 5353 if (instance->is_ventura && fusion) { 5354 fusion->stream_detect_by_ld = 5355 kzalloc(sizeof(struct LD_STREAM_DETECT *) 5356 * MAX_LOGICAL_DRIVES_EXT, 5357 GFP_KERNEL); 5358 if (!fusion->stream_detect_by_ld) { 5359 dev_err(&instance->pdev->dev, 5360 "unable to allocate stream detection for pool of LDs\n"); 5361 goto fail_get_ld_pd_list; 5362 } 5363 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5364 fusion->stream_detect_by_ld[i] = 5365 kmalloc(sizeof(struct LD_STREAM_DETECT), 5366 GFP_KERNEL); 5367 if (!fusion->stream_detect_by_ld[i]) { 5368 dev_err(&instance->pdev->dev, 5369 "unable to allocate stream detect by LD\n "); 5370 for (j = 0; j < i; ++j) 5371 kfree(fusion->stream_detect_by_ld[j]); 5372 kfree(fusion->stream_detect_by_ld); 5373 fusion->stream_detect_by_ld = NULL; 5374 goto fail_get_ld_pd_list; 5375 } 5376 fusion->stream_detect_by_ld[i]->mru_bit_map 5377 = MR_STREAM_BITMAP; 5378 } 5379 } 5380 5381 if (megasas_ld_list_query(instance, 5382 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5383 goto fail_get_ld_pd_list; 5384 5385 /* 5386 * Compute the max allowed sectors per IO: The controller info has two 5387 * limits on max sectors. Driver should use the minimum of these two. 5388 * 5389 * 1 << stripe_sz_ops.min = max sectors per strip 5390 * 5391 * Note that older firmwares ( < FW ver 30) didn't report information 5392 * to calculate max_sectors_1. So the number ended up as zero always. 5393 */ 5394 tmp_sectors = 0; 5395 ctrl_info = instance->ctrl_info; 5396 5397 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5398 le16_to_cpu(ctrl_info->max_strips_per_io); 5399 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5400 5401 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5402 5403 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5404 instance->passive = ctrl_info->cluster.passive; 5405 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5406 instance->UnevenSpanSupport = 5407 ctrl_info->adapterOperations2.supportUnevenSpans; 5408 if (instance->UnevenSpanSupport) { 5409 struct fusion_context *fusion = instance->ctrl_context; 5410 if (MR_ValidateMapInfo(instance)) 5411 fusion->fast_path_io = 1; 5412 else 5413 fusion->fast_path_io = 0; 5414 5415 } 5416 if (ctrl_info->host_interface.SRIOV) { 5417 instance->requestorId = ctrl_info->iov.requestorId; 5418 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5419 if (!ctrl_info->adapterOperations2.activePassive) 5420 instance->PlasmaFW111 = 1; 5421 5422 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5423 instance->PlasmaFW111 ? 
"1.11" : "new"); 5424 5425 if (instance->PlasmaFW111) { 5426 iovPtr = (struct IOV_111 *) 5427 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5428 instance->requestorId = iovPtr->requestorId; 5429 } 5430 } 5431 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5432 instance->requestorId); 5433 } 5434 5435 instance->crash_dump_fw_support = 5436 ctrl_info->adapterOperations3.supportCrashDump; 5437 instance->crash_dump_drv_support = 5438 (instance->crash_dump_fw_support && 5439 instance->crash_dump_buf); 5440 if (instance->crash_dump_drv_support) 5441 megasas_set_crash_dump_params(instance, 5442 MR_CRASH_BUF_TURN_OFF); 5443 5444 else { 5445 if (instance->crash_dump_buf) 5446 pci_free_consistent(instance->pdev, 5447 CRASH_DMA_BUF_SIZE, 5448 instance->crash_dump_buf, 5449 instance->crash_dump_h); 5450 instance->crash_dump_buf = NULL; 5451 } 5452 5453 5454 dev_info(&instance->pdev->dev, 5455 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5456 le16_to_cpu(ctrl_info->pci.vendor_id), 5457 le16_to_cpu(ctrl_info->pci.device_id), 5458 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5459 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5460 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5461 instance->UnevenSpanSupport ? "yes" : "no"); 5462 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5463 instance->crash_dump_drv_support ? "yes" : "no"); 5464 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5465 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5466 5467 5468 instance->max_sectors_per_req = instance->max_num_sge * 5469 SGE_BUFFER_SIZE / 512; 5470 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5471 instance->max_sectors_per_req = tmp_sectors; 5472 5473 /* Check for valid throttlequeuedepth module parameter */ 5474 if (throttlequeuedepth && 5475 throttlequeuedepth <= instance->max_scsi_cmds) 5476 instance->throttlequeuedepth = throttlequeuedepth; 5477 else 5478 instance->throttlequeuedepth = 5479 MEGASAS_THROTTLE_QUEUE_DEPTH; 5480 5481 if (resetwaittime > MEGASAS_RESET_WAIT_TIME) 5482 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5483 5484 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5485 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5486 5487 /* Launch SR-IOV heartbeat timer */ 5488 if (instance->requestorId) { 5489 if (!megasas_sriov_start_heartbeat(instance, 1)) 5490 megasas_start_timer(instance, 5491 &instance->sriov_heartbeat_timer, 5492 megasas_sriov_heartbeat_handler, 5493 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 5494 else 5495 instance->skip_heartbeat_timer_del = 1; 5496 } 5497 5498 return 0; 5499 5500 fail_get_ld_pd_list: 5501 instance->instancet->disable_intr(instance); 5502 fail_init_adapter: 5503 megasas_destroy_irqs(instance); 5504 fail_setup_irqs: 5505 if (instance->msix_vectors) 5506 pci_free_irq_vectors(instance->pdev); 5507 instance->msix_vectors = 0; 5508 fail_ready_state: 5509 kfree(instance->ctrl_info); 5510 instance->ctrl_info = NULL; 5511 iounmap(instance->reg_set); 5512 5513 fail_ioremap: 5514 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5515 5516 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 5517 __func__, __LINE__); 5518 return -EINVAL; 5519 } 5520 5521 /** 5522 * megasas_release_mfi - Reverses the FW initialization 5523 * @instance: Adapter soft state 5524 */ 5525 static void megasas_release_mfi(struct megasas_instance *instance) 5526 { 5527 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5528 5529 if (instance->reply_queue) 5530 pci_free_consistent(instance->pdev, 
					reply_q_sz,
				    instance->reply_queue, instance->reply_queue_h);

	megasas_free_cmds(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}

/**
 * megasas_get_seq_num -	Gets latest event sequence numbers
 * @instance:			Adapter soft state
 * @eli:			FW event log sequence numbers information
 *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * the boot etc. They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
 * wait for the events to happen.
 */
static int
megasas_get_seq_num(struct megasas_instance *instance,
		    struct megasas_evt_log_info *eli)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_evt_log_info *el_info;
	dma_addr_t el_info_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;
	el_info = pci_alloc_consistent(instance->pdev,
				       sizeof(struct megasas_evt_log_info),
				       &el_info_h);

	if (!el_info) {
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(el_info, 0, sizeof(*el_info));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));

	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
			DCMD_SUCCESS) {
		/*
		 * Copy the data back into the caller's buffer
		 */
		eli->newest_seq_num = el_info->newest_seq_num;
		eli->oldest_seq_num = el_info->oldest_seq_num;
		eli->clear_seq_num = el_info->clear_seq_num;
		eli->shutdown_seq_num = el_info->shutdown_seq_num;
		eli->boot_seq_num = el_info->boot_seq_num;
	} else
		dev_err(&instance->pdev->dev, "DCMD failed "
			"from %s\n", __func__);

	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
			    el_info, el_info_h);

	megasas_return_cmd(instance, cmd);

	return 0;
}

/**
 * megasas_register_aen -	Registers for asynchronous event notification
 * @instance:			Adapter soft state
 * @seq_num:			The starting sequence number
 * @class_locale:		Class of the event
 *
 * This function subscribes for AEN for events beyond the @seq_num.
It requests 5620 * to be notified if and only if the event is of type @class_locale 5621 */ 5622 static int 5623 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 5624 u32 class_locale_word) 5625 { 5626 int ret_val; 5627 struct megasas_cmd *cmd; 5628 struct megasas_dcmd_frame *dcmd; 5629 union megasas_evt_class_locale curr_aen; 5630 union megasas_evt_class_locale prev_aen; 5631 5632 /* 5633 * If there an AEN pending already (aen_cmd), check if the 5634 * class_locale of that pending AEN is inclusive of the new 5635 * AEN request we currently have. If it is, then we don't have 5636 * to do anything. In other words, whichever events the current 5637 * AEN request is subscribing to, have already been subscribed 5638 * to. 5639 * 5640 * If the old_cmd is _not_ inclusive, then we have to abort 5641 * that command, form a class_locale that is superset of both 5642 * old and current and re-issue to the FW 5643 */ 5644 5645 curr_aen.word = class_locale_word; 5646 5647 if (instance->aen_cmd) { 5648 5649 prev_aen.word = 5650 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 5651 5652 /* 5653 * A class whose enum value is smaller is inclusive of all 5654 * higher values. If a PROGRESS (= -1) was previously 5655 * registered, then a new registration requests for higher 5656 * classes need not be sent to FW. They are automatically 5657 * included. 5658 * 5659 * Locale numbers don't have such hierarchy. They are bitmap 5660 * values 5661 */ 5662 if ((prev_aen.members.class <= curr_aen.members.class) && 5663 !((prev_aen.members.locale & curr_aen.members.locale) ^ 5664 curr_aen.members.locale)) { 5665 /* 5666 * Previously issued event registration includes 5667 * current request. Nothing to do. 5668 */ 5669 return 0; 5670 } else { 5671 curr_aen.members.locale |= prev_aen.members.locale; 5672 5673 if (prev_aen.members.class < curr_aen.members.class) 5674 curr_aen.members.class = prev_aen.members.class; 5675 5676 instance->aen_cmd->abort_aen = 1; 5677 ret_val = megasas_issue_blocked_abort_cmd(instance, 5678 instance-> 5679 aen_cmd, 30); 5680 5681 if (ret_val) { 5682 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 5683 "previous AEN command\n"); 5684 return ret_val; 5685 } 5686 } 5687 } 5688 5689 cmd = megasas_get_cmd(instance); 5690 5691 if (!cmd) 5692 return -ENOMEM; 5693 5694 dcmd = &cmd->frame->dcmd; 5695 5696 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 5697 5698 /* 5699 * Prepare DCMD for aen registration 5700 */ 5701 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5702 5703 dcmd->cmd = MFI_CMD_DCMD; 5704 dcmd->cmd_status = 0x0; 5705 dcmd->sge_count = 1; 5706 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5707 dcmd->timeout = 0; 5708 dcmd->pad_0 = 0; 5709 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5710 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 5711 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5712 instance->last_seq_num = seq_num; 5713 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5714 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); 5715 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5716 5717 if (instance->aen_cmd != NULL) { 5718 megasas_return_cmd(instance, cmd); 5719 return 0; 5720 } 5721 5722 /* 5723 * Store reference to the cmd used to register for AEN. 
When an 5724 * application wants us to register for AEN, we have to abort this 5725 * cmd and re-register with a new EVENT LOCALE supplied by that app 5726 */ 5727 instance->aen_cmd = cmd; 5728 5729 /* 5730 * Issue the aen registration frame 5731 */ 5732 instance->instancet->issue_dcmd(instance, cmd); 5733 5734 return 0; 5735 } 5736 5737 /* megasas_get_target_prop - Send DCMD with below details to firmware. 5738 * 5739 * This DCMD will fetch few properties of LD/system PD defined 5740 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 5741 * 5742 * DCMD send by drivers whenever new target is added to the OS. 5743 * 5744 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 5745 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 5746 * 0 = system PD, 1 = LD. 5747 * dcmd.mbox.s[1] - TargetID for LD/system PD. 5748 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 5749 * 5750 * @instance: Adapter soft state 5751 * @sdev: OS provided scsi device 5752 * 5753 * Returns 0 on success non-zero on failure. 5754 */ 5755 static int 5756 megasas_get_target_prop(struct megasas_instance *instance, 5757 struct scsi_device *sdev) 5758 { 5759 int ret; 5760 struct megasas_cmd *cmd; 5761 struct megasas_dcmd_frame *dcmd; 5762 u16 targetId = (sdev->channel % 2) + sdev->id; 5763 5764 cmd = megasas_get_cmd(instance); 5765 5766 if (!cmd) { 5767 dev_err(&instance->pdev->dev, 5768 "Failed to get cmd %s\n", __func__); 5769 return -ENOMEM; 5770 } 5771 5772 dcmd = &cmd->frame->dcmd; 5773 5774 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 5775 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5776 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 5777 5778 dcmd->mbox.s[1] = cpu_to_le16(targetId); 5779 dcmd->cmd = MFI_CMD_DCMD; 5780 dcmd->cmd_status = 0xFF; 5781 dcmd->sge_count = 1; 5782 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5783 dcmd->timeout = 0; 5784 dcmd->pad_0 = 0; 5785 dcmd->data_xfer_len = 5786 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 5787 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 5788 dcmd->sgl.sge32[0].phys_addr = 5789 cpu_to_le32(instance->tgt_prop_h); 5790 dcmd->sgl.sge32[0].length = 5791 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 5792 5793 if (instance->ctrl_context && !instance->mask_interrupts) 5794 ret = megasas_issue_blocked_cmd(instance, 5795 cmd, MFI_IO_TIMEOUT_SECS); 5796 else 5797 ret = megasas_issue_polled(instance, cmd); 5798 5799 switch (ret) { 5800 case DCMD_TIMEOUT: 5801 switch (dcmd_timeout_ocr_possible(instance)) { 5802 case INITIATE_OCR: 5803 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5804 megasas_reset_fusion(instance->host, 5805 MFI_IO_TIMEOUT_OCR); 5806 break; 5807 case KILL_ADAPTER: 5808 megaraid_sas_kill_hba(instance); 5809 break; 5810 case IGNORE_TIMEOUT: 5811 dev_info(&instance->pdev->dev, 5812 "Ignore DCMD timeout: %s %d\n", 5813 __func__, __LINE__); 5814 break; 5815 } 5816 break; 5817 5818 default: 5819 megasas_return_cmd(instance, cmd); 5820 } 5821 if (ret != DCMD_SUCCESS) 5822 dev_err(&instance->pdev->dev, 5823 "return from %s %d return value %d\n", 5824 __func__, __LINE__, ret); 5825 5826 return ret; 5827 } 5828 5829 /** 5830 * megasas_start_aen - Subscribes to AEN during driver load time 5831 * @instance: Adapter soft state 5832 */ 5833 static int megasas_start_aen(struct megasas_instance *instance) 5834 { 5835 struct megasas_evt_log_info eli; 5836 union megasas_evt_class_locale class_locale; 5837 5838 /* 5839 * Get the latest sequence number from FW 5840 */ 5841 memset(&eli, 0, sizeof(eli)); 5842 5843 if 
(megasas_get_seq_num(instance, &eli)) 5844 return -1; 5845 5846 /* 5847 * Register AEN with FW for latest sequence number plus 1 5848 */ 5849 class_locale.members.reserved = 0; 5850 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5851 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5852 5853 return megasas_register_aen(instance, 5854 le32_to_cpu(eli.newest_seq_num) + 1, 5855 class_locale.word); 5856 } 5857 5858 /** 5859 * megasas_io_attach - Attaches this driver to SCSI mid-layer 5860 * @instance: Adapter soft state 5861 */ 5862 static int megasas_io_attach(struct megasas_instance *instance) 5863 { 5864 struct Scsi_Host *host = instance->host; 5865 5866 /* 5867 * Export parameters required by SCSI mid-layer 5868 */ 5869 host->unique_id = instance->unique_id; 5870 host->can_queue = instance->max_scsi_cmds; 5871 host->this_id = instance->init_id; 5872 host->sg_tablesize = instance->max_num_sge; 5873 5874 if (instance->fw_support_ieee) 5875 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 5876 5877 /* 5878 * Check if the module parameter value for max_sectors can be used 5879 */ 5880 if (max_sectors && max_sectors < instance->max_sectors_per_req) 5881 instance->max_sectors_per_req = max_sectors; 5882 else { 5883 if (max_sectors) { 5884 if (((instance->pdev->device == 5885 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 5886 (instance->pdev->device == 5887 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 5888 (max_sectors <= MEGASAS_MAX_SECTORS)) { 5889 instance->max_sectors_per_req = max_sectors; 5890 } else { 5891 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 5892 "and <= %d (or < 1MB for GEN2 controller)\n", 5893 instance->max_sectors_per_req); 5894 } 5895 } 5896 } 5897 5898 host->max_sectors = instance->max_sectors_per_req; 5899 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 5900 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 5901 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 5902 host->max_lun = MEGASAS_MAX_LUN; 5903 host->max_cmd_len = 16; 5904 5905 /* 5906 * Notify the mid-layer about the new controller 5907 */ 5908 if (scsi_add_host(host, &instance->pdev->dev)) { 5909 dev_err(&instance->pdev->dev, 5910 "Failed to add host from %s %d\n", 5911 __func__, __LINE__); 5912 return -ENODEV; 5913 } 5914 5915 return 0; 5916 } 5917 5918 static int 5919 megasas_set_dma_mask(struct pci_dev *pdev) 5920 { 5921 /* 5922 * All our controllers are capable of performing 64-bit DMA 5923 */ 5924 if (IS_DMA64) { 5925 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 5926 5927 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5928 goto fail_set_dma_mask; 5929 } 5930 } else { 5931 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5932 goto fail_set_dma_mask; 5933 } 5934 /* 5935 * Ensure that all data structures are allocated in 32-bit 5936 * memory. 
5937 */ 5938 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 5939 /* Try 32bit DMA mask and 32 bit Consistent dma mask */ 5940 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 5941 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 5942 dev_info(&pdev->dev, "set 32bit DMA mask" 5943 "and 32 bit consistent mask\n"); 5944 else 5945 goto fail_set_dma_mask; 5946 } 5947 5948 return 0; 5949 5950 fail_set_dma_mask: 5951 return 1; 5952 } 5953 5954 /** 5955 * megasas_probe_one - PCI hotplug entry point 5956 * @pdev: PCI device structure 5957 * @id: PCI ids of supported hotplugged adapter 5958 */ 5959 static int megasas_probe_one(struct pci_dev *pdev, 5960 const struct pci_device_id *id) 5961 { 5962 int rval, pos; 5963 struct Scsi_Host *host; 5964 struct megasas_instance *instance; 5965 u16 control = 0; 5966 struct fusion_context *fusion = NULL; 5967 5968 /* Reset MSI-X in the kdump kernel */ 5969 if (reset_devices) { 5970 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 5971 if (pos) { 5972 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 5973 &control); 5974 if (control & PCI_MSIX_FLAGS_ENABLE) { 5975 dev_info(&pdev->dev, "resetting MSI-X\n"); 5976 pci_write_config_word(pdev, 5977 pos + PCI_MSIX_FLAGS, 5978 control & 5979 ~PCI_MSIX_FLAGS_ENABLE); 5980 } 5981 } 5982 } 5983 5984 /* 5985 * PCI prepping: enable device set bus mastering and dma mask 5986 */ 5987 rval = pci_enable_device_mem(pdev); 5988 5989 if (rval) { 5990 return rval; 5991 } 5992 5993 pci_set_master(pdev); 5994 5995 if (megasas_set_dma_mask(pdev)) 5996 goto fail_set_dma_mask; 5997 5998 host = scsi_host_alloc(&megasas_template, 5999 sizeof(struct megasas_instance)); 6000 6001 if (!host) { 6002 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6003 goto fail_alloc_instance; 6004 } 6005 6006 instance = (struct megasas_instance *)host->hostdata; 6007 memset(instance, 0, sizeof(*instance)); 6008 atomic_set(&instance->fw_reset_no_pci_access, 0); 6009 instance->pdev = pdev; 6010 6011 switch (instance->pdev->device) { 6012 case PCI_DEVICE_ID_LSI_VENTURA: 6013 case PCI_DEVICE_ID_LSI_HARPOON: 6014 case PCI_DEVICE_ID_LSI_TOMCAT: 6015 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 6016 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 6017 instance->is_ventura = true; 6018 case PCI_DEVICE_ID_LSI_FUSION: 6019 case PCI_DEVICE_ID_LSI_PLASMA: 6020 case PCI_DEVICE_ID_LSI_INVADER: 6021 case PCI_DEVICE_ID_LSI_FURY: 6022 case PCI_DEVICE_ID_LSI_INTRUDER: 6023 case PCI_DEVICE_ID_LSI_INTRUDER_24: 6024 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6025 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6026 { 6027 if (megasas_alloc_fusion_context(instance)) { 6028 megasas_free_fusion_context(instance); 6029 goto fail_alloc_dma_buf; 6030 } 6031 fusion = instance->ctrl_context; 6032 6033 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 6034 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) 6035 fusion->adapter_type = THUNDERBOLT_SERIES; 6036 else if (instance->is_ventura) 6037 fusion->adapter_type = VENTURA_SERIES; 6038 else 6039 fusion->adapter_type = INVADER_SERIES; 6040 } 6041 break; 6042 default: /* For all other supported controllers */ 6043 6044 instance->producer = 6045 pci_alloc_consistent(pdev, sizeof(u32), 6046 &instance->producer_h); 6047 instance->consumer = 6048 pci_alloc_consistent(pdev, sizeof(u32), 6049 &instance->consumer_h); 6050 6051 if (!instance->producer || !instance->consumer) { 6052 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " 6053 "memory for producer, consumer\n"); 6054 goto fail_alloc_dma_buf; 6055 } 6056 6057 
		/*
		 * For MFI (non-Fusion) controllers the reply queue is driven
		 * by a producer/consumer index pair shared with firmware;
		 * start both indices at zero.
		 */
		*instance->producer = 0;
		*instance->consumer = 0;
		break;
	}

	/* Crash dump feature related initialisation */
	instance->drv_buf_index = 0;
	instance->drv_buf_alloc = 0;
	instance->crash_dump_fw_support = 0;
	instance->crash_dump_app_support = 0;
	instance->fw_crash_state = UNAVAILABLE;
	spin_lock_init(&instance->crashdump_lock);
	instance->crash_dump_buf = NULL;

	megasas_poll_wait_aen = 0;
	instance->flag_ieee = 0;
	instance->ev = NULL;
	instance->issuepend_done = 1;
	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
	instance->is_imr = 0;

	instance->evt_detail = pci_alloc_consistent(pdev,
					sizeof(struct megasas_evt_detail),
					&instance->evt_detail_h);

	if (!instance->evt_detail) {
		dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
			   "event detail structure\n");
		goto fail_alloc_dma_buf;
	}

	if (!reset_devices) {
		instance->system_info_buf = pci_zalloc_consistent(pdev,
					sizeof(struct MR_DRV_SYSTEM_INFO),
					&instance->system_info_h);
		if (!instance->system_info_buf)
			dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");

		instance->pd_info = pci_alloc_consistent(pdev,
			sizeof(struct MR_PD_INFO), &instance->pd_info_h);

		instance->tgt_prop = pci_alloc_consistent(pdev,
			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);

		if (!instance->pd_info)
			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");

		if (!instance->tgt_prop)
			dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");

		instance->crash_dump_buf = pci_alloc_consistent(pdev,
						CRASH_DMA_BUF_SIZE,
						&instance->crash_dump_h);
		if (!instance->crash_dump_buf)
			dev_err(&pdev->dev, "Can't allocate Firmware "
				"crash dump DMA buffer\n");
	}

	/*
	 * Initialize locks and queues
	 */
	INIT_LIST_HEAD(&instance->cmd_pool);
	INIT_LIST_HEAD(&instance->internal_reset_pending_q);

	atomic_set(&instance->fw_outstanding, 0);

	init_waitqueue_head(&instance->int_cmd_wait_q);
	init_waitqueue_head(&instance->abort_cmd_wait_q);

	spin_lock_init(&instance->mfi_pool_lock);
	spin_lock_init(&instance->hba_lock);
	spin_lock_init(&instance->stream_lock);
	spin_lock_init(&instance->completion_lock);

	mutex_init(&instance->reset_mutex);
	mutex_init(&instance->hba_mutex);

	/*
	 * Initialize PCI related and misc parameters
	 */
	instance->host = host;
	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
	instance->ctrl_info = NULL;

	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
		instance->flag_ieee = 1;

	megasas_dbg_lvl = 0;
	instance->flag = 0;
	instance->unload = 1;
	instance->last_time = 0;
	instance->disableOnlineCtrlReset = 1;
	instance->UnevenSpanSupport = 0;

	if (instance->ctrl_context) {
		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
	} else
		INIT_WORK(&instance->work_init, process_fw_state_change_wq);

	/*
	 * Initialize MFI Firmware
	 */
6166 if (megasas_init_fw(instance)) 6167 goto fail_init_mfi; 6168 6169 if (instance->requestorId) { 6170 if (instance->PlasmaFW111) { 6171 instance->vf_affiliation_111 = 6172 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 6173 &instance->vf_affiliation_111_h); 6174 if (!instance->vf_affiliation_111) 6175 dev_warn(&pdev->dev, "Can't allocate " 6176 "memory for VF affiliation buffer\n"); 6177 } else { 6178 instance->vf_affiliation = 6179 pci_alloc_consistent(pdev, 6180 (MAX_LOGICAL_DRIVES + 1) * 6181 sizeof(struct MR_LD_VF_AFFILIATION), 6182 &instance->vf_affiliation_h); 6183 if (!instance->vf_affiliation) 6184 dev_warn(&pdev->dev, "Can't allocate " 6185 "memory for VF affiliation buffer\n"); 6186 } 6187 } 6188 6189 /* 6190 * Store instance in PCI softstate 6191 */ 6192 pci_set_drvdata(pdev, instance); 6193 6194 /* 6195 * Add this controller to megasas_mgmt_info structure so that it 6196 * can be exported to management applications 6197 */ 6198 megasas_mgmt_info.count++; 6199 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 6200 megasas_mgmt_info.max_index++; 6201 6202 /* 6203 * Register with SCSI mid-layer 6204 */ 6205 if (megasas_io_attach(instance)) 6206 goto fail_io_attach; 6207 6208 instance->unload = 0; 6209 /* 6210 * Trigger SCSI to scan our drives 6211 */ 6212 scsi_scan_host(host); 6213 6214 /* 6215 * Initiate AEN (Asynchronous Event Notification) 6216 */ 6217 if (megasas_start_aen(instance)) { 6218 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 6219 goto fail_start_aen; 6220 } 6221 6222 /* Get current SR-IOV LD/VF affiliation */ 6223 if (instance->requestorId) 6224 megasas_get_ld_vf_affiliation(instance, 1); 6225 6226 return 0; 6227 6228 fail_start_aen: 6229 fail_io_attach: 6230 megasas_mgmt_info.count--; 6231 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6232 megasas_mgmt_info.max_index--; 6233 6234 instance->instancet->disable_intr(instance); 6235 megasas_destroy_irqs(instance); 6236 6237 if (instance->ctrl_context) 6238 megasas_release_fusion(instance); 6239 else 6240 megasas_release_mfi(instance); 6241 if (instance->msix_vectors) 6242 pci_free_irq_vectors(instance->pdev); 6243 fail_init_mfi: 6244 fail_alloc_dma_buf: 6245 if (instance->evt_detail) 6246 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6247 instance->evt_detail, 6248 instance->evt_detail_h); 6249 6250 if (instance->pd_info) 6251 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6252 instance->pd_info, 6253 instance->pd_info_h); 6254 if (instance->tgt_prop) 6255 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), 6256 instance->tgt_prop, 6257 instance->tgt_prop_h); 6258 if (instance->producer) 6259 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6260 instance->producer_h); 6261 if (instance->consumer) 6262 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 6263 instance->consumer_h); 6264 scsi_host_put(host); 6265 6266 fail_alloc_instance: 6267 fail_set_dma_mask: 6268 pci_disable_device(pdev); 6269 6270 return -ENODEV; 6271 } 6272 6273 /** 6274 * megasas_flush_cache - Requests FW to flush all its caches 6275 * @instance: Adapter soft state 6276 */ 6277 static void megasas_flush_cache(struct megasas_instance *instance) 6278 { 6279 struct megasas_cmd *cmd; 6280 struct megasas_dcmd_frame *dcmd; 6281 6282 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6283 return; 6284 6285 cmd = megasas_get_cmd(instance); 6286 6287 if (!cmd) 6288 return; 6289 6290 dcmd = &cmd->frame->dcmd; 6291 6292 
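	/*
	 * Build a data-less DCMD frame: opcode MR_DCMD_CTRL_CACHE_FLUSH with
	 * MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE in mailbox byte 0 asks
	 * firmware to flush both its own cache and the disk write caches.
	 * The command is issued synchronously below and only logged on
	 * failure.
	 */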
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6293 6294 dcmd->cmd = MFI_CMD_DCMD; 6295 dcmd->cmd_status = 0x0; 6296 dcmd->sge_count = 0; 6297 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6298 dcmd->timeout = 0; 6299 dcmd->pad_0 = 0; 6300 dcmd->data_xfer_len = 0; 6301 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6302 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6303 6304 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6305 != DCMD_SUCCESS) { 6306 dev_err(&instance->pdev->dev, 6307 "return from %s %d\n", __func__, __LINE__); 6308 return; 6309 } 6310 6311 megasas_return_cmd(instance, cmd); 6312 } 6313 6314 /** 6315 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6316 * @instance: Adapter soft state 6317 * @opcode: Shutdown/Hibernate 6318 */ 6319 static void megasas_shutdown_controller(struct megasas_instance *instance, 6320 u32 opcode) 6321 { 6322 struct megasas_cmd *cmd; 6323 struct megasas_dcmd_frame *dcmd; 6324 6325 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6326 return; 6327 6328 cmd = megasas_get_cmd(instance); 6329 6330 if (!cmd) 6331 return; 6332 6333 if (instance->aen_cmd) 6334 megasas_issue_blocked_abort_cmd(instance, 6335 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 6336 if (instance->map_update_cmd) 6337 megasas_issue_blocked_abort_cmd(instance, 6338 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 6339 if (instance->jbod_seq_cmd) 6340 megasas_issue_blocked_abort_cmd(instance, 6341 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 6342 6343 dcmd = &cmd->frame->dcmd; 6344 6345 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6346 6347 dcmd->cmd = MFI_CMD_DCMD; 6348 dcmd->cmd_status = 0x0; 6349 dcmd->sge_count = 0; 6350 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6351 dcmd->timeout = 0; 6352 dcmd->pad_0 = 0; 6353 dcmd->data_xfer_len = 0; 6354 dcmd->opcode = cpu_to_le32(opcode); 6355 6356 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6357 != DCMD_SUCCESS) { 6358 dev_err(&instance->pdev->dev, 6359 "return from %s %d\n", __func__, __LINE__); 6360 return; 6361 } 6362 6363 megasas_return_cmd(instance, cmd); 6364 } 6365 6366 #ifdef CONFIG_PM 6367 /** 6368 * megasas_suspend - driver suspend entry point 6369 * @pdev: PCI device structure 6370 * @state: PCI power state to suspend routine 6371 */ 6372 static int 6373 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 6374 { 6375 struct Scsi_Host *host; 6376 struct megasas_instance *instance; 6377 6378 instance = pci_get_drvdata(pdev); 6379 host = instance->host; 6380 instance->unload = 1; 6381 6382 /* Shutdown SR-IOV heartbeat timer */ 6383 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6384 del_timer_sync(&instance->sriov_heartbeat_timer); 6385 6386 megasas_flush_cache(instance); 6387 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6388 6389 /* cancel the delayed work if this work still in queue */ 6390 if (instance->ev != NULL) { 6391 struct megasas_aen_event *ev = instance->ev; 6392 cancel_delayed_work_sync(&ev->hotplug_work); 6393 instance->ev = NULL; 6394 } 6395 6396 tasklet_kill(&instance->isr_tasklet); 6397 6398 pci_set_drvdata(instance->pdev, instance); 6399 instance->instancet->disable_intr(instance); 6400 6401 megasas_destroy_irqs(instance); 6402 6403 if (instance->msix_vectors) 6404 pci_free_irq_vectors(instance->pdev); 6405 6406 pci_save_state(pdev); 6407 pci_disable_device(pdev); 6408 6409 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 6410 6411 return 0; 6412 } 6413 6414 /** 6415 * 
megasas_resume - driver resume entry point
 * @pdev: PCI device structure
 */
static int
megasas_resume(struct pci_dev *pdev)
{
	int rval;
	struct Scsi_Host *host;
	struct megasas_instance *instance;
	int irq_flags = PCI_IRQ_LEGACY;

	instance = pci_get_drvdata(pdev);
	host = instance->host;
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	/*
	 * PCI prepping: enable device, set bus mastering and dma mask
	 */
	rval = pci_enable_device_mem(pdev);

	if (rval) {
		dev_err(&pdev->dev, "Enable device failed\n");
		return rval;
	}

	pci_set_master(pdev);

	if (megasas_set_dma_mask(pdev))
		goto fail_set_dma_mask;

	/*
	 * Initialize MFI Firmware
	 */

	atomic_set(&instance->fw_outstanding, 0);

	/*
	 * We expect the FW state to be READY
	 */
	if (megasas_transition_to_ready(instance, 0))
		goto fail_ready_state;

	/* Now re-enable MSI-X */
	if (instance->msix_vectors) {
		irq_flags = PCI_IRQ_MSIX;
		if (smp_affinity_enable)
			irq_flags |= PCI_IRQ_AFFINITY;
	}
	rval = pci_alloc_irq_vectors(instance->pdev, 1,
				     instance->msix_vectors ?
				     instance->msix_vectors : 1, irq_flags);
	if (rval < 0)
		goto fail_reenable_msix;

	if (instance->ctrl_context) {
		megasas_reset_reply_desc(instance);
		if (megasas_ioc_init_fusion(instance)) {
			megasas_free_cmds(instance);
			megasas_free_cmds_fusion(instance);
			goto fail_init_mfi;
		}
		if (!megasas_get_map_info(instance))
			megasas_sync_map_info(instance);
	} else {
		*instance->producer = 0;
		*instance->consumer = 0;
		if (megasas_issue_init_mfi(instance))
			goto fail_init_mfi;
	}

	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
		     (unsigned long)instance);

	if (instance->msix_vectors ?
6491 megasas_setup_irqs_msix(instance, 0) : 6492 megasas_setup_irqs_ioapic(instance)) 6493 goto fail_init_mfi; 6494 6495 /* Re-launch SR-IOV heartbeat timer */ 6496 if (instance->requestorId) { 6497 if (!megasas_sriov_start_heartbeat(instance, 0)) 6498 megasas_start_timer(instance, 6499 &instance->sriov_heartbeat_timer, 6500 megasas_sriov_heartbeat_handler, 6501 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 6502 else { 6503 instance->skip_heartbeat_timer_del = 1; 6504 goto fail_init_mfi; 6505 } 6506 } 6507 6508 instance->instancet->enable_intr(instance); 6509 megasas_setup_jbod_map(instance); 6510 instance->unload = 0; 6511 6512 /* 6513 * Initiate AEN (Asynchronous Event Notification) 6514 */ 6515 if (megasas_start_aen(instance)) 6516 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 6517 6518 return 0; 6519 6520 fail_init_mfi: 6521 if (instance->evt_detail) 6522 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6523 instance->evt_detail, 6524 instance->evt_detail_h); 6525 6526 if (instance->pd_info) 6527 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6528 instance->pd_info, 6529 instance->pd_info_h); 6530 if (instance->tgt_prop) 6531 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), 6532 instance->tgt_prop, 6533 instance->tgt_prop_h); 6534 if (instance->producer) 6535 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6536 instance->producer_h); 6537 if (instance->consumer) 6538 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 6539 instance->consumer_h); 6540 scsi_host_put(host); 6541 6542 fail_set_dma_mask: 6543 fail_ready_state: 6544 fail_reenable_msix: 6545 6546 pci_disable_device(pdev); 6547 6548 return -ENODEV; 6549 } 6550 #else 6551 #define megasas_suspend NULL 6552 #define megasas_resume NULL 6553 #endif 6554 6555 static inline int 6556 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 6557 { 6558 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 6559 int i; 6560 6561 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6562 return 1; 6563 6564 for (i = 0; i < wait_time; i++) { 6565 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 6566 break; 6567 6568 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 6569 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 6570 6571 msleep(1000); 6572 } 6573 6574 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6575 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n", 6576 __func__); 6577 return 1; 6578 } 6579 6580 return 0; 6581 } 6582 6583 /** 6584 * megasas_detach_one - PCI hot"un"plug entry point 6585 * @pdev: PCI device structure 6586 */ 6587 static void megasas_detach_one(struct pci_dev *pdev) 6588 { 6589 int i; 6590 struct Scsi_Host *host; 6591 struct megasas_instance *instance; 6592 struct fusion_context *fusion; 6593 u32 pd_seq_map_sz; 6594 6595 instance = pci_get_drvdata(pdev); 6596 instance->unload = 1; 6597 host = instance->host; 6598 fusion = instance->ctrl_context; 6599 6600 /* Shutdown SR-IOV heartbeat timer */ 6601 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6602 del_timer_sync(&instance->sriov_heartbeat_timer); 6603 6604 if (instance->fw_crash_state != UNAVAILABLE) 6605 megasas_free_host_crash_buffer(instance); 6606 scsi_remove_host(instance->host); 6607 6608 if (megasas_wait_for_adapter_operational(instance)) 6609 goto skip_firing_dcmds; 6610 6611 megasas_flush_cache(instance); 6612 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 
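	/*
	 * skip_firing_dcmds is entered directly when the adapter never
	 * reached the operational state above; in that case no flush or
	 * shutdown DCMDs are sent to the dead firmware and we go straight
	 * to tearing down driver resources.
	 */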
6613 6614 skip_firing_dcmds: 6615 /* cancel the delayed work if this work still in queue*/ 6616 if (instance->ev != NULL) { 6617 struct megasas_aen_event *ev = instance->ev; 6618 cancel_delayed_work_sync(&ev->hotplug_work); 6619 instance->ev = NULL; 6620 } 6621 6622 /* cancel all wait events */ 6623 wake_up_all(&instance->int_cmd_wait_q); 6624 6625 tasklet_kill(&instance->isr_tasklet); 6626 6627 /* 6628 * Take the instance off the instance array. Note that we will not 6629 * decrement the max_index. We let this array be sparse array 6630 */ 6631 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6632 if (megasas_mgmt_info.instance[i] == instance) { 6633 megasas_mgmt_info.count--; 6634 megasas_mgmt_info.instance[i] = NULL; 6635 6636 break; 6637 } 6638 } 6639 6640 instance->instancet->disable_intr(instance); 6641 6642 megasas_destroy_irqs(instance); 6643 6644 if (instance->msix_vectors) 6645 pci_free_irq_vectors(instance->pdev); 6646 6647 if (instance->is_ventura) { 6648 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 6649 kfree(fusion->stream_detect_by_ld[i]); 6650 kfree(fusion->stream_detect_by_ld); 6651 fusion->stream_detect_by_ld = NULL; 6652 } 6653 6654 6655 if (instance->ctrl_context) { 6656 megasas_release_fusion(instance); 6657 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6658 (sizeof(struct MR_PD_CFG_SEQ) * 6659 (MAX_PHYSICAL_DEVICES - 1)); 6660 for (i = 0; i < 2 ; i++) { 6661 if (fusion->ld_map[i]) 6662 dma_free_coherent(&instance->pdev->dev, 6663 fusion->max_map_sz, 6664 fusion->ld_map[i], 6665 fusion->ld_map_phys[i]); 6666 if (fusion->ld_drv_map[i]) 6667 free_pages((ulong)fusion->ld_drv_map[i], 6668 fusion->drv_map_pages); 6669 if (fusion->pd_seq_sync[i]) 6670 dma_free_coherent(&instance->pdev->dev, 6671 pd_seq_map_sz, 6672 fusion->pd_seq_sync[i], 6673 fusion->pd_seq_phys[i]); 6674 } 6675 megasas_free_fusion_context(instance); 6676 } else { 6677 megasas_release_mfi(instance); 6678 pci_free_consistent(pdev, sizeof(u32), 6679 instance->producer, 6680 instance->producer_h); 6681 pci_free_consistent(pdev, sizeof(u32), 6682 instance->consumer, 6683 instance->consumer_h); 6684 } 6685 6686 kfree(instance->ctrl_info); 6687 6688 if (instance->evt_detail) 6689 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6690 instance->evt_detail, instance->evt_detail_h); 6691 if (instance->pd_info) 6692 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6693 instance->pd_info, 6694 instance->pd_info_h); 6695 if (instance->tgt_prop) 6696 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), 6697 instance->tgt_prop, 6698 instance->tgt_prop_h); 6699 if (instance->vf_affiliation) 6700 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6701 sizeof(struct MR_LD_VF_AFFILIATION), 6702 instance->vf_affiliation, 6703 instance->vf_affiliation_h); 6704 6705 if (instance->vf_affiliation_111) 6706 pci_free_consistent(pdev, 6707 sizeof(struct MR_LD_VF_AFFILIATION_111), 6708 instance->vf_affiliation_111, 6709 instance->vf_affiliation_111_h); 6710 6711 if (instance->hb_host_mem) 6712 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), 6713 instance->hb_host_mem, 6714 instance->hb_host_mem_h); 6715 6716 if (instance->crash_dump_buf) 6717 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 6718 instance->crash_dump_buf, instance->crash_dump_h); 6719 6720 if (instance->system_info_buf) 6721 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), 6722 instance->system_info_buf, instance->system_info_h); 6723 6724 scsi_host_put(host); 6725 6726 pci_disable_device(pdev); 6727 } 
6728 6729 /** 6730 * megasas_shutdown - Shutdown entry point 6731 * @device: Generic device structure 6732 */ 6733 static void megasas_shutdown(struct pci_dev *pdev) 6734 { 6735 struct megasas_instance *instance = pci_get_drvdata(pdev); 6736 6737 instance->unload = 1; 6738 6739 if (megasas_wait_for_adapter_operational(instance)) 6740 goto skip_firing_dcmds; 6741 6742 megasas_flush_cache(instance); 6743 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6744 6745 skip_firing_dcmds: 6746 instance->instancet->disable_intr(instance); 6747 megasas_destroy_irqs(instance); 6748 6749 if (instance->msix_vectors) 6750 pci_free_irq_vectors(instance->pdev); 6751 } 6752 6753 /** 6754 * megasas_mgmt_open - char node "open" entry point 6755 */ 6756 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 6757 { 6758 /* 6759 * Allow only those users with admin rights 6760 */ 6761 if (!capable(CAP_SYS_ADMIN)) 6762 return -EACCES; 6763 6764 return 0; 6765 } 6766 6767 /** 6768 * megasas_mgmt_fasync - Async notifier registration from applications 6769 * 6770 * This function adds the calling process to a driver global queue. When an 6771 * event occurs, SIGIO will be sent to all processes in this queue. 6772 */ 6773 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 6774 { 6775 int rc; 6776 6777 mutex_lock(&megasas_async_queue_mutex); 6778 6779 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 6780 6781 mutex_unlock(&megasas_async_queue_mutex); 6782 6783 if (rc >= 0) { 6784 /* For sanity check when we get ioctl */ 6785 filep->private_data = filep; 6786 return 0; 6787 } 6788 6789 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 6790 6791 return rc; 6792 } 6793 6794 /** 6795 * megasas_mgmt_poll - char node "poll" entry point 6796 * */ 6797 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) 6798 { 6799 unsigned int mask; 6800 unsigned long flags; 6801 6802 poll_wait(file, &megasas_poll_wait, wait); 6803 spin_lock_irqsave(&poll_aen_lock, flags); 6804 if (megasas_poll_wait_aen) 6805 mask = (POLLIN | POLLRDNORM); 6806 else 6807 mask = 0; 6808 megasas_poll_wait_aen = 0; 6809 spin_unlock_irqrestore(&poll_aen_lock, flags); 6810 return mask; 6811 } 6812 6813 /* 6814 * megasas_set_crash_dump_params_ioctl: 6815 * Send CRASH_DUMP_MODE DCMD to all controllers 6816 * @cmd: MFI command frame 6817 */ 6818 6819 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 6820 { 6821 struct megasas_instance *local_instance; 6822 int i, error = 0; 6823 int crash_support; 6824 6825 crash_support = cmd->frame->dcmd.mbox.w[0]; 6826 6827 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6828 local_instance = megasas_mgmt_info.instance[i]; 6829 if (local_instance && local_instance->crash_dump_drv_support) { 6830 if ((atomic_read(&local_instance->adprecovery) == 6831 MEGASAS_HBA_OPERATIONAL) && 6832 !megasas_set_crash_dump_params(local_instance, 6833 crash_support)) { 6834 local_instance->crash_dump_app_support = 6835 crash_support; 6836 dev_info(&local_instance->pdev->dev, 6837 "Application firmware crash " 6838 "dump mode set success\n"); 6839 error = 0; 6840 } else { 6841 dev_info(&local_instance->pdev->dev, 6842 "Application firmware crash " 6843 "dump mode set failed\n"); 6844 error = -1; 6845 } 6846 } 6847 } 6848 return error; 6849 } 6850 6851 /** 6852 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 6853 * @instance: Adapter soft state 6854 * @argp: User's ioctl packet 6855 */ 6856 static int 6857 
megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 6858 struct megasas_iocpacket __user * user_ioc, 6859 struct megasas_iocpacket *ioc) 6860 { 6861 struct megasas_sge32 *kern_sge32; 6862 struct megasas_cmd *cmd; 6863 void *kbuff_arr[MAX_IOCTL_SGE]; 6864 dma_addr_t buf_handle = 0; 6865 int error = 0, i; 6866 void *sense = NULL; 6867 dma_addr_t sense_handle; 6868 unsigned long *sense_ptr; 6869 6870 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 6871 6872 if (ioc->sge_count > MAX_IOCTL_SGE) { 6873 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 6874 ioc->sge_count, MAX_IOCTL_SGE); 6875 return -EINVAL; 6876 } 6877 6878 cmd = megasas_get_cmd(instance); 6879 if (!cmd) { 6880 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 6881 return -ENOMEM; 6882 } 6883 6884 /* 6885 * User's IOCTL packet has 2 frames (maximum). Copy those two 6886 * frames into our cmd's frames. cmd->frame's context will get 6887 * overwritten when we copy from user's frames. So set that value 6888 * alone separately 6889 */ 6890 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 6891 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 6892 cmd->frame->hdr.pad_0 = 0; 6893 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | 6894 MFI_FRAME_SGL64 | 6895 MFI_FRAME_SENSE64)); 6896 6897 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) { 6898 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 6899 megasas_return_cmd(instance, cmd); 6900 return -1; 6901 } 6902 } 6903 6904 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 6905 error = megasas_set_crash_dump_params_ioctl(cmd); 6906 megasas_return_cmd(instance, cmd); 6907 return error; 6908 } 6909 6910 /* 6911 * The management interface between applications and the fw uses 6912 * MFI frames. E.g, RAID configuration changes, LD property changes 6913 * etc are accomplishes through different kinds of MFI frames. The 6914 * driver needs to care only about substituting user buffers with 6915 * kernel buffers in SGLs. The location of SGL is embedded in the 6916 * struct iocpacket itself. 6917 */ 6918 kern_sge32 = (struct megasas_sge32 *) 6919 ((unsigned long)cmd->frame + ioc->sgl_off); 6920 6921 /* 6922 * For each user buffer, create a mirror buffer and copy in 6923 */ 6924 for (i = 0; i < ioc->sge_count; i++) { 6925 if (!ioc->sgl[i].iov_len) 6926 continue; 6927 6928 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 6929 ioc->sgl[i].iov_len, 6930 &buf_handle, GFP_KERNEL); 6931 if (!kbuff_arr[i]) { 6932 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 6933 "kernel SGL buffer for IOCTL\n"); 6934 error = -ENOMEM; 6935 goto out; 6936 } 6937 6938 /* 6939 * We don't change the dma_coherent_mask, so 6940 * pci_alloc_consistent only returns 32bit addresses 6941 */ 6942 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 6943 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 6944 6945 /* 6946 * We created a kernel buffer corresponding to the 6947 * user buffer. 
Now copy in from the user buffer 6948 */ 6949 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 6950 (u32) (ioc->sgl[i].iov_len))) { 6951 error = -EFAULT; 6952 goto out; 6953 } 6954 } 6955 6956 if (ioc->sense_len) { 6957 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 6958 &sense_handle, GFP_KERNEL); 6959 if (!sense) { 6960 error = -ENOMEM; 6961 goto out; 6962 } 6963 6964 sense_ptr = 6965 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 6966 *sense_ptr = cpu_to_le32(sense_handle); 6967 } 6968 6969 /* 6970 * Set the sync_cmd flag so that the ISR knows not to complete this 6971 * cmd to the SCSI mid-layer 6972 */ 6973 cmd->sync_cmd = 1; 6974 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 6975 cmd->sync_cmd = 0; 6976 dev_err(&instance->pdev->dev, 6977 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", 6978 __func__, __LINE__, cmd->frame->dcmd.opcode, 6979 cmd->cmd_status_drv); 6980 return -EBUSY; 6981 } 6982 6983 cmd->sync_cmd = 0; 6984 6985 if (instance->unload == 1) { 6986 dev_info(&instance->pdev->dev, "Driver unload is in progress " 6987 "don't submit data to application\n"); 6988 goto out; 6989 } 6990 /* 6991 * copy out the kernel buffers to user buffers 6992 */ 6993 for (i = 0; i < ioc->sge_count; i++) { 6994 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 6995 ioc->sgl[i].iov_len)) { 6996 error = -EFAULT; 6997 goto out; 6998 } 6999 } 7000 7001 /* 7002 * copy out the sense 7003 */ 7004 if (ioc->sense_len) { 7005 /* 7006 * sense_ptr points to the location that has the user 7007 * sense buffer address 7008 */ 7009 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 7010 ioc->sense_off); 7011 7012 if (copy_to_user((void __user *)((unsigned long) 7013 get_unaligned((unsigned long *)sense_ptr)), 7014 sense, ioc->sense_len)) { 7015 dev_err(&instance->pdev->dev, "Failed to copy out to user " 7016 "sense data\n"); 7017 error = -EFAULT; 7018 goto out; 7019 } 7020 } 7021 7022 /* 7023 * copy the status codes returned by the fw 7024 */ 7025 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 7026 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 7027 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 7028 error = -EFAULT; 7029 } 7030 7031 out: 7032 if (sense) { 7033 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 7034 sense, sense_handle); 7035 } 7036 7037 for (i = 0; i < ioc->sge_count; i++) { 7038 if (kbuff_arr[i]) { 7039 dma_free_coherent(&instance->pdev->dev, 7040 le32_to_cpu(kern_sge32[i].length), 7041 kbuff_arr[i], 7042 le32_to_cpu(kern_sge32[i].phys_addr)); 7043 kbuff_arr[i] = NULL; 7044 } 7045 } 7046 7047 megasas_return_cmd(instance, cmd); 7048 return error; 7049 } 7050 7051 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 7052 { 7053 struct megasas_iocpacket __user *user_ioc = 7054 (struct megasas_iocpacket __user *)arg; 7055 struct megasas_iocpacket *ioc; 7056 struct megasas_instance *instance; 7057 int error; 7058 int i; 7059 unsigned long flags; 7060 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 7061 7062 ioc = memdup_user(user_ioc, sizeof(*ioc)); 7063 if (IS_ERR(ioc)) 7064 return PTR_ERR(ioc); 7065 7066 instance = megasas_lookup_instance(ioc->host_no); 7067 if (!instance) { 7068 error = -ENODEV; 7069 goto out_kfree_ioc; 7070 } 7071 7072 /* Adjust ioctl wait time for VF mode */ 7073 if (instance->requestorId) 7074 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 7075 7076 /* Block ioctls in VF mode */ 7077 if (instance->requestorId && !allow_vf_ioctls) { 7078 
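		/*
		 * requestorId is only set for SR-IOV virtual functions;
		 * their ioctls are rejected unless the allow_vf_ioctls
		 * module parameter was set at load time.
		 */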
error = -ENODEV; 7079 goto out_kfree_ioc; 7080 } 7081 7082 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7083 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 7084 error = -ENODEV; 7085 goto out_kfree_ioc; 7086 } 7087 7088 if (instance->unload == 1) { 7089 error = -ENODEV; 7090 goto out_kfree_ioc; 7091 } 7092 7093 if (down_interruptible(&instance->ioctl_sem)) { 7094 error = -ERESTARTSYS; 7095 goto out_kfree_ioc; 7096 } 7097 7098 for (i = 0; i < wait_time; i++) { 7099 7100 spin_lock_irqsave(&instance->hba_lock, flags); 7101 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 7102 spin_unlock_irqrestore(&instance->hba_lock, flags); 7103 break; 7104 } 7105 spin_unlock_irqrestore(&instance->hba_lock, flags); 7106 7107 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 7108 dev_notice(&instance->pdev->dev, "waiting" 7109 "for controller reset to finish\n"); 7110 } 7111 7112 msleep(1000); 7113 } 7114 7115 spin_lock_irqsave(&instance->hba_lock, flags); 7116 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 7117 spin_unlock_irqrestore(&instance->hba_lock, flags); 7118 7119 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); 7120 error = -ENODEV; 7121 goto out_up; 7122 } 7123 spin_unlock_irqrestore(&instance->hba_lock, flags); 7124 7125 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 7126 out_up: 7127 up(&instance->ioctl_sem); 7128 7129 out_kfree_ioc: 7130 kfree(ioc); 7131 return error; 7132 } 7133 7134 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 7135 { 7136 struct megasas_instance *instance; 7137 struct megasas_aen aen; 7138 int error; 7139 int i; 7140 unsigned long flags; 7141 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 7142 7143 if (file->private_data != file) { 7144 printk(KERN_DEBUG "megasas: fasync_helper was not " 7145 "called first\n"); 7146 return -EINVAL; 7147 } 7148 7149 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 7150 return -EFAULT; 7151 7152 instance = megasas_lookup_instance(aen.host_no); 7153 7154 if (!instance) 7155 return -ENODEV; 7156 7157 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 7158 return -ENODEV; 7159 } 7160 7161 if (instance->unload == 1) { 7162 return -ENODEV; 7163 } 7164 7165 for (i = 0; i < wait_time; i++) { 7166 7167 spin_lock_irqsave(&instance->hba_lock, flags); 7168 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 7169 spin_unlock_irqrestore(&instance->hba_lock, 7170 flags); 7171 break; 7172 } 7173 7174 spin_unlock_irqrestore(&instance->hba_lock, flags); 7175 7176 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 7177 dev_notice(&instance->pdev->dev, "waiting for" 7178 "controller reset to finish\n"); 7179 } 7180 7181 msleep(1000); 7182 } 7183 7184 spin_lock_irqsave(&instance->hba_lock, flags); 7185 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 7186 spin_unlock_irqrestore(&instance->hba_lock, flags); 7187 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n"); 7188 return -ENODEV; 7189 } 7190 spin_unlock_irqrestore(&instance->hba_lock, flags); 7191 7192 mutex_lock(&instance->reset_mutex); 7193 error = megasas_register_aen(instance, aen.seq_num, 7194 aen.class_locale_word); 7195 mutex_unlock(&instance->reset_mutex); 7196 return error; 7197 } 7198 7199 /** 7200 * megasas_mgmt_ioctl - char node ioctl entry point 7201 */ 7202 static long 7203 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 7204 { 7205 switch (cmd) 
{ 7206 case MEGASAS_IOC_FIRMWARE: 7207 return megasas_mgmt_ioctl_fw(file, arg); 7208 7209 case MEGASAS_IOC_GET_AEN: 7210 return megasas_mgmt_ioctl_aen(file, arg); 7211 } 7212 7213 return -ENOTTY; 7214 } 7215 7216 #ifdef CONFIG_COMPAT 7217 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 7218 { 7219 struct compat_megasas_iocpacket __user *cioc = 7220 (struct compat_megasas_iocpacket __user *)arg; 7221 struct megasas_iocpacket __user *ioc = 7222 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 7223 int i; 7224 int error = 0; 7225 compat_uptr_t ptr; 7226 u32 local_sense_off; 7227 u32 local_sense_len; 7228 u32 user_sense_off; 7229 7230 if (clear_user(ioc, sizeof(*ioc))) 7231 return -EFAULT; 7232 7233 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 7234 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 7235 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 7236 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 7237 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 7238 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 7239 return -EFAULT; 7240 7241 /* 7242 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 7243 * sense_len is not null, so prepare the 64bit value under 7244 * the same condition. 7245 */ 7246 if (get_user(local_sense_off, &ioc->sense_off) || 7247 get_user(local_sense_len, &ioc->sense_len) || 7248 get_user(user_sense_off, &cioc->sense_off)) 7249 return -EFAULT; 7250 7251 if (local_sense_len) { 7252 void __user **sense_ioc_ptr = 7253 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 7254 compat_uptr_t *sense_cioc_ptr = 7255 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 7256 if (get_user(ptr, sense_cioc_ptr) || 7257 put_user(compat_ptr(ptr), sense_ioc_ptr)) 7258 return -EFAULT; 7259 } 7260 7261 for (i = 0; i < MAX_IOCTL_SGE; i++) { 7262 if (get_user(ptr, &cioc->sgl[i].iov_base) || 7263 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 7264 copy_in_user(&ioc->sgl[i].iov_len, 7265 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 7266 return -EFAULT; 7267 } 7268 7269 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 7270 7271 if (copy_in_user(&cioc->frame.hdr.cmd_status, 7272 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 7273 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 7274 return -EFAULT; 7275 } 7276 return error; 7277 } 7278 7279 static long 7280 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 7281 unsigned long arg) 7282 { 7283 switch (cmd) { 7284 case MEGASAS_IOC_FIRMWARE32: 7285 return megasas_mgmt_compat_ioctl_fw(file, arg); 7286 case MEGASAS_IOC_GET_AEN: 7287 return megasas_mgmt_ioctl_aen(file, arg); 7288 } 7289 7290 return -ENOTTY; 7291 } 7292 #endif 7293 7294 /* 7295 * File operations structure for management interface 7296 */ 7297 static const struct file_operations megasas_mgmt_fops = { 7298 .owner = THIS_MODULE, 7299 .open = megasas_mgmt_open, 7300 .fasync = megasas_mgmt_fasync, 7301 .unlocked_ioctl = megasas_mgmt_ioctl, 7302 .poll = megasas_mgmt_poll, 7303 #ifdef CONFIG_COMPAT 7304 .compat_ioctl = megasas_mgmt_compat_ioctl, 7305 #endif 7306 .llseek = noop_llseek, 7307 }; 7308 7309 /* 7310 * PCI hotplug support registration structure 7311 */ 7312 static struct pci_driver megasas_pci_driver = { 7313 7314 .name = "megaraid_sas", 7315 .id_table = megasas_pci_table, 7316 .probe = megasas_probe_one, 7317 .remove = megasas_detach_one, 7318 .suspend = megasas_suspend, 7319 
.resume = megasas_resume, 7320 .shutdown = megasas_shutdown, 7321 }; 7322 7323 /* 7324 * Sysfs driver attributes 7325 */ 7326 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) 7327 { 7328 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 7329 MEGASAS_VERSION); 7330 } 7331 7332 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 7333 7334 static ssize_t 7335 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) 7336 { 7337 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 7338 MEGASAS_RELDATE); 7339 } 7340 7341 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); 7342 7343 static ssize_t 7344 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 7345 { 7346 return sprintf(buf, "%u\n", support_poll_for_event); 7347 } 7348 7349 static DRIVER_ATTR(support_poll_for_event, S_IRUGO, 7350 megasas_sysfs_show_support_poll_for_event, NULL); 7351 7352 static ssize_t 7353 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) 7354 { 7355 return sprintf(buf, "%u\n", support_device_change); 7356 } 7357 7358 static DRIVER_ATTR(support_device_change, S_IRUGO, 7359 megasas_sysfs_show_support_device_change, NULL); 7360 7361 static ssize_t 7362 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 7363 { 7364 return sprintf(buf, "%u\n", megasas_dbg_lvl); 7365 } 7366 7367 static ssize_t 7368 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) 7369 { 7370 int retval = count; 7371 7372 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 7373 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 7374 retval = -EINVAL; 7375 } 7376 return retval; 7377 } 7378 7379 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 7380 megasas_sysfs_set_dbg_lvl); 7381 7382 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 7383 { 7384 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 7385 scsi_remove_device(sdev); 7386 scsi_device_put(sdev); 7387 } 7388 7389 static void 7390 megasas_aen_polling(struct work_struct *work) 7391 { 7392 struct megasas_aen_event *ev = 7393 container_of(work, struct megasas_aen_event, hotplug_work.work); 7394 struct megasas_instance *instance = ev->instance; 7395 union megasas_evt_class_locale class_locale; 7396 struct Scsi_Host *host; 7397 struct scsi_device *sdev1; 7398 u16 pd_index = 0; 7399 u16 ld_index = 0; 7400 int i, j, doscan = 0; 7401 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 7402 int error; 7403 u8 dcmd_ret = DCMD_SUCCESS; 7404 7405 if (!instance) { 7406 printk(KERN_ERR "invalid instance!\n"); 7407 kfree(ev); 7408 return; 7409 } 7410 7411 /* Adjust event workqueue thread wait time for VF mode */ 7412 if (instance->requestorId) 7413 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 7414 7415 /* Don't run the event workqueue thread if OCR is running */ 7416 mutex_lock(&instance->reset_mutex); 7417 7418 instance->ev = NULL; 7419 host = instance->host; 7420 if (instance->evt_detail) { 7421 megasas_decode_evt(instance); 7422 7423 switch (le32_to_cpu(instance->evt_detail->code)) { 7424 7425 case MR_EVT_PD_INSERTED: 7426 case MR_EVT_PD_REMOVED: 7427 dcmd_ret = megasas_get_pd_list(instance); 7428 if (dcmd_ret == DCMD_SUCCESS) 7429 doscan = SCAN_PD_CHANNEL; 7430 break; 7431 7432 case MR_EVT_LD_OFFLINE: 7433 case MR_EVT_CFG_CLEARED: 7434 case MR_EVT_LD_DELETED: 7435 case MR_EVT_LD_CREATED: 7436 if (!instance->requestorId || 7437 (instance->requestorId && 
megasas_get_ld_vf_affiliation(instance, 0))) 7438 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7439 7440 if (dcmd_ret == DCMD_SUCCESS) 7441 doscan = SCAN_VD_CHANNEL; 7442 7443 break; 7444 7445 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 7446 case MR_EVT_FOREIGN_CFG_IMPORTED: 7447 case MR_EVT_LD_STATE_CHANGE: 7448 dcmd_ret = megasas_get_pd_list(instance); 7449 7450 if (dcmd_ret != DCMD_SUCCESS) 7451 break; 7452 7453 if (!instance->requestorId || 7454 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7455 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7456 7457 if (dcmd_ret != DCMD_SUCCESS) 7458 break; 7459 7460 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; 7461 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 7462 instance->host->host_no); 7463 break; 7464 7465 case MR_EVT_CTRL_PROP_CHANGED: 7466 dcmd_ret = megasas_get_ctrl_info(instance); 7467 break; 7468 default: 7469 doscan = 0; 7470 break; 7471 } 7472 } else { 7473 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 7474 mutex_unlock(&instance->reset_mutex); 7475 kfree(ev); 7476 return; 7477 } 7478 7479 mutex_unlock(&instance->reset_mutex); 7480 7481 if (doscan & SCAN_PD_CHANNEL) { 7482 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 7483 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7484 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 7485 sdev1 = scsi_device_lookup(host, i, j, 0); 7486 if (instance->pd_list[pd_index].driveState == 7487 MR_PD_STATE_SYSTEM) { 7488 if (!sdev1) 7489 scsi_add_device(host, i, j, 0); 7490 else 7491 scsi_device_put(sdev1); 7492 } else { 7493 if (sdev1) 7494 megasas_remove_scsi_device(sdev1); 7495 } 7496 } 7497 } 7498 } 7499 7500 if (doscan & SCAN_VD_CHANNEL) { 7501 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 7502 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7503 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 7504 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7505 if (instance->ld_ids[ld_index] != 0xff) { 7506 if (!sdev1) 7507 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7508 else 7509 scsi_device_put(sdev1); 7510 } else { 7511 if (sdev1) 7512 megasas_remove_scsi_device(sdev1); 7513 } 7514 } 7515 } 7516 } 7517 7518 if (dcmd_ret == DCMD_SUCCESS) 7519 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 7520 else 7521 seq_num = instance->last_seq_num; 7522 7523 /* Register AEN with FW for latest sequence number plus 1 */ 7524 class_locale.members.reserved = 0; 7525 class_locale.members.locale = MR_EVT_LOCALE_ALL; 7526 class_locale.members.class = MR_EVT_CLASS_DEBUG; 7527 7528 if (instance->aen_cmd != NULL) { 7529 kfree(ev); 7530 return; 7531 } 7532 7533 mutex_lock(&instance->reset_mutex); 7534 error = megasas_register_aen(instance, seq_num, 7535 class_locale.word); 7536 if (error) 7537 dev_err(&instance->pdev->dev, 7538 "register aen failed error %x\n", error); 7539 7540 mutex_unlock(&instance->reset_mutex); 7541 kfree(ev); 7542 } 7543 7544 /** 7545 * megasas_init - Driver load entry point 7546 */ 7547 static int __init megasas_init(void) 7548 { 7549 int rval; 7550 7551 /* 7552 * Booted in kdump kernel, minimize memory footprints by 7553 * disabling few features 7554 */ 7555 if (reset_devices) { 7556 msix_vectors = 1; 7557 rdpq_enable = 0; 7558 dual_qdepth_disable = 1; 7559 } 7560 7561 /* 7562 * Announce driver version and other information 7563 */ 7564 pr_info("megasas: %s\n", MEGASAS_VERSION); 7565 7566 spin_lock_init(&poll_aen_lock); 7567 7568 
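	/*
	 * Advertise optional driver capabilities to management applications:
	 * these two globals are exported read-only through the
	 * support_poll_for_event and support_device_change driver sysfs
	 * attributes registered below.
	 */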
support_poll_for_event = 2; 7569 support_device_change = 1; 7570 7571 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 7572 7573 /* 7574 * Register character device node 7575 */ 7576 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 7577 7578 if (rval < 0) { 7579 printk(KERN_DEBUG "megasas: failed to open device node\n"); 7580 return rval; 7581 } 7582 7583 megasas_mgmt_majorno = rval; 7584 7585 /* 7586 * Register ourselves as PCI hotplug module 7587 */ 7588 rval = pci_register_driver(&megasas_pci_driver); 7589 7590 if (rval) { 7591 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 7592 goto err_pcidrv; 7593 } 7594 7595 rval = driver_create_file(&megasas_pci_driver.driver, 7596 &driver_attr_version); 7597 if (rval) 7598 goto err_dcf_attr_ver; 7599 7600 rval = driver_create_file(&megasas_pci_driver.driver, 7601 &driver_attr_release_date); 7602 if (rval) 7603 goto err_dcf_rel_date; 7604 7605 rval = driver_create_file(&megasas_pci_driver.driver, 7606 &driver_attr_support_poll_for_event); 7607 if (rval) 7608 goto err_dcf_support_poll_for_event; 7609 7610 rval = driver_create_file(&megasas_pci_driver.driver, 7611 &driver_attr_dbg_lvl); 7612 if (rval) 7613 goto err_dcf_dbg_lvl; 7614 rval = driver_create_file(&megasas_pci_driver.driver, 7615 &driver_attr_support_device_change); 7616 if (rval) 7617 goto err_dcf_support_device_change; 7618 7619 return rval; 7620 7621 err_dcf_support_device_change: 7622 driver_remove_file(&megasas_pci_driver.driver, 7623 &driver_attr_dbg_lvl); 7624 err_dcf_dbg_lvl: 7625 driver_remove_file(&megasas_pci_driver.driver, 7626 &driver_attr_support_poll_for_event); 7627 err_dcf_support_poll_for_event: 7628 driver_remove_file(&megasas_pci_driver.driver, 7629 &driver_attr_release_date); 7630 err_dcf_rel_date: 7631 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7632 err_dcf_attr_ver: 7633 pci_unregister_driver(&megasas_pci_driver); 7634 err_pcidrv: 7635 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7636 return rval; 7637 } 7638 7639 /** 7640 * megasas_exit - Driver unload entry point 7641 */ 7642 static void __exit megasas_exit(void) 7643 { 7644 driver_remove_file(&megasas_pci_driver.driver, 7645 &driver_attr_dbg_lvl); 7646 driver_remove_file(&megasas_pci_driver.driver, 7647 &driver_attr_support_poll_for_event); 7648 driver_remove_file(&megasas_pci_driver.driver, 7649 &driver_attr_support_device_change); 7650 driver_remove_file(&megasas_pci_driver.driver, 7651 &driver_attr_release_date); 7652 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7653 7654 pci_unregister_driver(&megasas_pci_driver); 7655 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7656 } 7657 7658 module_init(megasas_init); 7659 module_exit(megasas_exit); 7660