/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2014 Avago Technologies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Authors: Avago Technologies
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@avagotech.com>
 *          Sumit Saxena <sumit.saxena@avagotech.com>
 *
 * Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 * San Jose, California 95131
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, S_IRUGO);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
	"before resetting adapter. Default: 180");

int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");

int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, S_IRUGO);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
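/*
 * Illustrative usage note (editorial, not upstream documentation): the module
 * parameters above can be set at load time, for example
 *
 *	modprobe megaraid_sas msix_vectors=8 scmd_timeout=60
 *
 * or, when the driver is built in, on the kernel command line as
 * megaraid_sas.msix_vectors=8.  The values shown are examples only.
 */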
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static int
megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);

int
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return 0;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}
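/*
 * Editorial note: megasas_get_cmd() returns NULL when the MFI pool is
 * exhausted; callers in this file either back off with
 * SCSI_MLQUEUE_HOST_BUSY (see megasas_build_and_issue_cmd()) or fail with
 * -ENOMEM (see megasas_get_ld_vf_affiliation_111()).  Every command obtained
 * here must eventually be handed back via megasas_return_cmd() below.
 */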
/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
inline void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:		Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
{
	return readl(&(regs)->outbound_msg_0);
}
/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}
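/*
 * Editorial note: the xscale post above packs the command into a single
 * 32-bit doorbell write: the frame physical address is shifted right by
 * three bits and the frame count is OR'ed into the low-order bits before
 * being written to inbound_queue_port (megasas_build_and_issue_cmd() passes
 * frame_count - 1 here).  The later IOP families use a different packing;
 * compare megasas_fire_cmd_ppc(), _skinny() and _gen2() below.
 */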
/**
 * megasas_adp_reset_xscale -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
{
	u32 status, mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}
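/*
 * Editorial note: posting on skinny controllers takes two 32-bit MMIO
 * writes (the high and low halves of the 64-bit frame address), so the
 * pair is serialized under hba_lock to keep postings from different CPUs
 * from interleaving, and mmiowb() orders the writes before the lock is
 * released.
 */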
/**
 * megasas_check_reset_skinny -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -	Enables interrupts
 * @regs:			MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 -	Disables interrupt
 * @regs:			MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs:			MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_intr_gen2 -	Check & clear interrupt
 * @regs:			MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}
/**
 * megasas_fire_cmd_gen2 -	Sends command to the FW
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @regs:			MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}
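/*
 * Editorial note on megasas_adp_reset_gen2(): the fixed write sequence
 * (0, 4, 0xb, 2, 7, 0xd) to the sequence register appears to act as an
 * unlock key for host diagnostic register writes (the code polls for
 * DIAG_WRITE_ENABLE immediately afterwards).  The reset itself is requested
 * by setting DIAG_RESET_ADAPTER, and the routine then polls for that bit to
 * clear, returning 1 if either poll loop exhausts its retries.
 */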
/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @regs:			MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}
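/*
 * Illustrative call pattern (editorial, mirrors what
 * megasas_get_ld_vf_affiliation_111() later in this file does): an internal
 * DCMD is a get/fill/issue/return sequence.
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	dcmd = &cmd->frame->dcmd;
 *	... initialize the DCMD opcode, flags and SGE ...
 *	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS)
 *		... handle failure ...
 *	megasas_return_cmd(instance, cmd);
 */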
/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort a previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny -	Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame- io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}
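/*
 * Worked example (editorial, assuming MEGAMFI_FRAME_SIZE is 64 bytes and a
 * 12-byte struct megasas_sge64): a 64-bit, non-IEEE LDIO with sge_count = 10
 * leaves num_cnt = 8 SGEs that do not fit in the main frame, so
 * sge_bytes = 96 and frame_count = 96/64 rounded up (2) plus the main
 * frame = 3, well under the cap of 8 frames.
 */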
/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == PCI_DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if ((scp->request->timeout / HZ) > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}
/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);

		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);

	}
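	/*
	 * Worked example (editorial): a READ(10) CDB of
	 * 28 00 00 12 34 56 00 00 08 00 is decoded by the branch above into
	 * start_lba_lo = 0x00123456 and lba_count = 8.
	 */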
	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type -	Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @cmd:		SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}
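/*
 * Editorial example: a READ(16) addressed to a logical drive is classified
 * as READ_WRITE_LDIO and built by megasas_build_ldio(), while an INQUIRY to
 * a system PD falls into the default branch as NON_READ_WRITE_SYSPDIO and is
 * built as a pass-through by megasas_build_dcdb() (see
 * megasas_build_and_issue_cmd() below).
 */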
/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u32 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * megasas_queue_command -	Queue entry point
 * @scmd:			SCSI command to be queued
 * @done:			Callback entry point
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_RESET << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd) &&
	    (scmd->device->id >= instance->fw_supported_vd_count ||
		scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
		/*
		 * FW takes care of flush cache on its own
		 * No need to send it down
		 */
		scmd->result = DID_OK << 16;
		goto out_done;
	default:
		break;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

out_done:
	scmd->scsi_done(scmd);
	return 0;
}

static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
 * megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_update_sdev_properties(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	u32 device_id, ld;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	if (!fusion)
		return;

	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
		instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	} else {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	}
}
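/*
 * Editorial example of the index math above: with MEGASAS_MAX_DEV_PER_CHANNEL
 * assumed to be 128, a system PD at channel 1, target 3 maps to
 * pd_index = 1 * 128 + 3 = 131, the index used both for
 * pd_sync->seq[pd_index] here and for instance->pd_list[pd_index] in the
 * functions below.
 */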
static void megasas_set_device_queue_depth(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	int ret = DCMD_FAILED;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);

	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;

		if (instance->pd_info) {
			mutex_lock(&instance->hba_mutex);
			ret = megasas_get_pd_info(instance, pd_index);
			mutex_unlock(&instance->hba_mutex);
		}

		if (ret != DCMD_SUCCESS)
			return;

		if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {

			switch (instance->pd_list[pd_index].interface) {
			case SAS_PD:
				scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
				break;

			case SATA_PD:
				scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
				break;

			default:
				scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
			}
		}
	}
}


static int megasas_slave_configure(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (instance->pd_list_not_supported) {
		if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
			sdev->type == TYPE_DISK) {
			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
				sdev->id;
			if (instance->pd_list[pd_index].driveState !=
				MR_PD_STATE_SYSTEM)
				return -ENXIO;
		}
	}
	megasas_set_device_queue_depth(sdev);
	megasas_update_sdev_properties(sdev);

	/*
	 * The RAID firmware may require extended timeouts.
	 */
	blk_queue_rq_timeout(sdev->request_queue,
		scmd_timeout * HZ);

	return 0;
}

static int megasas_slave_alloc(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
		/*
		 * Open the OS scan to the SYSTEM PD
		 */
		pd_index =
			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		if ((instance->pd_list_not_supported ||
			instance->pd_list[pd_index].driveState ==
			MR_PD_STATE_SYSTEM)) {
			goto scan_target;
		}
		return -ENXIO;
	}

scan_target:
	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
					GFP_KERNEL);
	if (!mr_device_priv_data)
		return -ENOMEM;
	sdev->hostdata = mr_device_priv_data;
	return 0;
}

static void megasas_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
 *                                       kill adapter
 * @instance:				 Adapter soft state
 *
 */
static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* Find all outstanding ioctls */
	if (fusion) {
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_fusion = fusion->cmd_list[i];
			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
				if (cmd_mfi->sync_cmd &&
				    cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
					megasas_complete_cmd(instance,
							     cmd_mfi, DID_OK);
			}
		}
	} else {
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_mfi = instance->cmd_list[i];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
				MFI_CMD_ABORT)
				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
		}
	}
}
Check if queue depth needs to be 1947 * restored to max value 1948 * @instance: Adapter soft state 1949 * 1950 */ 1951 void 1952 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 1953 { 1954 unsigned long flags; 1955 1956 if (instance->flag & MEGASAS_FW_BUSY 1957 && time_after(jiffies, instance->last_time + 5 * HZ) 1958 && atomic_read(&instance->fw_outstanding) < 1959 instance->throttlequeuedepth + 1) { 1960 1961 spin_lock_irqsave(instance->host->host_lock, flags); 1962 instance->flag &= ~MEGASAS_FW_BUSY; 1963 1964 instance->host->can_queue = instance->cur_can_queue; 1965 spin_unlock_irqrestore(instance->host->host_lock, flags); 1966 } 1967 } 1968 1969 /** 1970 * megasas_complete_cmd_dpc - Returns FW's controller structure 1971 * @instance_addr: Address of adapter soft state 1972 * 1973 * Tasklet to complete cmds 1974 */ 1975 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 1976 { 1977 u32 producer; 1978 u32 consumer; 1979 u32 context; 1980 struct megasas_cmd *cmd; 1981 struct megasas_instance *instance = 1982 (struct megasas_instance *)instance_addr; 1983 unsigned long flags; 1984 1985 /* If we have already declared adapter dead, donot complete cmds */ 1986 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 1987 return; 1988 1989 spin_lock_irqsave(&instance->completion_lock, flags); 1990 1991 producer = le32_to_cpu(*instance->producer); 1992 consumer = le32_to_cpu(*instance->consumer); 1993 1994 while (consumer != producer) { 1995 context = le32_to_cpu(instance->reply_queue[consumer]); 1996 if (context >= instance->max_fw_cmds) { 1997 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 1998 context); 1999 BUG(); 2000 } 2001 2002 cmd = instance->cmd_list[context]; 2003 2004 megasas_complete_cmd(instance, cmd, DID_OK); 2005 2006 consumer++; 2007 if (consumer == (instance->max_fw_cmds + 1)) { 2008 consumer = 0; 2009 } 2010 } 2011 2012 *instance->consumer = cpu_to_le32(producer); 2013 2014 spin_unlock_irqrestore(&instance->completion_lock, flags); 2015 2016 /* 2017 * Check if we can restore can_queue 2018 */ 2019 megasas_check_and_restore_queue_depth(instance); 2020 } 2021 2022 /** 2023 * megasas_start_timer - Initializes a timer object 2024 * @instance: Adapter soft state 2025 * @timer: timer object to be initialized 2026 * @fn: timer function 2027 * @interval: time interval between timer function call 2028 * 2029 */ 2030 void megasas_start_timer(struct megasas_instance *instance, 2031 struct timer_list *timer, 2032 void *fn, unsigned long interval) 2033 { 2034 init_timer(timer); 2035 timer->expires = jiffies + interval; 2036 timer->data = (unsigned long)instance; 2037 timer->function = fn; 2038 add_timer(timer); 2039 } 2040 2041 static void 2042 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2043 2044 static void 2045 process_fw_state_change_wq(struct work_struct *work); 2046 2047 void megasas_do_ocr(struct megasas_instance *instance) 2048 { 2049 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2050 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2051 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2052 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2053 } 2054 instance->instancet->disable_intr(instance); 2055 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2056 instance->issuepend_done = 0; 2057 2058 atomic_set(&instance->fw_outstanding, 0); 2059 megasas_internal_reset_defer_cmds(instance); 2060 
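	/*
	 * Descriptive note: pending sync/SCSI commands were just parked on
	 * internal_reset_pending_q by megasas_internal_reset_defer_cmds().
	 * The FW state-change handler is invoked directly below (not via
	 * schedule_work()) so the adapter reset, the transition back to
	 * MEGASAS_HBA_OPERATIONAL and the re-issue of the deferred
	 * commands all complete before megasas_do_ocr() returns.
	 */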
process_fw_state_change_wq(&instance->work_init); 2061 } 2062 2063 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2064 int initial) 2065 { 2066 struct megasas_cmd *cmd; 2067 struct megasas_dcmd_frame *dcmd; 2068 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2069 dma_addr_t new_affiliation_111_h; 2070 int ld, retval = 0; 2071 u8 thisVf; 2072 2073 cmd = megasas_get_cmd(instance); 2074 2075 if (!cmd) { 2076 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2077 "Failed to get cmd for scsi%d\n", 2078 instance->host->host_no); 2079 return -ENOMEM; 2080 } 2081 2082 dcmd = &cmd->frame->dcmd; 2083 2084 if (!instance->vf_affiliation_111) { 2085 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2086 "affiliation for scsi%d\n", instance->host->host_no); 2087 megasas_return_cmd(instance, cmd); 2088 return -ENOMEM; 2089 } 2090 2091 if (initial) 2092 memset(instance->vf_affiliation_111, 0, 2093 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2094 else { 2095 new_affiliation_111 = 2096 pci_alloc_consistent(instance->pdev, 2097 sizeof(struct MR_LD_VF_AFFILIATION_111), 2098 &new_affiliation_111_h); 2099 if (!new_affiliation_111) { 2100 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2101 "memory for new affiliation for scsi%d\n", 2102 instance->host->host_no); 2103 megasas_return_cmd(instance, cmd); 2104 return -ENOMEM; 2105 } 2106 memset(new_affiliation_111, 0, 2107 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2108 } 2109 2110 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2111 2112 dcmd->cmd = MFI_CMD_DCMD; 2113 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2114 dcmd->sge_count = 1; 2115 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2116 dcmd->timeout = 0; 2117 dcmd->pad_0 = 0; 2118 dcmd->data_xfer_len = 2119 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2120 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2121 2122 if (initial) 2123 dcmd->sgl.sge32[0].phys_addr = 2124 cpu_to_le32(instance->vf_affiliation_111_h); 2125 else 2126 dcmd->sgl.sge32[0].phys_addr = 2127 cpu_to_le32(new_affiliation_111_h); 2128 2129 dcmd->sgl.sge32[0].length = cpu_to_le32( 2130 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2131 2132 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2133 "scsi%d\n", instance->host->host_no); 2134 2135 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2136 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2137 " failed with status 0x%x for scsi%d\n", 2138 dcmd->cmd_status, instance->host->host_no); 2139 retval = 1; /* Do a scan if we couldn't get affiliation */ 2140 goto out; 2141 } 2142 2143 if (!initial) { 2144 thisVf = new_affiliation_111->thisVf; 2145 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2146 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2147 new_affiliation_111->map[ld].policy[thisVf]) { 2148 dev_warn(&instance->pdev->dev, "SR-IOV: " 2149 "Got new LD/VF affiliation for scsi%d\n", 2150 instance->host->host_no); 2151 memcpy(instance->vf_affiliation_111, 2152 new_affiliation_111, 2153 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2154 retval = 1; 2155 goto out; 2156 } 2157 } 2158 out: 2159 if (new_affiliation_111) { 2160 pci_free_consistent(instance->pdev, 2161 sizeof(struct MR_LD_VF_AFFILIATION_111), 2162 new_affiliation_111, 2163 new_affiliation_111_h); 2164 } 2165 2166 megasas_return_cmd(instance, cmd); 2167 2168 return retval; 2169 } 2170 2171 static int megasas_get_ld_vf_affiliation_12(struct 
megasas_instance *instance, 2172 int initial) 2173 { 2174 struct megasas_cmd *cmd; 2175 struct megasas_dcmd_frame *dcmd; 2176 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2177 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2178 dma_addr_t new_affiliation_h; 2179 int i, j, retval = 0, found = 0, doscan = 0; 2180 u8 thisVf; 2181 2182 cmd = megasas_get_cmd(instance); 2183 2184 if (!cmd) { 2185 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2186 "Failed to get cmd for scsi%d\n", 2187 instance->host->host_no); 2188 return -ENOMEM; 2189 } 2190 2191 dcmd = &cmd->frame->dcmd; 2192 2193 if (!instance->vf_affiliation) { 2194 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2195 "affiliation for scsi%d\n", instance->host->host_no); 2196 megasas_return_cmd(instance, cmd); 2197 return -ENOMEM; 2198 } 2199 2200 if (initial) 2201 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2202 sizeof(struct MR_LD_VF_AFFILIATION)); 2203 else { 2204 new_affiliation = 2205 pci_alloc_consistent(instance->pdev, 2206 (MAX_LOGICAL_DRIVES + 1) * 2207 sizeof(struct MR_LD_VF_AFFILIATION), 2208 &new_affiliation_h); 2209 if (!new_affiliation) { 2210 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2211 "memory for new affiliation for scsi%d\n", 2212 instance->host->host_no); 2213 megasas_return_cmd(instance, cmd); 2214 return -ENOMEM; 2215 } 2216 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2217 sizeof(struct MR_LD_VF_AFFILIATION)); 2218 } 2219 2220 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2221 2222 dcmd->cmd = MFI_CMD_DCMD; 2223 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2224 dcmd->sge_count = 1; 2225 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2226 dcmd->timeout = 0; 2227 dcmd->pad_0 = 0; 2228 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2229 sizeof(struct MR_LD_VF_AFFILIATION)); 2230 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2231 2232 if (initial) 2233 dcmd->sgl.sge32[0].phys_addr = 2234 cpu_to_le32(instance->vf_affiliation_h); 2235 else 2236 dcmd->sgl.sge32[0].phys_addr = 2237 cpu_to_le32(new_affiliation_h); 2238 2239 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2240 sizeof(struct MR_LD_VF_AFFILIATION)); 2241 2242 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2243 "scsi%d\n", instance->host->host_no); 2244 2245 2246 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2247 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2248 " failed with status 0x%x for scsi%d\n", 2249 dcmd->cmd_status, instance->host->host_no); 2250 retval = 1; /* Do a scan if we couldn't get affiliation */ 2251 goto out; 2252 } 2253 2254 if (!initial) { 2255 if (!new_affiliation->ldCount) { 2256 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2257 "affiliation for passive path for scsi%d\n", 2258 instance->host->host_no); 2259 retval = 1; 2260 goto out; 2261 } 2262 newmap = new_affiliation->map; 2263 savedmap = instance->vf_affiliation->map; 2264 thisVf = new_affiliation->thisVf; 2265 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2266 found = 0; 2267 for (j = 0; j < instance->vf_affiliation->ldCount; 2268 j++) { 2269 if (newmap->ref.targetId == 2270 savedmap->ref.targetId) { 2271 found = 1; 2272 if (newmap->policy[thisVf] != 2273 savedmap->policy[thisVf]) { 2274 doscan = 1; 2275 goto out; 2276 } 2277 } 2278 savedmap = (struct MR_LD_VF_MAP *) 2279 ((unsigned char *)savedmap + 2280 savedmap->size); 2281 } 2282 if (!found && 
newmap->policy[thisVf] != 2283 MR_LD_ACCESS_HIDDEN) { 2284 doscan = 1; 2285 goto out; 2286 } 2287 newmap = (struct MR_LD_VF_MAP *) 2288 ((unsigned char *)newmap + newmap->size); 2289 } 2290 2291 newmap = new_affiliation->map; 2292 savedmap = instance->vf_affiliation->map; 2293 2294 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2295 found = 0; 2296 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2297 if (savedmap->ref.targetId == 2298 newmap->ref.targetId) { 2299 found = 1; 2300 if (savedmap->policy[thisVf] != 2301 newmap->policy[thisVf]) { 2302 doscan = 1; 2303 goto out; 2304 } 2305 } 2306 newmap = (struct MR_LD_VF_MAP *) 2307 ((unsigned char *)newmap + 2308 newmap->size); 2309 } 2310 if (!found && savedmap->policy[thisVf] != 2311 MR_LD_ACCESS_HIDDEN) { 2312 doscan = 1; 2313 goto out; 2314 } 2315 savedmap = (struct MR_LD_VF_MAP *) 2316 ((unsigned char *)savedmap + 2317 savedmap->size); 2318 } 2319 } 2320 out: 2321 if (doscan) { 2322 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2323 "affiliation for scsi%d\n", instance->host->host_no); 2324 memcpy(instance->vf_affiliation, new_affiliation, 2325 new_affiliation->size); 2326 retval = 1; 2327 } 2328 2329 if (new_affiliation) 2330 pci_free_consistent(instance->pdev, 2331 (MAX_LOGICAL_DRIVES + 1) * 2332 sizeof(struct MR_LD_VF_AFFILIATION), 2333 new_affiliation, new_affiliation_h); 2334 megasas_return_cmd(instance, cmd); 2335 2336 return retval; 2337 } 2338 2339 /* This function will get the current SR-IOV LD/VF affiliation */ 2340 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2341 int initial) 2342 { 2343 int retval; 2344 2345 if (instance->PlasmaFW111) 2346 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2347 else 2348 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2349 return retval; 2350 } 2351 2352 /* This function will tell FW to start the SR-IOV heartbeat */ 2353 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2354 int initial) 2355 { 2356 struct megasas_cmd *cmd; 2357 struct megasas_dcmd_frame *dcmd; 2358 int retval = 0; 2359 2360 cmd = megasas_get_cmd(instance); 2361 2362 if (!cmd) { 2363 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2364 "Failed to get cmd for scsi%d\n", 2365 instance->host->host_no); 2366 return -ENOMEM; 2367 } 2368 2369 dcmd = &cmd->frame->dcmd; 2370 2371 if (initial) { 2372 instance->hb_host_mem = 2373 pci_zalloc_consistent(instance->pdev, 2374 sizeof(struct MR_CTRL_HB_HOST_MEM), 2375 &instance->hb_host_mem_h); 2376 if (!instance->hb_host_mem) { 2377 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2378 " memory for heartbeat host memory for scsi%d\n", 2379 instance->host->host_no); 2380 retval = -ENOMEM; 2381 goto out; 2382 } 2383 } 2384 2385 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2386 2387 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2388 dcmd->cmd = MFI_CMD_DCMD; 2389 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2390 dcmd->sge_count = 1; 2391 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2392 dcmd->timeout = 0; 2393 dcmd->pad_0 = 0; 2394 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2395 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2396 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h); 2397 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2398 2399 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2400 
instance->host->host_no); 2401 2402 if (instance->ctrl_context && !instance->mask_interrupts) 2403 retval = megasas_issue_blocked_cmd(instance, cmd, 2404 MEGASAS_ROUTINE_WAIT_TIME_VF); 2405 else 2406 retval = megasas_issue_polled(instance, cmd); 2407 2408 if (retval) { 2409 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2410 "_MEM_ALLOC DCMD %s for scsi%d\n", 2411 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2412 "timed out" : "failed", instance->host->host_no); 2413 retval = 1; 2414 } 2415 2416 out: 2417 megasas_return_cmd(instance, cmd); 2418 2419 return retval; 2420 } 2421 2422 /* Handler for SR-IOV heartbeat */ 2423 void megasas_sriov_heartbeat_handler(unsigned long instance_addr) 2424 { 2425 struct megasas_instance *instance = 2426 (struct megasas_instance *)instance_addr; 2427 2428 if (instance->hb_host_mem->HB.fwCounter != 2429 instance->hb_host_mem->HB.driverCounter) { 2430 instance->hb_host_mem->HB.driverCounter = 2431 instance->hb_host_mem->HB.fwCounter; 2432 mod_timer(&instance->sriov_heartbeat_timer, 2433 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2434 } else { 2435 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2436 "completed for scsi%d\n", instance->host->host_no); 2437 schedule_work(&instance->work_init); 2438 } 2439 } 2440 2441 /** 2442 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2443 * @instance: Adapter soft state 2444 * 2445 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2446 * complete all its outstanding commands. Returns error if one or more IOs 2447 * are pending after this time period. It also marks the controller dead. 2448 */ 2449 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2450 { 2451 int i, sl, outstanding; 2452 u32 reset_index; 2453 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2454 unsigned long flags; 2455 struct list_head clist_local; 2456 struct megasas_cmd *reset_cmd; 2457 u32 fw_state; 2458 2459 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2460 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2461 __func__, __LINE__); 2462 return FAILED; 2463 } 2464 2465 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2466 2467 INIT_LIST_HEAD(&clist_local); 2468 spin_lock_irqsave(&instance->hba_lock, flags); 2469 list_splice_init(&instance->internal_reset_pending_q, 2470 &clist_local); 2471 spin_unlock_irqrestore(&instance->hba_lock, flags); 2472 2473 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2474 for (i = 0; i < wait_time; i++) { 2475 msleep(1000); 2476 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2477 break; 2478 } 2479 2480 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2481 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2482 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2483 return FAILED; 2484 } 2485 2486 reset_index = 0; 2487 while (!list_empty(&clist_local)) { 2488 reset_cmd = list_entry((&clist_local)->next, 2489 struct megasas_cmd, list); 2490 list_del_init(&reset_cmd->list); 2491 if (reset_cmd->scmd) { 2492 reset_cmd->scmd->result = DID_RESET << 16; 2493 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2494 reset_index, reset_cmd, 2495 reset_cmd->scmd->cmnd[0]); 2496 2497 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2498 megasas_return_cmd(instance, reset_cmd); 2499 } else if (reset_cmd->sync_cmd) { 2500 dev_notice(&instance->pdev->dev, "%p synch cmds" 2501 "reset queue\n", 2502 reset_cmd); 2503 
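				/*
				 * Descriptive note: internal (sync) commands
				 * that were pending across the reset are
				 * re-fired to the controller below;
				 * cmd_status_drv is invalidated first so the
				 * waiter blocked in
				 * megasas_issue_blocked_cmd() only wakes up
				 * on the fresh completion.
				 */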
2504 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2505 instance->instancet->fire_cmd(instance, 2506 reset_cmd->frame_phys_addr, 2507 0, instance->reg_set); 2508 } else { 2509 dev_notice(&instance->pdev->dev, "%p unexpected" 2510 "cmds lst\n", 2511 reset_cmd); 2512 } 2513 reset_index++; 2514 } 2515 2516 return SUCCESS; 2517 } 2518 2519 for (i = 0; i < resetwaittime; i++) { 2520 outstanding = atomic_read(&instance->fw_outstanding); 2521 2522 if (!outstanding) 2523 break; 2524 2525 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2526 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2527 "commands to complete\n",i,outstanding); 2528 /* 2529 * Call cmd completion routine. Cmd to be 2530 * be completed directly without depending on isr. 2531 */ 2532 megasas_complete_cmd_dpc((unsigned long)instance); 2533 } 2534 2535 msleep(1000); 2536 } 2537 2538 i = 0; 2539 outstanding = atomic_read(&instance->fw_outstanding); 2540 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2541 2542 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2543 goto no_outstanding; 2544 2545 if (instance->disableOnlineCtrlReset) 2546 goto kill_hba_and_failed; 2547 do { 2548 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2549 dev_info(&instance->pdev->dev, 2550 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n", 2551 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2552 if (i == 3) 2553 goto kill_hba_and_failed; 2554 megasas_do_ocr(instance); 2555 2556 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2557 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2558 __func__, __LINE__); 2559 return FAILED; 2560 } 2561 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2562 __func__, __LINE__); 2563 2564 for (sl = 0; sl < 10; sl++) 2565 msleep(500); 2566 2567 outstanding = atomic_read(&instance->fw_outstanding); 2568 2569 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2570 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2571 goto no_outstanding; 2572 } 2573 i++; 2574 } while (i <= 3); 2575 2576 no_outstanding: 2577 2578 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2579 __func__, __LINE__); 2580 return SUCCESS; 2581 2582 kill_hba_and_failed: 2583 2584 /* Reset not supported, kill adapter */ 2585 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2586 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2587 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2588 atomic_read(&instance->fw_outstanding)); 2589 megasas_dump_pending_frames(instance); 2590 megaraid_sas_kill_hba(instance); 2591 2592 return FAILED; 2593 } 2594 2595 /** 2596 * megasas_generic_reset - Generic reset routine 2597 * @scmd: Mid-layer SCSI command 2598 * 2599 * This routine implements a generic reset handler for device, bus and host 2600 * reset requests. Device, bus and host specific reset handlers can use this 2601 * function after they do their specific tasks. 
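 *
 * For adapters without a Fusion context this is the path taken by
 * megasas_reset_bus_host(); it only waits for outstanding commands via
 * megasas_wait_for_outstanding() and returns SUCCESS or FAILED, leaving
 * any controller-level recovery (OCR or killing the HBA) to that helper.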
2602 */ 2603 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2604 { 2605 int ret_val; 2606 struct megasas_instance *instance; 2607 2608 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2609 2610 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2611 scmd->cmnd[0], scmd->retries); 2612 2613 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2614 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2615 return FAILED; 2616 } 2617 2618 ret_val = megasas_wait_for_outstanding(instance); 2619 if (ret_val == SUCCESS) 2620 dev_notice(&instance->pdev->dev, "reset successful\n"); 2621 else 2622 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2623 2624 return ret_val; 2625 } 2626 2627 /** 2628 * megasas_reset_timer - quiesce the adapter if required 2629 * @scmd: scsi cmnd 2630 * 2631 * Sets the FW busy flag and reduces the host->can_queue if the 2632 * cmd has not been completed within the timeout period. 2633 */ 2634 static enum 2635 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2636 { 2637 struct megasas_instance *instance; 2638 unsigned long flags; 2639 2640 if (time_after(jiffies, scmd->jiffies_at_alloc + 2641 (scmd_timeout * 2) * HZ)) { 2642 return BLK_EH_NOT_HANDLED; 2643 } 2644 2645 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2646 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2647 /* FW is busy, throttle IO */ 2648 spin_lock_irqsave(instance->host->host_lock, flags); 2649 2650 instance->host->can_queue = instance->throttlequeuedepth; 2651 instance->last_time = jiffies; 2652 instance->flag |= MEGASAS_FW_BUSY; 2653 2654 spin_unlock_irqrestore(instance->host->host_lock, flags); 2655 } 2656 return BLK_EH_RESET_TIMER; 2657 } 2658 2659 /** 2660 * megasas_reset_bus_host - Bus & host reset handler entry point 2661 */ 2662 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2663 { 2664 int ret; 2665 struct megasas_instance *instance; 2666 2667 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2668 2669 /* 2670 * First wait for all commands to complete 2671 */ 2672 if (instance->ctrl_context) 2673 ret = megasas_reset_fusion(scmd->device->host, 1); 2674 else 2675 ret = megasas_generic_reset(scmd); 2676 2677 return ret; 2678 } 2679 2680 /** 2681 * megasas_task_abort - Issues task abort request to firmware 2682 * (supported only for fusion adapters) 2683 * @scmd: SCSI command pointer 2684 */ 2685 static int megasas_task_abort(struct scsi_cmnd *scmd) 2686 { 2687 int ret; 2688 struct megasas_instance *instance; 2689 2690 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2691 2692 if (instance->ctrl_context) 2693 ret = megasas_task_abort_fusion(scmd); 2694 else { 2695 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2696 ret = FAILED; 2697 } 2698 2699 return ret; 2700 } 2701 2702 /** 2703 * megasas_reset_target: Issues target reset request to firmware 2704 * (supported only for fusion adapters) 2705 * @scmd: SCSI command pointer 2706 */ 2707 static int megasas_reset_target(struct scsi_cmnd *scmd) 2708 { 2709 int ret; 2710 struct megasas_instance *instance; 2711 2712 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2713 2714 if (instance->ctrl_context) 2715 ret = megasas_reset_target_fusion(scmd); 2716 else { 2717 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2718 ret = FAILED; 2719 } 2720 2721 return ret; 2722 } 2723 2724 /** 2725 * megasas_bios_param - Returns 
disk geometry for a disk 2726 * @sdev: device handle 2727 * @bdev: block device 2728 * @capacity: drive capacity 2729 * @geom: geometry parameters 2730 */ 2731 static int 2732 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2733 sector_t capacity, int geom[]) 2734 { 2735 int heads; 2736 int sectors; 2737 sector_t cylinders; 2738 unsigned long tmp; 2739 2740 /* Default heads (64) & sectors (32) */ 2741 heads = 64; 2742 sectors = 32; 2743 2744 tmp = heads * sectors; 2745 cylinders = capacity; 2746 2747 sector_div(cylinders, tmp); 2748 2749 /* 2750 * Handle extended translation size for logical drives > 1Gb 2751 */ 2752 2753 if (capacity >= 0x200000) { 2754 heads = 255; 2755 sectors = 63; 2756 tmp = heads*sectors; 2757 cylinders = capacity; 2758 sector_div(cylinders, tmp); 2759 } 2760 2761 geom[0] = heads; 2762 geom[1] = sectors; 2763 geom[2] = cylinders; 2764 2765 return 0; 2766 } 2767 2768 static void megasas_aen_polling(struct work_struct *work); 2769 2770 /** 2771 * megasas_service_aen - Processes an event notification 2772 * @instance: Adapter soft state 2773 * @cmd: AEN command completed by the ISR 2774 * 2775 * For AEN, driver sends a command down to FW that is held by the FW till an 2776 * event occurs. When an event of interest occurs, FW completes the command 2777 * that it was previously holding. 2778 * 2779 * This routines sends SIGIO signal to processes that have registered with the 2780 * driver for AEN. 2781 */ 2782 static void 2783 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2784 { 2785 unsigned long flags; 2786 2787 /* 2788 * Don't signal app if it is just an aborted previously registered aen 2789 */ 2790 if ((!cmd->abort_aen) && (instance->unload == 0)) { 2791 spin_lock_irqsave(&poll_aen_lock, flags); 2792 megasas_poll_wait_aen = 1; 2793 spin_unlock_irqrestore(&poll_aen_lock, flags); 2794 wake_up(&megasas_poll_wait); 2795 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 2796 } 2797 else 2798 cmd->abort_aen = 0; 2799 2800 instance->aen_cmd = NULL; 2801 2802 megasas_return_cmd(instance, cmd); 2803 2804 if ((instance->unload == 0) && 2805 ((instance->issuepend_done == 1))) { 2806 struct megasas_aen_event *ev; 2807 2808 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2809 if (!ev) { 2810 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 2811 } else { 2812 ev->instance = instance; 2813 instance->ev = ev; 2814 INIT_DELAYED_WORK(&ev->hotplug_work, 2815 megasas_aen_polling); 2816 schedule_delayed_work(&ev->hotplug_work, 0); 2817 } 2818 } 2819 } 2820 2821 static ssize_t 2822 megasas_fw_crash_buffer_store(struct device *cdev, 2823 struct device_attribute *attr, const char *buf, size_t count) 2824 { 2825 struct Scsi_Host *shost = class_to_shost(cdev); 2826 struct megasas_instance *instance = 2827 (struct megasas_instance *) shost->hostdata; 2828 int val = 0; 2829 unsigned long flags; 2830 2831 if (kstrtoint(buf, 0, &val) != 0) 2832 return -EINVAL; 2833 2834 spin_lock_irqsave(&instance->crashdump_lock, flags); 2835 instance->fw_crash_buffer_offset = val; 2836 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2837 return strlen(buf); 2838 } 2839 2840 static ssize_t 2841 megasas_fw_crash_buffer_show(struct device *cdev, 2842 struct device_attribute *attr, char *buf) 2843 { 2844 struct Scsi_Host *shost = class_to_shost(cdev); 2845 struct megasas_instance *instance = 2846 (struct megasas_instance *) shost->hostdata; 2847 u32 size; 2848 unsigned long buff_addr; 2849 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 
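	/*
	 * Descriptive note: the crash dump is held in dmachunk-sized
	 * pieces in instance->crash_buf[]; each read returns at most
	 * PAGE_SIZE - 1 bytes starting at the offset last written to the
	 * fw_crash_buffer attribute (see the _store handler above).
	 * User space is therefore expected to walk the dump by
	 * alternately writing an offset and reading until
	 * fw_crash_buffer_size * dmachunk bytes have been consumed
	 * (this usage note is inferred from the code, not a documented ABI).
	 */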
2850 unsigned long src_addr; 2851 unsigned long flags; 2852 u32 buff_offset; 2853 2854 spin_lock_irqsave(&instance->crashdump_lock, flags); 2855 buff_offset = instance->fw_crash_buffer_offset; 2856 if (!instance->crash_dump_buf && 2857 !((instance->fw_crash_state == AVAILABLE) || 2858 (instance->fw_crash_state == COPYING))) { 2859 dev_err(&instance->pdev->dev, 2860 "Firmware crash dump is not available\n"); 2861 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2862 return -EINVAL; 2863 } 2864 2865 buff_addr = (unsigned long) buf; 2866 2867 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 2868 dev_err(&instance->pdev->dev, 2869 "Firmware crash dump offset is out of range\n"); 2870 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2871 return 0; 2872 } 2873 2874 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 2875 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 2876 2877 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 2878 (buff_offset % dmachunk); 2879 memcpy(buf, (void *)src_addr, size); 2880 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2881 2882 return size; 2883 } 2884 2885 static ssize_t 2886 megasas_fw_crash_buffer_size_show(struct device *cdev, 2887 struct device_attribute *attr, char *buf) 2888 { 2889 struct Scsi_Host *shost = class_to_shost(cdev); 2890 struct megasas_instance *instance = 2891 (struct megasas_instance *) shost->hostdata; 2892 2893 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 2894 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 2895 } 2896 2897 static ssize_t 2898 megasas_fw_crash_state_store(struct device *cdev, 2899 struct device_attribute *attr, const char *buf, size_t count) 2900 { 2901 struct Scsi_Host *shost = class_to_shost(cdev); 2902 struct megasas_instance *instance = 2903 (struct megasas_instance *) shost->hostdata; 2904 int val = 0; 2905 unsigned long flags; 2906 2907 if (kstrtoint(buf, 0, &val) != 0) 2908 return -EINVAL; 2909 2910 if ((val <= AVAILABLE || val > COPY_ERROR)) { 2911 dev_err(&instance->pdev->dev, "application updates invalid " 2912 "firmware crash state\n"); 2913 return -EINVAL; 2914 } 2915 2916 instance->fw_crash_state = val; 2917 2918 if ((val == COPIED) || (val == COPY_ERROR)) { 2919 spin_lock_irqsave(&instance->crashdump_lock, flags); 2920 megasas_free_host_crash_buffer(instance); 2921 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2922 if (val == COPY_ERROR) 2923 dev_info(&instance->pdev->dev, "application failed to " 2924 "copy Firmware crash dump\n"); 2925 else 2926 dev_info(&instance->pdev->dev, "Firmware crash dump " 2927 "copied successfully\n"); 2928 } 2929 return strlen(buf); 2930 } 2931 2932 static ssize_t 2933 megasas_fw_crash_state_show(struct device *cdev, 2934 struct device_attribute *attr, char *buf) 2935 { 2936 struct Scsi_Host *shost = class_to_shost(cdev); 2937 struct megasas_instance *instance = 2938 (struct megasas_instance *) shost->hostdata; 2939 2940 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 2941 } 2942 2943 static ssize_t 2944 megasas_page_size_show(struct device *cdev, 2945 struct device_attribute *attr, char *buf) 2946 { 2947 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 2948 } 2949 2950 static ssize_t 2951 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 2952 char *buf) 2953 { 2954 struct Scsi_Host *shost = class_to_shost(cdev); 2955 struct megasas_instance *instance = (struct megasas_instance 
*)shost->hostdata; 2956 2957 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 2958 } 2959 2960 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 2961 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 2962 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 2963 megasas_fw_crash_buffer_size_show, NULL); 2964 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 2965 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 2966 static DEVICE_ATTR(page_size, S_IRUGO, 2967 megasas_page_size_show, NULL); 2968 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 2969 megasas_ldio_outstanding_show, NULL); 2970 2971 struct device_attribute *megaraid_host_attrs[] = { 2972 &dev_attr_fw_crash_buffer_size, 2973 &dev_attr_fw_crash_buffer, 2974 &dev_attr_fw_crash_state, 2975 &dev_attr_page_size, 2976 &dev_attr_ldio_outstanding, 2977 NULL, 2978 }; 2979 2980 /* 2981 * Scsi host template for megaraid_sas driver 2982 */ 2983 static struct scsi_host_template megasas_template = { 2984 2985 .module = THIS_MODULE, 2986 .name = "Avago SAS based MegaRAID driver", 2987 .proc_name = "megaraid_sas", 2988 .slave_configure = megasas_slave_configure, 2989 .slave_alloc = megasas_slave_alloc, 2990 .slave_destroy = megasas_slave_destroy, 2991 .queuecommand = megasas_queue_command, 2992 .eh_target_reset_handler = megasas_reset_target, 2993 .eh_abort_handler = megasas_task_abort, 2994 .eh_host_reset_handler = megasas_reset_bus_host, 2995 .eh_timed_out = megasas_reset_timer, 2996 .shost_attrs = megaraid_host_attrs, 2997 .bios_param = megasas_bios_param, 2998 .use_clustering = ENABLE_CLUSTERING, 2999 .change_queue_depth = scsi_change_queue_depth, 3000 .no_write_same = 1, 3001 }; 3002 3003 /** 3004 * megasas_complete_int_cmd - Completes an internal command 3005 * @instance: Adapter soft state 3006 * @cmd: Command to be completed 3007 * 3008 * The megasas_issue_blocked_cmd() function waits for a command to complete 3009 * after it issues a command. This function wakes up that waiting routine by 3010 * calling wake_up() on the wait queue. 3011 */ 3012 static void 3013 megasas_complete_int_cmd(struct megasas_instance *instance, 3014 struct megasas_cmd *cmd) 3015 { 3016 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3017 wake_up(&instance->int_cmd_wait_q); 3018 } 3019 3020 /** 3021 * megasas_complete_abort - Completes aborting a command 3022 * @instance: Adapter soft state 3023 * @cmd: Cmd that was issued to abort another cmd 3024 * 3025 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3026 * after it issues an abort on a previously issued command. This function 3027 * wakes up all functions waiting on the same wait queue. 3028 */ 3029 static void 3030 megasas_complete_abort(struct megasas_instance *instance, 3031 struct megasas_cmd *cmd) 3032 { 3033 if (cmd->sync_cmd) { 3034 cmd->sync_cmd = 0; 3035 cmd->cmd_status_drv = 0; 3036 wake_up(&instance->abort_cmd_wait_q); 3037 } 3038 } 3039 3040 /** 3041 * megasas_complete_cmd - Completes a command 3042 * @instance: Adapter soft state 3043 * @cmd: Command to be completed 3044 * @alt_status: If non-zero, use this value as status to 3045 * SCSI mid-layer instead of the value returned 3046 * by the FW. 
This should be used if caller wants 3047 * an alternate status (as in the case of aborted 3048 * commands) 3049 */ 3050 void 3051 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3052 u8 alt_status) 3053 { 3054 int exception = 0; 3055 struct megasas_header *hdr = &cmd->frame->hdr; 3056 unsigned long flags; 3057 struct fusion_context *fusion = instance->ctrl_context; 3058 u32 opcode, status; 3059 3060 /* flag for the retry reset */ 3061 cmd->retry_for_fw_reset = 0; 3062 3063 if (cmd->scmd) 3064 cmd->scmd->SCp.ptr = NULL; 3065 3066 switch (hdr->cmd) { 3067 case MFI_CMD_INVALID: 3068 /* Some older 1068 controller FW may keep a pended 3069 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3070 when booting the kdump kernel. Ignore this command to 3071 prevent a kernel panic on shutdown of the kdump kernel. */ 3072 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3073 "completed\n"); 3074 dev_warn(&instance->pdev->dev, "If you have a controller " 3075 "other than PERC5, please upgrade your firmware\n"); 3076 break; 3077 case MFI_CMD_PD_SCSI_IO: 3078 case MFI_CMD_LD_SCSI_IO: 3079 3080 /* 3081 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3082 * issued either through an IO path or an IOCTL path. If it 3083 * was via IOCTL, we will send it to internal completion. 3084 */ 3085 if (cmd->sync_cmd) { 3086 cmd->sync_cmd = 0; 3087 megasas_complete_int_cmd(instance, cmd); 3088 break; 3089 } 3090 3091 case MFI_CMD_LD_READ: 3092 case MFI_CMD_LD_WRITE: 3093 3094 if (alt_status) { 3095 cmd->scmd->result = alt_status << 16; 3096 exception = 1; 3097 } 3098 3099 if (exception) { 3100 3101 atomic_dec(&instance->fw_outstanding); 3102 3103 scsi_dma_unmap(cmd->scmd); 3104 cmd->scmd->scsi_done(cmd->scmd); 3105 megasas_return_cmd(instance, cmd); 3106 3107 break; 3108 } 3109 3110 switch (hdr->cmd_status) { 3111 3112 case MFI_STAT_OK: 3113 cmd->scmd->result = DID_OK << 16; 3114 break; 3115 3116 case MFI_STAT_SCSI_IO_FAILED: 3117 case MFI_STAT_LD_INIT_IN_PROGRESS: 3118 cmd->scmd->result = 3119 (DID_ERROR << 16) | hdr->scsi_status; 3120 break; 3121 3122 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3123 3124 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3125 3126 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3127 memset(cmd->scmd->sense_buffer, 0, 3128 SCSI_SENSE_BUFFERSIZE); 3129 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3130 hdr->sense_len); 3131 3132 cmd->scmd->result |= DRIVER_SENSE << 24; 3133 } 3134 3135 break; 3136 3137 case MFI_STAT_LD_OFFLINE: 3138 case MFI_STAT_DEVICE_NOT_FOUND: 3139 cmd->scmd->result = DID_BAD_TARGET << 16; 3140 break; 3141 3142 default: 3143 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3144 hdr->cmd_status); 3145 cmd->scmd->result = DID_ERROR << 16; 3146 break; 3147 } 3148 3149 atomic_dec(&instance->fw_outstanding); 3150 3151 scsi_dma_unmap(cmd->scmd); 3152 cmd->scmd->scsi_done(cmd->scmd); 3153 megasas_return_cmd(instance, cmd); 3154 3155 break; 3156 3157 case MFI_CMD_SMP: 3158 case MFI_CMD_STP: 3159 case MFI_CMD_DCMD: 3160 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3161 /* Check for LD map update */ 3162 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3163 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3164 fusion->fast_path_io = 0; 3165 spin_lock_irqsave(instance->host->host_lock, flags); 3166 instance->map_update_cmd = NULL; 3167 if (cmd->frame->hdr.cmd_status != 0) { 3168 if (cmd->frame->hdr.cmd_status != 3169 MFI_STAT_NOT_FOUND) 3170 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3171 
cmd->frame->hdr.cmd_status); 3172 else { 3173 megasas_return_cmd(instance, cmd); 3174 spin_unlock_irqrestore( 3175 instance->host->host_lock, 3176 flags); 3177 break; 3178 } 3179 } else 3180 instance->map_id++; 3181 megasas_return_cmd(instance, cmd); 3182 3183 /* 3184 * Set fast path IO to ZERO. 3185 * Validate Map will set proper value. 3186 * Meanwhile all IOs will go as LD IO. 3187 */ 3188 if (MR_ValidateMapInfo(instance)) 3189 fusion->fast_path_io = 1; 3190 else 3191 fusion->fast_path_io = 0; 3192 megasas_sync_map_info(instance); 3193 spin_unlock_irqrestore(instance->host->host_lock, 3194 flags); 3195 break; 3196 } 3197 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3198 opcode == MR_DCMD_CTRL_EVENT_GET) { 3199 spin_lock_irqsave(&poll_aen_lock, flags); 3200 megasas_poll_wait_aen = 0; 3201 spin_unlock_irqrestore(&poll_aen_lock, flags); 3202 } 3203 3204 /* FW has an updated PD sequence */ 3205 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3206 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3207 3208 spin_lock_irqsave(instance->host->host_lock, flags); 3209 status = cmd->frame->hdr.cmd_status; 3210 instance->jbod_seq_cmd = NULL; 3211 megasas_return_cmd(instance, cmd); 3212 3213 if (status == MFI_STAT_OK) { 3214 instance->pd_seq_map_id++; 3215 /* Re-register a pd sync seq num cmd */ 3216 if (megasas_sync_pd_seq_num(instance, true)) 3217 instance->use_seqnum_jbod_fp = false; 3218 } else 3219 instance->use_seqnum_jbod_fp = false; 3220 3221 spin_unlock_irqrestore(instance->host->host_lock, flags); 3222 break; 3223 } 3224 3225 /* 3226 * See if got an event notification 3227 */ 3228 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3229 megasas_service_aen(instance, cmd); 3230 else 3231 megasas_complete_int_cmd(instance, cmd); 3232 3233 break; 3234 3235 case MFI_CMD_ABORT: 3236 /* 3237 * Cmd issued to abort another cmd returned 3238 */ 3239 megasas_complete_abort(instance, cmd); 3240 break; 3241 3242 default: 3243 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3244 hdr->cmd); 3245 break; 3246 } 3247 } 3248 3249 /** 3250 * megasas_issue_pending_cmds_again - issue all pending cmds 3251 * in FW again because of the fw reset 3252 * @instance: Adapter soft state 3253 */ 3254 static inline void 3255 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3256 { 3257 struct megasas_cmd *cmd; 3258 struct list_head clist_local; 3259 union megasas_evt_class_locale class_locale; 3260 unsigned long flags; 3261 u32 seq_num; 3262 3263 INIT_LIST_HEAD(&clist_local); 3264 spin_lock_irqsave(&instance->hba_lock, flags); 3265 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3266 spin_unlock_irqrestore(&instance->hba_lock, flags); 3267 3268 while (!list_empty(&clist_local)) { 3269 cmd = list_entry((&clist_local)->next, 3270 struct megasas_cmd, list); 3271 list_del_init(&cmd->list); 3272 3273 if (cmd->sync_cmd || cmd->scmd) { 3274 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3275 "detected to be pending while HBA reset\n", 3276 cmd, cmd->scmd, cmd->sync_cmd); 3277 3278 cmd->retry_for_fw_reset++; 3279 3280 if (cmd->retry_for_fw_reset == 3) { 3281 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3282 "was tried multiple times during reset." 
3283 "Shutting down the HBA\n", 3284 cmd, cmd->scmd, cmd->sync_cmd); 3285 instance->instancet->disable_intr(instance); 3286 atomic_set(&instance->fw_reset_no_pci_access, 1); 3287 megaraid_sas_kill_hba(instance); 3288 return; 3289 } 3290 } 3291 3292 if (cmd->sync_cmd == 1) { 3293 if (cmd->scmd) { 3294 dev_notice(&instance->pdev->dev, "unexpected" 3295 "cmd attached to internal command!\n"); 3296 } 3297 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3298 "on the internal reset queue," 3299 "issue it again.\n", cmd); 3300 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3301 instance->instancet->fire_cmd(instance, 3302 cmd->frame_phys_addr, 3303 0, instance->reg_set); 3304 } else if (cmd->scmd) { 3305 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3306 "detected on the internal queue, issue again.\n", 3307 cmd, cmd->scmd->cmnd[0]); 3308 3309 atomic_inc(&instance->fw_outstanding); 3310 instance->instancet->fire_cmd(instance, 3311 cmd->frame_phys_addr, 3312 cmd->frame_count-1, instance->reg_set); 3313 } else { 3314 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3315 "internal reset defer list while re-issue!!\n", 3316 cmd); 3317 } 3318 } 3319 3320 if (instance->aen_cmd) { 3321 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3322 megasas_return_cmd(instance, instance->aen_cmd); 3323 3324 instance->aen_cmd = NULL; 3325 } 3326 3327 /* 3328 * Initiate AEN (Asynchronous Event Notification) 3329 */ 3330 seq_num = instance->last_seq_num; 3331 class_locale.members.reserved = 0; 3332 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3333 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3334 3335 megasas_register_aen(instance, seq_num, class_locale.word); 3336 } 3337 3338 /** 3339 * Move the internal reset pending commands to a deferred queue. 3340 * 3341 * We move the commands pending at internal reset time to a 3342 * pending queue. This queue would be flushed after successful 3343 * completion of the internal reset sequence. if the internal reset 3344 * did not complete in time, the kernel reset handler would flush 3345 * these commands. 
3346 **/ 3347 static void 3348 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3349 { 3350 struct megasas_cmd *cmd; 3351 int i; 3352 u32 max_cmd = instance->max_fw_cmds; 3353 u32 defer_index; 3354 unsigned long flags; 3355 3356 defer_index = 0; 3357 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3358 for (i = 0; i < max_cmd; i++) { 3359 cmd = instance->cmd_list[i]; 3360 if (cmd->sync_cmd == 1 || cmd->scmd) { 3361 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3362 "on the defer queue as internal\n", 3363 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3364 3365 if (!list_empty(&cmd->list)) { 3366 dev_notice(&instance->pdev->dev, "ERROR while" 3367 " moving this cmd:%p, %d %p, it was" 3368 "discovered on some list?\n", 3369 cmd, cmd->sync_cmd, cmd->scmd); 3370 3371 list_del_init(&cmd->list); 3372 } 3373 defer_index++; 3374 list_add_tail(&cmd->list, 3375 &instance->internal_reset_pending_q); 3376 } 3377 } 3378 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3379 } 3380 3381 3382 static void 3383 process_fw_state_change_wq(struct work_struct *work) 3384 { 3385 struct megasas_instance *instance = 3386 container_of(work, struct megasas_instance, work_init); 3387 u32 wait; 3388 unsigned long flags; 3389 3390 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3391 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3392 atomic_read(&instance->adprecovery)); 3393 return ; 3394 } 3395 3396 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3397 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3398 "state, restarting it...\n"); 3399 3400 instance->instancet->disable_intr(instance); 3401 atomic_set(&instance->fw_outstanding, 0); 3402 3403 atomic_set(&instance->fw_reset_no_pci_access, 1); 3404 instance->instancet->adp_reset(instance, instance->reg_set); 3405 atomic_set(&instance->fw_reset_no_pci_access, 0); 3406 3407 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3408 "initiating next stage...\n"); 3409 3410 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3411 "state 2 starting...\n"); 3412 3413 /* waiting for about 20 second before start the second init */ 3414 for (wait = 0; wait < 30; wait++) { 3415 msleep(1000); 3416 } 3417 3418 if (megasas_transition_to_ready(instance, 1)) { 3419 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3420 3421 atomic_set(&instance->fw_reset_no_pci_access, 1); 3422 megaraid_sas_kill_hba(instance); 3423 return ; 3424 } 3425 3426 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3427 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3428 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3429 ) { 3430 *instance->consumer = *instance->producer; 3431 } else { 3432 *instance->consumer = 0; 3433 *instance->producer = 0; 3434 } 3435 3436 megasas_issue_init_mfi(instance); 3437 3438 spin_lock_irqsave(&instance->hba_lock, flags); 3439 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3440 spin_unlock_irqrestore(&instance->hba_lock, flags); 3441 instance->instancet->enable_intr(instance); 3442 3443 megasas_issue_pending_cmds_again(instance); 3444 instance->issuepend_done = 1; 3445 } 3446 } 3447 3448 /** 3449 * megasas_deplete_reply_queue - Processes all completed commands 3450 * @instance: Adapter soft state 3451 * @alt_status: Alternate status to be returned to 3452 * SCSI mid-layer instead of the status 3453 * returned by the FW 3454 * Note: this must be called with hba lock held 3455 */ 3456 static 
int 3457 megasas_deplete_reply_queue(struct megasas_instance *instance, 3458 u8 alt_status) 3459 { 3460 u32 mfiStatus; 3461 u32 fw_state; 3462 3463 if ((mfiStatus = instance->instancet->check_reset(instance, 3464 instance->reg_set)) == 1) { 3465 return IRQ_HANDLED; 3466 } 3467 3468 if ((mfiStatus = instance->instancet->clear_intr( 3469 instance->reg_set) 3470 ) == 0) { 3471 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3472 if (!instance->msix_vectors) 3473 return IRQ_NONE; 3474 } 3475 3476 instance->mfiStatus = mfiStatus; 3477 3478 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3479 fw_state = instance->instancet->read_fw_status_reg( 3480 instance->reg_set) & MFI_STATE_MASK; 3481 3482 if (fw_state != MFI_STATE_FAULT) { 3483 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3484 fw_state); 3485 } 3486 3487 if ((fw_state == MFI_STATE_FAULT) && 3488 (instance->disableOnlineCtrlReset == 0)) { 3489 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3490 3491 if ((instance->pdev->device == 3492 PCI_DEVICE_ID_LSI_SAS1064R) || 3493 (instance->pdev->device == 3494 PCI_DEVICE_ID_DELL_PERC5) || 3495 (instance->pdev->device == 3496 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3497 3498 *instance->consumer = 3499 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3500 } 3501 3502 3503 instance->instancet->disable_intr(instance); 3504 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3505 instance->issuepend_done = 0; 3506 3507 atomic_set(&instance->fw_outstanding, 0); 3508 megasas_internal_reset_defer_cmds(instance); 3509 3510 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3511 fw_state, atomic_read(&instance->adprecovery)); 3512 3513 schedule_work(&instance->work_init); 3514 return IRQ_HANDLED; 3515 3516 } else { 3517 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3518 fw_state, instance->disableOnlineCtrlReset); 3519 } 3520 } 3521 3522 tasklet_schedule(&instance->isr_tasklet); 3523 return IRQ_HANDLED; 3524 } 3525 /** 3526 * megasas_isr - isr entry point 3527 */ 3528 static irqreturn_t megasas_isr(int irq, void *devp) 3529 { 3530 struct megasas_irq_context *irq_context = devp; 3531 struct megasas_instance *instance = irq_context->instance; 3532 unsigned long flags; 3533 irqreturn_t rc; 3534 3535 if (atomic_read(&instance->fw_reset_no_pci_access)) 3536 return IRQ_HANDLED; 3537 3538 spin_lock_irqsave(&instance->hba_lock, flags); 3539 rc = megasas_deplete_reply_queue(instance, DID_OK); 3540 spin_unlock_irqrestore(&instance->hba_lock, flags); 3541 3542 return rc; 3543 } 3544 3545 /** 3546 * megasas_transition_to_ready - Move the FW to READY state 3547 * @instance: Adapter soft state 3548 * 3549 * During the initialization, FW passes can potentially be in any one of 3550 * several possible states. If the FW in operational, waiting-for-handshake 3551 * states, driver must take steps to bring it to ready state. Otherwise, it 3552 * has to wait for the ready state. 
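 *
 * For each intermediate state the routine programs the doorbell as
 * required, then polls read_fw_status_reg() for up to max_wait seconds
 * (MEGASAS_RESET_WAIT_TIME) in 1 ms steps; it returns -ENODEV if the
 * firmware state does not change within that window or an unknown state
 * is reported, and 0 once MFI_STATE_READY is reached.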
3553 */ 3554 int 3555 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3556 { 3557 int i; 3558 u8 max_wait; 3559 u32 fw_state; 3560 u32 cur_state; 3561 u32 abs_state, curr_abs_state; 3562 3563 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); 3564 fw_state = abs_state & MFI_STATE_MASK; 3565 3566 if (fw_state != MFI_STATE_READY) 3567 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3568 " state\n"); 3569 3570 while (fw_state != MFI_STATE_READY) { 3571 3572 switch (fw_state) { 3573 3574 case MFI_STATE_FAULT: 3575 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3576 if (ocr) { 3577 max_wait = MEGASAS_RESET_WAIT_TIME; 3578 cur_state = MFI_STATE_FAULT; 3579 break; 3580 } else 3581 return -ENODEV; 3582 3583 case MFI_STATE_WAIT_HANDSHAKE: 3584 /* 3585 * Set the CLR bit in inbound doorbell 3586 */ 3587 if ((instance->pdev->device == 3588 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3589 (instance->pdev->device == 3590 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3591 (instance->ctrl_context)) 3592 writel( 3593 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3594 &instance->reg_set->doorbell); 3595 else 3596 writel( 3597 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3598 &instance->reg_set->inbound_doorbell); 3599 3600 max_wait = MEGASAS_RESET_WAIT_TIME; 3601 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3602 break; 3603 3604 case MFI_STATE_BOOT_MESSAGE_PENDING: 3605 if ((instance->pdev->device == 3606 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3607 (instance->pdev->device == 3608 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3609 (instance->ctrl_context)) 3610 writel(MFI_INIT_HOTPLUG, 3611 &instance->reg_set->doorbell); 3612 else 3613 writel(MFI_INIT_HOTPLUG, 3614 &instance->reg_set->inbound_doorbell); 3615 3616 max_wait = MEGASAS_RESET_WAIT_TIME; 3617 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3618 break; 3619 3620 case MFI_STATE_OPERATIONAL: 3621 /* 3622 * Bring it to READY state; assuming max wait 10 secs 3623 */ 3624 instance->instancet->disable_intr(instance); 3625 if ((instance->pdev->device == 3626 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3627 (instance->pdev->device == 3628 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3629 (instance->ctrl_context)) { 3630 writel(MFI_RESET_FLAGS, 3631 &instance->reg_set->doorbell); 3632 3633 if (instance->ctrl_context) { 3634 for (i = 0; i < (10 * 1000); i += 20) { 3635 if (readl( 3636 &instance-> 3637 reg_set-> 3638 doorbell) & 1) 3639 msleep(20); 3640 else 3641 break; 3642 } 3643 } 3644 } else 3645 writel(MFI_RESET_FLAGS, 3646 &instance->reg_set->inbound_doorbell); 3647 3648 max_wait = MEGASAS_RESET_WAIT_TIME; 3649 cur_state = MFI_STATE_OPERATIONAL; 3650 break; 3651 3652 case MFI_STATE_UNDEFINED: 3653 /* 3654 * This state should not last for more than 2 seconds 3655 */ 3656 max_wait = MEGASAS_RESET_WAIT_TIME; 3657 cur_state = MFI_STATE_UNDEFINED; 3658 break; 3659 3660 case MFI_STATE_BB_INIT: 3661 max_wait = MEGASAS_RESET_WAIT_TIME; 3662 cur_state = MFI_STATE_BB_INIT; 3663 break; 3664 3665 case MFI_STATE_FW_INIT: 3666 max_wait = MEGASAS_RESET_WAIT_TIME; 3667 cur_state = MFI_STATE_FW_INIT; 3668 break; 3669 3670 case MFI_STATE_FW_INIT_2: 3671 max_wait = MEGASAS_RESET_WAIT_TIME; 3672 cur_state = MFI_STATE_FW_INIT_2; 3673 break; 3674 3675 case MFI_STATE_DEVICE_SCAN: 3676 max_wait = MEGASAS_RESET_WAIT_TIME; 3677 cur_state = MFI_STATE_DEVICE_SCAN; 3678 break; 3679 3680 case MFI_STATE_FLUSH_CACHE: 3681 max_wait = MEGASAS_RESET_WAIT_TIME; 3682 cur_state = MFI_STATE_FLUSH_CACHE; 3683 break; 3684 3685 default: 3686 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "Unknown state 0x%x\n", 3687 fw_state); 3688 return -ENODEV; 3689 } 3690 3691 /* 3692 * The cur_state should not last for more than max_wait secs 3693 */ 3694 for (i = 0; i < (max_wait * 1000); i++) { 3695 curr_abs_state = instance->instancet-> 3696 read_fw_status_reg(instance->reg_set); 3697 3698 if (abs_state == curr_abs_state) { 3699 msleep(1); 3700 } else 3701 break; 3702 } 3703 3704 /* 3705 * Return error if fw_state hasn't changed after max_wait 3706 */ 3707 if (curr_abs_state == abs_state) { 3708 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3709 "in %d secs\n", fw_state, max_wait); 3710 return -ENODEV; 3711 } 3712 3713 abs_state = curr_abs_state; 3714 fw_state = curr_abs_state & MFI_STATE_MASK; 3715 } 3716 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3717 3718 return 0; 3719 } 3720 3721 /** 3722 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3723 * @instance: Adapter soft state 3724 */ 3725 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3726 { 3727 int i; 3728 u32 max_cmd = instance->max_mfi_cmds; 3729 struct megasas_cmd *cmd; 3730 3731 if (!instance->frame_dma_pool) 3732 return; 3733 3734 /* 3735 * Return all frames to pool 3736 */ 3737 for (i = 0; i < max_cmd; i++) { 3738 3739 cmd = instance->cmd_list[i]; 3740 3741 if (cmd->frame) 3742 pci_pool_free(instance->frame_dma_pool, cmd->frame, 3743 cmd->frame_phys_addr); 3744 3745 if (cmd->sense) 3746 pci_pool_free(instance->sense_dma_pool, cmd->sense, 3747 cmd->sense_phys_addr); 3748 } 3749 3750 /* 3751 * Now destroy the pool itself 3752 */ 3753 pci_pool_destroy(instance->frame_dma_pool); 3754 pci_pool_destroy(instance->sense_dma_pool); 3755 3756 instance->frame_dma_pool = NULL; 3757 instance->sense_dma_pool = NULL; 3758 } 3759 3760 /** 3761 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3762 * @instance: Adapter soft state 3763 * 3764 * Each command packet has an embedded DMA memory buffer that is used for 3765 * filling MFI frame and the SG list that immediately follows the frame. This 3766 * function creates those DMA memory buffers for each command packet by using 3767 * PCI pool facility. 3768 */ 3769 static int megasas_create_frame_pool(struct megasas_instance *instance) 3770 { 3771 int i; 3772 u32 max_cmd; 3773 u32 sge_sz; 3774 u32 total_sz; 3775 u32 frame_count; 3776 struct megasas_cmd *cmd; 3777 3778 max_cmd = instance->max_mfi_cmds; 3779 3780 /* 3781 * Size of our frame is 64 bytes for MFI frame, followed by max SG 3782 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 3783 */ 3784 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 3785 sizeof(struct megasas_sge32); 3786 3787 if (instance->flag_ieee) 3788 sge_sz = sizeof(struct megasas_sge_skinny); 3789 3790 /* 3791 * For MFI controllers. 3792 * max_num_sge = 60 3793 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 3794 * Total 960 byte (15 MFI frame of 64 byte) 3795 * 3796 * Fusion adapter require only 3 extra frame. 3797 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 3798 * max_sge_sz = 12 byte (sizeof megasas_sge64) 3799 * Total 192 byte (3 MFI frame of 64 byte) 3800 */ 3801 frame_count = instance->ctrl_context ? 
(3 + 1) : (15 + 1); 3802 total_sz = MEGAMFI_FRAME_SIZE * frame_count; 3803 /* 3804 * Use DMA pool facility provided by PCI layer 3805 */ 3806 instance->frame_dma_pool = pci_pool_create("megasas frame pool", 3807 instance->pdev, total_sz, 256, 0); 3808 3809 if (!instance->frame_dma_pool) { 3810 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 3811 return -ENOMEM; 3812 } 3813 3814 instance->sense_dma_pool = pci_pool_create("megasas sense pool", 3815 instance->pdev, 128, 4, 0); 3816 3817 if (!instance->sense_dma_pool) { 3818 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 3819 3820 pci_pool_destroy(instance->frame_dma_pool); 3821 instance->frame_dma_pool = NULL; 3822 3823 return -ENOMEM; 3824 } 3825 3826 /* 3827 * Allocate and attach a frame to each of the commands in cmd_list. 3828 * By making cmd->index as the context instead of the &cmd, we can 3829 * always use 32bit context regardless of the architecture 3830 */ 3831 for (i = 0; i < max_cmd; i++) { 3832 3833 cmd = instance->cmd_list[i]; 3834 3835 cmd->frame = pci_pool_alloc(instance->frame_dma_pool, 3836 GFP_KERNEL, &cmd->frame_phys_addr); 3837 3838 cmd->sense = pci_pool_alloc(instance->sense_dma_pool, 3839 GFP_KERNEL, &cmd->sense_phys_addr); 3840 3841 /* 3842 * megasas_teardown_frame_pool() takes care of freeing 3843 * whatever has been allocated 3844 */ 3845 if (!cmd->frame || !cmd->sense) { 3846 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); 3847 megasas_teardown_frame_pool(instance); 3848 return -ENOMEM; 3849 } 3850 3851 memset(cmd->frame, 0, total_sz); 3852 cmd->frame->io.context = cpu_to_le32(cmd->index); 3853 cmd->frame->io.pad_0 = 0; 3854 if (!instance->ctrl_context && reset_devices) 3855 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 3856 } 3857 3858 return 0; 3859 } 3860 3861 /** 3862 * megasas_free_cmds - Free all the cmds in the free cmd pool 3863 * @instance: Adapter soft state 3864 */ 3865 void megasas_free_cmds(struct megasas_instance *instance) 3866 { 3867 int i; 3868 3869 /* First free the MFI frame pool */ 3870 megasas_teardown_frame_pool(instance); 3871 3872 /* Free all the commands in the cmd_list */ 3873 for (i = 0; i < instance->max_mfi_cmds; i++) 3874 3875 kfree(instance->cmd_list[i]); 3876 3877 /* Free the cmd_list buffer itself */ 3878 kfree(instance->cmd_list); 3879 instance->cmd_list = NULL; 3880 3881 INIT_LIST_HEAD(&instance->cmd_pool); 3882 } 3883 3884 /** 3885 * megasas_alloc_cmds - Allocates the command packets 3886 * @instance: Adapter soft state 3887 * 3888 * Each command that is issued to the FW, whether IO commands from the OS or 3889 * internal commands like IOCTLs, are wrapped in local data structure called 3890 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 3891 * the FW. 3892 * 3893 * Each frame has a 32-bit field called context (tag). This context is used 3894 * to get back the megasas_cmd from the frame when a frame gets completed in 3895 * the ISR. Typically the address of the megasas_cmd itself would be used as 3896 * the context. But we wanted to keep the differences between 32 and 64 bit 3897 * systems to the mininum. We always use 32 bit integers for the context. In 3898 * this driver, the 32 bit values are the indices into an array cmd_list. 3899 * This array is used only to look up the megasas_cmd given the context. The 3900 * free commands themselves are maintained in a linked list called cmd_pool. 
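 *
 * Illustrative sketch (hedged; the real completion code elsewhere in this
 * driver adds locking and validity checks): when the FW reports a context
 * value back through its reply queue, the owning command is recovered with
 * a plain array lookup,
 *
 *	cmd = instance->cmd_list[context];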
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u32 max_cmd;
	struct megasas_cmd *cmd;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}

/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:			Adapter soft state
 *
 * Returns INITIATE_OCR only for Fusion adapters, and only while driver
 * load/unload is not in progress and the FW is not already under OCR;
 * MFI adapters get KILL_ADAPTER, and an in-flight unload/OCR gets
 * IGNORE_TIMEOUT.
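 *
 * Illustrative sketch of how the DCMD issuers in this file consume the
 * result on a timeout (condensed from the switch statements below, where
 * cmd is the timed-out DCMD; not a new API):
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}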
3970 */ 3971 inline int 3972 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 3973 3974 if (!instance->ctrl_context) 3975 return KILL_ADAPTER; 3976 else if (instance->unload || 3977 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 3978 return IGNORE_TIMEOUT; 3979 else 3980 return INITIATE_OCR; 3981 } 3982 3983 static int 3984 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) 3985 { 3986 int ret; 3987 struct megasas_cmd *cmd; 3988 struct megasas_dcmd_frame *dcmd; 3989 3990 cmd = megasas_get_cmd(instance); 3991 3992 if (!cmd) { 3993 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 3994 return -ENOMEM; 3995 } 3996 3997 dcmd = &cmd->frame->dcmd; 3998 3999 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4000 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4001 4002 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4003 dcmd->cmd = MFI_CMD_DCMD; 4004 dcmd->cmd_status = 0xFF; 4005 dcmd->sge_count = 1; 4006 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4007 dcmd->timeout = 0; 4008 dcmd->pad_0 = 0; 4009 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4010 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4011 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h); 4012 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4013 4014 if (instance->ctrl_context && !instance->mask_interrupts) 4015 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4016 else 4017 ret = megasas_issue_polled(instance, cmd); 4018 4019 switch (ret) { 4020 case DCMD_SUCCESS: 4021 instance->pd_list[device_id].interface = 4022 instance->pd_info->state.ddf.pdType.intf; 4023 break; 4024 4025 case DCMD_TIMEOUT: 4026 4027 switch (dcmd_timeout_ocr_possible(instance)) { 4028 case INITIATE_OCR: 4029 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4030 megasas_reset_fusion(instance->host, 4031 MFI_IO_TIMEOUT_OCR); 4032 break; 4033 case KILL_ADAPTER: 4034 megaraid_sas_kill_hba(instance); 4035 break; 4036 case IGNORE_TIMEOUT: 4037 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4038 __func__, __LINE__); 4039 break; 4040 } 4041 4042 break; 4043 } 4044 4045 if (ret != DCMD_TIMEOUT) 4046 megasas_return_cmd(instance, cmd); 4047 4048 return ret; 4049 } 4050 /* 4051 * megasas_get_pd_list_info - Returns FW's pd_list structure 4052 * @instance: Adapter soft state 4053 * @pd_list: pd_list structure 4054 * 4055 * Issues an internal command (DCMD) to get the FW's controller PD 4056 * list structure. This information is mainly used to find out SYSTEM 4057 * supported by the FW. 
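 *
 * The DCMD plumbing below follows the same shape as the other internal
 * commands in this file; roughly (illustrative outline only):
 *
 *	cmd = megasas_get_cmd(instance);
 *	dcmd = &cmd->frame->dcmd;
 *	... fill dcmd->opcode, dcmd->data_xfer_len and a single SGE ...
 *	if (instance->ctrl_context && !instance->mask_interrupts)
 *		ret = megasas_issue_blocked_cmd(instance, cmd,
 *						MFI_IO_TIMEOUT_SECS);
 *	else
 *		ret = megasas_issue_polled(instance, cmd);
 *	if (ret != DCMD_TIMEOUT)
 *		megasas_return_cmd(instance, cmd);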
4058 */ 4059 static int 4060 megasas_get_pd_list(struct megasas_instance *instance) 4061 { 4062 int ret = 0, pd_index = 0; 4063 struct megasas_cmd *cmd; 4064 struct megasas_dcmd_frame *dcmd; 4065 struct MR_PD_LIST *ci; 4066 struct MR_PD_ADDRESS *pd_addr; 4067 dma_addr_t ci_h = 0; 4068 4069 if (instance->pd_list_not_supported) { 4070 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4071 "not supported by firmware\n"); 4072 return ret; 4073 } 4074 4075 cmd = megasas_get_cmd(instance); 4076 4077 if (!cmd) { 4078 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4079 return -ENOMEM; 4080 } 4081 4082 dcmd = &cmd->frame->dcmd; 4083 4084 ci = pci_alloc_consistent(instance->pdev, 4085 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h); 4086 4087 if (!ci) { 4088 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n"); 4089 megasas_return_cmd(instance, cmd); 4090 return -ENOMEM; 4091 } 4092 4093 memset(ci, 0, sizeof(*ci)); 4094 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4095 4096 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4097 dcmd->mbox.b[1] = 0; 4098 dcmd->cmd = MFI_CMD_DCMD; 4099 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4100 dcmd->sge_count = 1; 4101 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4102 dcmd->timeout = 0; 4103 dcmd->pad_0 = 0; 4104 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4105 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4106 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4107 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4108 4109 if (instance->ctrl_context && !instance->mask_interrupts) 4110 ret = megasas_issue_blocked_cmd(instance, cmd, 4111 MFI_IO_TIMEOUT_SECS); 4112 else 4113 ret = megasas_issue_polled(instance, cmd); 4114 4115 switch (ret) { 4116 case DCMD_FAILED: 4117 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4118 "failed/not supported by firmware\n"); 4119 4120 if (instance->ctrl_context) 4121 megaraid_sas_kill_hba(instance); 4122 else 4123 instance->pd_list_not_supported = 1; 4124 break; 4125 case DCMD_TIMEOUT: 4126 4127 switch (dcmd_timeout_ocr_possible(instance)) { 4128 case INITIATE_OCR: 4129 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4130 /* 4131 * DCMD failed from AEN path. 4132 * AEN path already hold reset_mutex to avoid PCI access 4133 * while OCR is in progress. 
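			 * The mutex is therefore released across the
			 * megasas_reset_fusion() call below and re-acquired
			 * once the reset returns.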
4134 */ 4135 mutex_unlock(&instance->reset_mutex); 4136 megasas_reset_fusion(instance->host, 4137 MFI_IO_TIMEOUT_OCR); 4138 mutex_lock(&instance->reset_mutex); 4139 break; 4140 case KILL_ADAPTER: 4141 megaraid_sas_kill_hba(instance); 4142 break; 4143 case IGNORE_TIMEOUT: 4144 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4145 __func__, __LINE__); 4146 break; 4147 } 4148 4149 break; 4150 4151 case DCMD_SUCCESS: 4152 pd_addr = ci->addr; 4153 4154 if ((le32_to_cpu(ci->count) > 4155 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4156 break; 4157 4158 memset(instance->local_pd_list, 0, 4159 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4160 4161 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4162 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4163 le16_to_cpu(pd_addr->deviceId); 4164 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4165 pd_addr->scsiDevType; 4166 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4167 MR_PD_STATE_SYSTEM; 4168 pd_addr++; 4169 } 4170 4171 memcpy(instance->pd_list, instance->local_pd_list, 4172 sizeof(instance->pd_list)); 4173 break; 4174 4175 } 4176 4177 pci_free_consistent(instance->pdev, 4178 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 4179 ci, ci_h); 4180 4181 if (ret != DCMD_TIMEOUT) 4182 megasas_return_cmd(instance, cmd); 4183 4184 return ret; 4185 } 4186 4187 /* 4188 * megasas_get_ld_list_info - Returns FW's ld_list structure 4189 * @instance: Adapter soft state 4190 * @ld_list: ld_list structure 4191 * 4192 * Issues an internal command (DCMD) to get the FW's controller PD 4193 * list structure. This information is mainly used to find out SYSTEM 4194 * supported by the FW. 4195 */ 4196 static int 4197 megasas_get_ld_list(struct megasas_instance *instance) 4198 { 4199 int ret = 0, ld_index = 0, ids = 0; 4200 struct megasas_cmd *cmd; 4201 struct megasas_dcmd_frame *dcmd; 4202 struct MR_LD_LIST *ci; 4203 dma_addr_t ci_h = 0; 4204 u32 ld_count; 4205 4206 cmd = megasas_get_cmd(instance); 4207 4208 if (!cmd) { 4209 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4210 return -ENOMEM; 4211 } 4212 4213 dcmd = &cmd->frame->dcmd; 4214 4215 ci = pci_alloc_consistent(instance->pdev, 4216 sizeof(struct MR_LD_LIST), 4217 &ci_h); 4218 4219 if (!ci) { 4220 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n"); 4221 megasas_return_cmd(instance, cmd); 4222 return -ENOMEM; 4223 } 4224 4225 memset(ci, 0, sizeof(*ci)); 4226 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4227 4228 if (instance->supportmax256vd) 4229 dcmd->mbox.b[0] = 1; 4230 dcmd->cmd = MFI_CMD_DCMD; 4231 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4232 dcmd->sge_count = 1; 4233 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4234 dcmd->timeout = 0; 4235 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4236 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4237 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4238 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4239 dcmd->pad_0 = 0; 4240 4241 if (instance->ctrl_context && !instance->mask_interrupts) 4242 ret = megasas_issue_blocked_cmd(instance, cmd, 4243 MFI_IO_TIMEOUT_SECS); 4244 else 4245 ret = megasas_issue_polled(instance, cmd); 4246 4247 ld_count = le32_to_cpu(ci->ldCount); 4248 4249 switch (ret) { 4250 case DCMD_FAILED: 4251 megaraid_sas_kill_hba(instance); 4252 break; 4253 case DCMD_TIMEOUT: 4254 4255 switch (dcmd_timeout_ocr_possible(instance)) { 4256 
case INITIATE_OCR: 4257 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4258 /* 4259 * DCMD failed from AEN path. 4260 * AEN path already hold reset_mutex to avoid PCI access 4261 * while OCR is in progress. 4262 */ 4263 mutex_unlock(&instance->reset_mutex); 4264 megasas_reset_fusion(instance->host, 4265 MFI_IO_TIMEOUT_OCR); 4266 mutex_lock(&instance->reset_mutex); 4267 break; 4268 case KILL_ADAPTER: 4269 megaraid_sas_kill_hba(instance); 4270 break; 4271 case IGNORE_TIMEOUT: 4272 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4273 __func__, __LINE__); 4274 break; 4275 } 4276 4277 break; 4278 4279 case DCMD_SUCCESS: 4280 if (ld_count > instance->fw_supported_vd_count) 4281 break; 4282 4283 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4284 4285 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4286 if (ci->ldList[ld_index].state != 0) { 4287 ids = ci->ldList[ld_index].ref.targetId; 4288 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4289 } 4290 } 4291 4292 break; 4293 } 4294 4295 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); 4296 4297 if (ret != DCMD_TIMEOUT) 4298 megasas_return_cmd(instance, cmd); 4299 4300 return ret; 4301 } 4302 4303 /** 4304 * megasas_ld_list_query - Returns FW's ld_list structure 4305 * @instance: Adapter soft state 4306 * @ld_list: ld_list structure 4307 * 4308 * Issues an internal command (DCMD) to get the FW's controller PD 4309 * list structure. This information is mainly used to find out SYSTEM 4310 * supported by the FW. 4311 */ 4312 static int 4313 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4314 { 4315 int ret = 0, ld_index = 0, ids = 0; 4316 struct megasas_cmd *cmd; 4317 struct megasas_dcmd_frame *dcmd; 4318 struct MR_LD_TARGETID_LIST *ci; 4319 dma_addr_t ci_h = 0; 4320 u32 tgtid_count; 4321 4322 cmd = megasas_get_cmd(instance); 4323 4324 if (!cmd) { 4325 dev_warn(&instance->pdev->dev, 4326 "megasas_ld_list_query: Failed to get cmd\n"); 4327 return -ENOMEM; 4328 } 4329 4330 dcmd = &cmd->frame->dcmd; 4331 4332 ci = pci_alloc_consistent(instance->pdev, 4333 sizeof(struct MR_LD_TARGETID_LIST), &ci_h); 4334 4335 if (!ci) { 4336 dev_warn(&instance->pdev->dev, 4337 "Failed to alloc mem for ld_list_query\n"); 4338 megasas_return_cmd(instance, cmd); 4339 return -ENOMEM; 4340 } 4341 4342 memset(ci, 0, sizeof(*ci)); 4343 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4344 4345 dcmd->mbox.b[0] = query_type; 4346 if (instance->supportmax256vd) 4347 dcmd->mbox.b[2] = 1; 4348 4349 dcmd->cmd = MFI_CMD_DCMD; 4350 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4351 dcmd->sge_count = 1; 4352 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4353 dcmd->timeout = 0; 4354 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4355 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4356 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4357 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4358 dcmd->pad_0 = 0; 4359 4360 if (instance->ctrl_context && !instance->mask_interrupts) 4361 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4362 else 4363 ret = megasas_issue_polled(instance, cmd); 4364 4365 switch (ret) { 4366 case DCMD_FAILED: 4367 dev_info(&instance->pdev->dev, 4368 "DCMD not supported by firmware - %s %d\n", 4369 __func__, __LINE__); 4370 ret = megasas_get_ld_list(instance); 4371 break; 4372 case DCMD_TIMEOUT: 4373 switch (dcmd_timeout_ocr_possible(instance)) { 4374 case INITIATE_OCR: 4375 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4376 /* 4377 
* DCMD failed from AEN path. 4378 * AEN path already hold reset_mutex to avoid PCI access 4379 * while OCR is in progress. 4380 */ 4381 mutex_unlock(&instance->reset_mutex); 4382 megasas_reset_fusion(instance->host, 4383 MFI_IO_TIMEOUT_OCR); 4384 mutex_lock(&instance->reset_mutex); 4385 break; 4386 case KILL_ADAPTER: 4387 megaraid_sas_kill_hba(instance); 4388 break; 4389 case IGNORE_TIMEOUT: 4390 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4391 __func__, __LINE__); 4392 break; 4393 } 4394 4395 break; 4396 case DCMD_SUCCESS: 4397 tgtid_count = le32_to_cpu(ci->count); 4398 4399 if ((tgtid_count > (instance->fw_supported_vd_count))) 4400 break; 4401 4402 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4403 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4404 ids = ci->targetId[ld_index]; 4405 instance->ld_ids[ids] = ci->targetId[ld_index]; 4406 } 4407 4408 break; 4409 } 4410 4411 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 4412 ci, ci_h); 4413 4414 if (ret != DCMD_TIMEOUT) 4415 megasas_return_cmd(instance, cmd); 4416 4417 return ret; 4418 } 4419 4420 /* 4421 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4422 * instance : Controller's instance 4423 */ 4424 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4425 { 4426 struct fusion_context *fusion; 4427 u32 old_map_sz; 4428 u32 new_map_sz; 4429 4430 fusion = instance->ctrl_context; 4431 /* For MFI based controllers return dummy success */ 4432 if (!fusion) 4433 return; 4434 4435 instance->supportmax256vd = 4436 instance->ctrl_info->adapterOperations3.supportMaxExtLDs; 4437 /* Below is additional check to address future FW enhancement */ 4438 if (instance->ctrl_info->max_lds > 64) 4439 instance->supportmax256vd = 1; 4440 4441 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4442 * MEGASAS_MAX_DEV_PER_CHANNEL; 4443 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4444 * MEGASAS_MAX_DEV_PER_CHANNEL; 4445 if (instance->supportmax256vd) { 4446 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4447 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4448 } else { 4449 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4450 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4451 } 4452 4453 dev_info(&instance->pdev->dev, 4454 "firmware type\t: %s\n", 4455 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4456 "Legacy(64 VD) firmware"); 4457 4458 old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4459 (sizeof(struct MR_LD_SPAN_MAP) * 4460 (instance->fw_supported_vd_count - 1)); 4461 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4462 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + 4463 (sizeof(struct MR_LD_SPAN_MAP) * 4464 (instance->drv_supported_vd_count - 1)); 4465 4466 fusion->max_map_sz = max(old_map_sz, new_map_sz); 4467 4468 4469 if (instance->supportmax256vd) 4470 fusion->current_map_sz = new_map_sz; 4471 else 4472 fusion->current_map_sz = old_map_sz; 4473 } 4474 4475 /** 4476 * megasas_get_controller_info - Returns FW's controller structure 4477 * @instance: Adapter soft state 4478 * 4479 * Issues an internal command (DCMD) to get the FW's controller structure. 4480 * This information is mainly used to find out the maximum IO transfer per 4481 * command supported by the FW. 
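 *
 * Typical use inside this driver (see megasas_init_adapter_mfi(); shown
 * here only as an illustration):
 *
 *	if (megasas_get_ctrl_info(instance))
 *		goto fail_fw_init;
 *
 * On success the interesting fields are cached in the instance, e.g.
 * instance->is_imr, instance->disableOnlineCtrlReset and
 * instance->secure_jbod_support.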
4482 */ 4483 int 4484 megasas_get_ctrl_info(struct megasas_instance *instance) 4485 { 4486 int ret = 0; 4487 struct megasas_cmd *cmd; 4488 struct megasas_dcmd_frame *dcmd; 4489 struct megasas_ctrl_info *ci; 4490 struct megasas_ctrl_info *ctrl_info; 4491 dma_addr_t ci_h = 0; 4492 4493 ctrl_info = instance->ctrl_info; 4494 4495 cmd = megasas_get_cmd(instance); 4496 4497 if (!cmd) { 4498 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4499 return -ENOMEM; 4500 } 4501 4502 dcmd = &cmd->frame->dcmd; 4503 4504 ci = pci_alloc_consistent(instance->pdev, 4505 sizeof(struct megasas_ctrl_info), &ci_h); 4506 4507 if (!ci) { 4508 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n"); 4509 megasas_return_cmd(instance, cmd); 4510 return -ENOMEM; 4511 } 4512 4513 memset(ci, 0, sizeof(*ci)); 4514 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4515 4516 dcmd->cmd = MFI_CMD_DCMD; 4517 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4518 dcmd->sge_count = 1; 4519 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4520 dcmd->timeout = 0; 4521 dcmd->pad_0 = 0; 4522 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4523 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4524 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4525 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4526 dcmd->mbox.b[0] = 1; 4527 4528 if (instance->ctrl_context && !instance->mask_interrupts) 4529 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4530 else 4531 ret = megasas_issue_polled(instance, cmd); 4532 4533 switch (ret) { 4534 case DCMD_SUCCESS: 4535 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4536 /* Save required controller information in 4537 * CPU endianness format. 4538 */ 4539 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4540 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4541 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4542 4543 /* Update the latest Ext VD info. 4544 * From Init path, store current firmware details. 4545 * From OCR path, detect any firmware properties changes. 4546 * in case of Firmware upgrade without system reboot. 4547 */ 4548 megasas_update_ext_vd_details(instance); 4549 instance->use_seqnum_jbod_fp = 4550 ctrl_info->adapterOperations3.useSeqNumJbodFP; 4551 4552 /*Check whether controller is iMR or MR */ 4553 instance->is_imr = (ctrl_info->memory_size ? 0 : 1); 4554 dev_info(&instance->pdev->dev, 4555 "controller type\t: %s(%dMB)\n", 4556 instance->is_imr ? "iMR" : "MR", 4557 le16_to_cpu(ctrl_info->memory_size)); 4558 4559 instance->disableOnlineCtrlReset = 4560 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4561 instance->secure_jbod_support = 4562 ctrl_info->adapterOperations3.supportSecurityonJBOD; 4563 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4564 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4565 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4566 instance->secure_jbod_support ? 
"Yes" : "No"); 4567 break; 4568 4569 case DCMD_TIMEOUT: 4570 switch (dcmd_timeout_ocr_possible(instance)) { 4571 case INITIATE_OCR: 4572 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4573 megasas_reset_fusion(instance->host, 4574 MFI_IO_TIMEOUT_OCR); 4575 break; 4576 case KILL_ADAPTER: 4577 megaraid_sas_kill_hba(instance); 4578 break; 4579 case IGNORE_TIMEOUT: 4580 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4581 __func__, __LINE__); 4582 break; 4583 } 4584 case DCMD_FAILED: 4585 megaraid_sas_kill_hba(instance); 4586 break; 4587 4588 } 4589 4590 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4591 ci, ci_h); 4592 4593 megasas_return_cmd(instance, cmd); 4594 4595 4596 return ret; 4597 } 4598 4599 /* 4600 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 4601 * to firmware 4602 * 4603 * @instance: Adapter soft state 4604 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 4605 MR_CRASH_BUF_TURN_OFF = 0 4606 MR_CRASH_BUF_TURN_ON = 1 4607 * @return 0 on success non-zero on failure. 4608 * Issues an internal command (DCMD) to set parameters for crash dump feature. 4609 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 4610 * that driver supports crash dump feature. This DCMD will be sent only if 4611 * crash dump feature is supported by the FW. 4612 * 4613 */ 4614 int megasas_set_crash_dump_params(struct megasas_instance *instance, 4615 u8 crash_buf_state) 4616 { 4617 int ret = 0; 4618 struct megasas_cmd *cmd; 4619 struct megasas_dcmd_frame *dcmd; 4620 4621 cmd = megasas_get_cmd(instance); 4622 4623 if (!cmd) { 4624 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 4625 return -ENOMEM; 4626 } 4627 4628 4629 dcmd = &cmd->frame->dcmd; 4630 4631 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4632 dcmd->mbox.b[0] = crash_buf_state; 4633 dcmd->cmd = MFI_CMD_DCMD; 4634 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4635 dcmd->sge_count = 1; 4636 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4637 dcmd->timeout = 0; 4638 dcmd->pad_0 = 0; 4639 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4640 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 4641 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); 4642 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4643 4644 if (instance->ctrl_context && !instance->mask_interrupts) 4645 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4646 else 4647 ret = megasas_issue_polled(instance, cmd); 4648 4649 if (ret == DCMD_TIMEOUT) { 4650 switch (dcmd_timeout_ocr_possible(instance)) { 4651 case INITIATE_OCR: 4652 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4653 megasas_reset_fusion(instance->host, 4654 MFI_IO_TIMEOUT_OCR); 4655 break; 4656 case KILL_ADAPTER: 4657 megaraid_sas_kill_hba(instance); 4658 break; 4659 case IGNORE_TIMEOUT: 4660 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4661 __func__, __LINE__); 4662 break; 4663 } 4664 } else 4665 megasas_return_cmd(instance, cmd); 4666 4667 return ret; 4668 } 4669 4670 /** 4671 * megasas_issue_init_mfi - Initializes the FW 4672 * @instance: Adapter soft state 4673 * 4674 * Issues the INIT MFI cmd 4675 */ 4676 static int 4677 megasas_issue_init_mfi(struct megasas_instance *instance) 4678 { 4679 __le32 context; 4680 struct megasas_cmd *cmd; 4681 struct megasas_init_frame *init_frame; 4682 struct megasas_init_queue_info *initq_info; 4683 dma_addr_t init_frame_h; 4684 dma_addr_t initq_info_h; 4685 4686 /* 4687 * Prepare a init frame. 
Note the init frame points to queue info 4688 * structure. Each frame has SGL allocated after first 64 bytes. For 4689 * this frame - since we don't need any SGL - we use SGL's space as 4690 * queue info structure 4691 * 4692 * We will not get a NULL command below. We just created the pool. 4693 */ 4694 cmd = megasas_get_cmd(instance); 4695 4696 init_frame = (struct megasas_init_frame *)cmd->frame; 4697 initq_info = (struct megasas_init_queue_info *) 4698 ((unsigned long)init_frame + 64); 4699 4700 init_frame_h = cmd->frame_phys_addr; 4701 initq_info_h = init_frame_h + 64; 4702 4703 context = init_frame->context; 4704 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 4705 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 4706 init_frame->context = context; 4707 4708 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 4709 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 4710 4711 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 4712 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 4713 4714 init_frame->cmd = MFI_CMD_INIT; 4715 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 4716 init_frame->queue_info_new_phys_addr_lo = 4717 cpu_to_le32(lower_32_bits(initq_info_h)); 4718 init_frame->queue_info_new_phys_addr_hi = 4719 cpu_to_le32(upper_32_bits(initq_info_h)); 4720 4721 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 4722 4723 /* 4724 * disable the intr before firing the init frame to FW 4725 */ 4726 instance->instancet->disable_intr(instance); 4727 4728 /* 4729 * Issue the init frame in polled mode 4730 */ 4731 4732 if (megasas_issue_polled(instance, cmd)) { 4733 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 4734 megasas_return_cmd(instance, cmd); 4735 goto fail_fw_init; 4736 } 4737 4738 megasas_return_cmd(instance, cmd); 4739 4740 return 0; 4741 4742 fail_fw_init: 4743 return -EINVAL; 4744 } 4745 4746 static u32 4747 megasas_init_adapter_mfi(struct megasas_instance *instance) 4748 { 4749 struct megasas_register_set __iomem *reg_set; 4750 u32 context_sz; 4751 u32 reply_q_sz; 4752 4753 reg_set = instance->reg_set; 4754 4755 /* 4756 * Get various operational parameters from status register 4757 */ 4758 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 4759 /* 4760 * Reduce the max supported cmds by 1. This is to ensure that the 4761 * reply_q_sz (1 more than the max cmd that driver may send) 4762 * does not exceed max cmds that the FW can support 4763 */ 4764 instance->max_fw_cmds = instance->max_fw_cmds-1; 4765 instance->max_mfi_cmds = instance->max_fw_cmds; 4766 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 4767 0x10; 4768 /* 4769 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 4770 * are reserved for IOCTL + driver's internal DCMDs. 
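	 * (Plain MFI adapters reserve MEGASAS_INT_CMDS instead, as the else
	 * branch below shows, and size the ioctl semaphore with
	 * MEGASAS_MFI_IOCTL_CMDS.)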
4771 */ 4772 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4773 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 4774 instance->max_scsi_cmds = (instance->max_fw_cmds - 4775 MEGASAS_SKINNY_INT_CMDS); 4776 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 4777 } else { 4778 instance->max_scsi_cmds = (instance->max_fw_cmds - 4779 MEGASAS_INT_CMDS); 4780 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 4781 } 4782 4783 instance->cur_can_queue = instance->max_scsi_cmds; 4784 /* 4785 * Create a pool of commands 4786 */ 4787 if (megasas_alloc_cmds(instance)) 4788 goto fail_alloc_cmds; 4789 4790 /* 4791 * Allocate memory for reply queue. Length of reply queue should 4792 * be _one_ more than the maximum commands handled by the firmware. 4793 * 4794 * Note: When FW completes commands, it places corresponding contex 4795 * values in this circular reply queue. This circular queue is a fairly 4796 * typical producer-consumer queue. FW is the producer (of completed 4797 * commands) and the driver is the consumer. 4798 */ 4799 context_sz = sizeof(u32); 4800 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 4801 4802 instance->reply_queue = pci_alloc_consistent(instance->pdev, 4803 reply_q_sz, 4804 &instance->reply_queue_h); 4805 4806 if (!instance->reply_queue) { 4807 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 4808 goto fail_reply_queue; 4809 } 4810 4811 if (megasas_issue_init_mfi(instance)) 4812 goto fail_fw_init; 4813 4814 if (megasas_get_ctrl_info(instance)) { 4815 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4816 "Fail from %s %d\n", instance->unique_id, 4817 __func__, __LINE__); 4818 goto fail_fw_init; 4819 } 4820 4821 instance->fw_support_ieee = 0; 4822 instance->fw_support_ieee = 4823 (instance->instancet->read_fw_status_reg(reg_set) & 4824 0x04000000); 4825 4826 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 4827 instance->fw_support_ieee); 4828 4829 if (instance->fw_support_ieee) 4830 instance->flag_ieee = 1; 4831 4832 return 0; 4833 4834 fail_fw_init: 4835 4836 pci_free_consistent(instance->pdev, reply_q_sz, 4837 instance->reply_queue, instance->reply_queue_h); 4838 fail_reply_queue: 4839 megasas_free_cmds(instance); 4840 4841 fail_alloc_cmds: 4842 return 1; 4843 } 4844 4845 /* 4846 * megasas_setup_irqs_msix - register legacy interrupts. 4847 * @instance: Adapter soft state 4848 * 4849 * Do not enable interrupt, only setup ISRs. 4850 * 4851 * Return 0 on success. 4852 */ 4853 static int 4854 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 4855 { 4856 struct pci_dev *pdev; 4857 4858 pdev = instance->pdev; 4859 instance->irq_context[0].instance = instance; 4860 instance->irq_context[0].MSIxIndex = 0; 4861 if (request_irq(pdev->irq, instance->instancet->service_isr, 4862 IRQF_SHARED, "megasas", &instance->irq_context[0])) { 4863 dev_err(&instance->pdev->dev, 4864 "Failed to register IRQ from %s %d\n", 4865 __func__, __LINE__); 4866 return -1; 4867 } 4868 return 0; 4869 } 4870 4871 /** 4872 * megasas_setup_irqs_msix - register MSI-x interrupts. 4873 * @instance: Adapter soft state 4874 * @is_probe: Driver probe check 4875 * 4876 * Do not enable interrupt, only setup ISRs. 4877 * 4878 * Return 0 on success. 
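 *
 * Illustrative call site (condensed from megasas_init_fw() below):
 *
 *	if (instance->msix_vectors ?
 *		megasas_setup_irqs_msix(instance, 1) :
 *		megasas_setup_irqs_ioapic(instance))
 *		goto fail_setup_irqs;
 *
 * On a per-vector request_irq() failure this function either falls back to
 * the IO-APIC path (during probe) or returns -1.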
4879 */ 4880 static int 4881 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 4882 { 4883 int i, j, cpu; 4884 struct pci_dev *pdev; 4885 4886 pdev = instance->pdev; 4887 4888 /* Try MSI-x */ 4889 cpu = cpumask_first(cpu_online_mask); 4890 for (i = 0; i < instance->msix_vectors; i++) { 4891 instance->irq_context[i].instance = instance; 4892 instance->irq_context[i].MSIxIndex = i; 4893 if (request_irq(instance->msixentry[i].vector, 4894 instance->instancet->service_isr, 0, "megasas", 4895 &instance->irq_context[i])) { 4896 dev_err(&instance->pdev->dev, 4897 "Failed to register IRQ for vector %d.\n", i); 4898 for (j = 0; j < i; j++) { 4899 if (smp_affinity_enable) 4900 irq_set_affinity_hint( 4901 instance->msixentry[j].vector, NULL); 4902 free_irq(instance->msixentry[j].vector, 4903 &instance->irq_context[j]); 4904 } 4905 /* Retry irq register for IO_APIC*/ 4906 instance->msix_vectors = 0; 4907 if (is_probe) 4908 return megasas_setup_irqs_ioapic(instance); 4909 else 4910 return -1; 4911 } 4912 if (smp_affinity_enable) { 4913 if (irq_set_affinity_hint(instance->msixentry[i].vector, 4914 get_cpu_mask(cpu))) 4915 dev_err(&instance->pdev->dev, 4916 "Failed to set affinity hint" 4917 " for cpu %d\n", cpu); 4918 cpu = cpumask_next(cpu, cpu_online_mask); 4919 } 4920 } 4921 return 0; 4922 } 4923 4924 /* 4925 * megasas_destroy_irqs- unregister interrupts. 4926 * @instance: Adapter soft state 4927 * return: void 4928 */ 4929 static void 4930 megasas_destroy_irqs(struct megasas_instance *instance) { 4931 4932 int i; 4933 4934 if (instance->msix_vectors) 4935 for (i = 0; i < instance->msix_vectors; i++) { 4936 if (smp_affinity_enable) 4937 irq_set_affinity_hint( 4938 instance->msixentry[i].vector, NULL); 4939 free_irq(instance->msixentry[i].vector, 4940 &instance->irq_context[i]); 4941 } 4942 else 4943 free_irq(instance->pdev->irq, &instance->irq_context[0]); 4944 } 4945 4946 /** 4947 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 4948 * @instance: Adapter soft state 4949 * @is_probe: Driver probe check 4950 * 4951 * Return 0 on success. 
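 *
 * Illustrative call site (condensed from megasas_init_fw() below):
 *
 *	megasas_setup_jbod_map(instance);
 *
 * After the call, instance->use_seqnum_jbod_fp tells the rest of the driver
 * whether the JBOD sequence-number fast path may be used.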
4952 */ 4953 void 4954 megasas_setup_jbod_map(struct megasas_instance *instance) 4955 { 4956 int i; 4957 struct fusion_context *fusion = instance->ctrl_context; 4958 u32 pd_seq_map_sz; 4959 4960 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 4961 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 4962 4963 if (reset_devices || !fusion || 4964 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 4965 dev_info(&instance->pdev->dev, 4966 "Jbod map is not supported %s %d\n", 4967 __func__, __LINE__); 4968 instance->use_seqnum_jbod_fp = false; 4969 return; 4970 } 4971 4972 if (fusion->pd_seq_sync[0]) 4973 goto skip_alloc; 4974 4975 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 4976 fusion->pd_seq_sync[i] = dma_alloc_coherent 4977 (&instance->pdev->dev, pd_seq_map_sz, 4978 &fusion->pd_seq_phys[i], GFP_KERNEL); 4979 if (!fusion->pd_seq_sync[i]) { 4980 dev_err(&instance->pdev->dev, 4981 "Failed to allocate memory from %s %d\n", 4982 __func__, __LINE__); 4983 if (i == 1) { 4984 dma_free_coherent(&instance->pdev->dev, 4985 pd_seq_map_sz, fusion->pd_seq_sync[0], 4986 fusion->pd_seq_phys[0]); 4987 fusion->pd_seq_sync[0] = NULL; 4988 } 4989 instance->use_seqnum_jbod_fp = false; 4990 return; 4991 } 4992 } 4993 4994 skip_alloc: 4995 if (!megasas_sync_pd_seq_num(instance, false) && 4996 !megasas_sync_pd_seq_num(instance, true)) 4997 instance->use_seqnum_jbod_fp = true; 4998 else 4999 instance->use_seqnum_jbod_fp = false; 5000 } 5001 5002 /** 5003 * megasas_init_fw - Initializes the FW 5004 * @instance: Adapter soft state 5005 * 5006 * This is the main function for initializing firmware 5007 */ 5008 5009 static int megasas_init_fw(struct megasas_instance *instance) 5010 { 5011 u32 max_sectors_1; 5012 u32 max_sectors_2; 5013 u32 tmp_sectors, msix_enable, scratch_pad_2; 5014 resource_size_t base_addr; 5015 struct megasas_register_set __iomem *reg_set; 5016 struct megasas_ctrl_info *ctrl_info = NULL; 5017 unsigned long bar_list; 5018 int i, loop, fw_msix_count = 0; 5019 struct IOV_111 *iovPtr; 5020 struct fusion_context *fusion; 5021 5022 fusion = instance->ctrl_context; 5023 5024 /* Find first memory bar */ 5025 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5026 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 5027 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5028 "megasas: LSI")) { 5029 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5030 return -EBUSY; 5031 } 5032 5033 base_addr = pci_resource_start(instance->pdev, instance->bar); 5034 instance->reg_set = ioremap_nocache(base_addr, 8192); 5035 5036 if (!instance->reg_set) { 5037 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5038 goto fail_ioremap; 5039 } 5040 5041 reg_set = instance->reg_set; 5042 5043 switch (instance->pdev->device) { 5044 case PCI_DEVICE_ID_LSI_FUSION: 5045 case PCI_DEVICE_ID_LSI_PLASMA: 5046 case PCI_DEVICE_ID_LSI_INVADER: 5047 case PCI_DEVICE_ID_LSI_FURY: 5048 case PCI_DEVICE_ID_LSI_INTRUDER: 5049 case PCI_DEVICE_ID_LSI_INTRUDER_24: 5050 case PCI_DEVICE_ID_LSI_CUTLASS_52: 5051 case PCI_DEVICE_ID_LSI_CUTLASS_53: 5052 instance->instancet = &megasas_instance_template_fusion; 5053 break; 5054 case PCI_DEVICE_ID_LSI_SAS1078R: 5055 case PCI_DEVICE_ID_LSI_SAS1078DE: 5056 instance->instancet = &megasas_instance_template_ppc; 5057 break; 5058 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5059 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5060 instance->instancet = &megasas_instance_template_gen2; 5061 break; 5062 case 
PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5063 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5064 instance->instancet = &megasas_instance_template_skinny; 5065 break; 5066 case PCI_DEVICE_ID_LSI_SAS1064R: 5067 case PCI_DEVICE_ID_DELL_PERC5: 5068 default: 5069 instance->instancet = &megasas_instance_template_xscale; 5070 break; 5071 } 5072 5073 if (megasas_transition_to_ready(instance, 0)) { 5074 atomic_set(&instance->fw_reset_no_pci_access, 1); 5075 instance->instancet->adp_reset 5076 (instance, instance->reg_set); 5077 atomic_set(&instance->fw_reset_no_pci_access, 0); 5078 dev_info(&instance->pdev->dev, 5079 "FW restarted successfully from %s!\n", 5080 __func__); 5081 5082 /*waitting for about 30 second before retry*/ 5083 ssleep(30); 5084 5085 if (megasas_transition_to_ready(instance, 0)) 5086 goto fail_ready_state; 5087 } 5088 5089 /* 5090 * MSI-X host index 0 is common for all adapter. 5091 * It is used for all MPT based Adapters. 5092 */ 5093 instance->reply_post_host_index_addr[0] = 5094 (u32 __iomem *)((u8 __iomem *)instance->reg_set + 5095 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5096 5097 /* Check if MSI-X is supported while in ready state */ 5098 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5099 0x4000000) >> 0x1a; 5100 if (msix_enable && !msix_disable) { 5101 scratch_pad_2 = readl 5102 (&instance->reg_set->outbound_scratch_pad_2); 5103 /* Check max MSI-X vectors */ 5104 if (fusion) { 5105 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ 5106 instance->msix_vectors = (scratch_pad_2 5107 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5108 fw_msix_count = instance->msix_vectors; 5109 } else { /* Invader series supports more than 8 MSI-x vectors*/ 5110 instance->msix_vectors = ((scratch_pad_2 5111 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5112 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5113 if (rdpq_enable) 5114 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 5115 1 : 0; 5116 fw_msix_count = instance->msix_vectors; 5117 /* Save 1-15 reply post index address to local memory 5118 * Index 0 is already saved from reg offset 5119 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5120 */ 5121 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5122 instance->reply_post_host_index_addr[loop] = 5123 (u32 __iomem *) 5124 ((u8 __iomem *)instance->reg_set + 5125 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5126 + (loop * 0x10)); 5127 } 5128 } 5129 if (msix_vectors) 5130 instance->msix_vectors = min(msix_vectors, 5131 instance->msix_vectors); 5132 } else /* MFI adapters */ 5133 instance->msix_vectors = 1; 5134 /* Don't bother allocating more MSI-X vectors than cpus */ 5135 instance->msix_vectors = min(instance->msix_vectors, 5136 (unsigned int)num_online_cpus()); 5137 for (i = 0; i < instance->msix_vectors; i++) 5138 instance->msixentry[i].entry = i; 5139 i = pci_enable_msix_range(instance->pdev, instance->msixentry, 5140 1, instance->msix_vectors); 5141 if (i > 0) 5142 instance->msix_vectors = i; 5143 else 5144 instance->msix_vectors = 0; 5145 } 5146 5147 dev_info(&instance->pdev->dev, 5148 "firmware supports msix\t: (%d)", fw_msix_count); 5149 dev_info(&instance->pdev->dev, 5150 "current msix/online cpus\t: (%d/%d)\n", 5151 instance->msix_vectors, (unsigned int)num_online_cpus()); 5152 dev_info(&instance->pdev->dev, 5153 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5154 5155 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5156 (unsigned long)instance); 5157 5158 if (instance->msix_vectors ? 
5159 megasas_setup_irqs_msix(instance, 1) : 5160 megasas_setup_irqs_ioapic(instance)) 5161 goto fail_setup_irqs; 5162 5163 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), 5164 GFP_KERNEL); 5165 if (instance->ctrl_info == NULL) 5166 goto fail_init_adapter; 5167 5168 /* 5169 * Below are default value for legacy Firmware. 5170 * non-fusion based controllers 5171 */ 5172 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5173 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5174 /* Get operational params, sge flags, send init cmd to controller */ 5175 if (instance->instancet->init_adapter(instance)) 5176 goto fail_init_adapter; 5177 5178 5179 instance->instancet->enable_intr(instance); 5180 5181 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5182 5183 megasas_setup_jbod_map(instance); 5184 5185 /** for passthrough 5186 * the following function will get the PD LIST. 5187 */ 5188 memset(instance->pd_list, 0, 5189 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5190 if (megasas_get_pd_list(instance) < 0) { 5191 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5192 goto fail_get_pd_list; 5193 } 5194 5195 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5196 if (megasas_ld_list_query(instance, 5197 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5198 megasas_get_ld_list(instance); 5199 5200 /* 5201 * Compute the max allowed sectors per IO: The controller info has two 5202 * limits on max sectors. Driver should use the minimum of these two. 5203 * 5204 * 1 << stripe_sz_ops.min = max sectors per strip 5205 * 5206 * Note that older firmwares ( < FW ver 30) didn't report information 5207 * to calculate max_sectors_1. So the number ended up as zero always. 5208 */ 5209 tmp_sectors = 0; 5210 ctrl_info = instance->ctrl_info; 5211 5212 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5213 le16_to_cpu(ctrl_info->max_strips_per_io); 5214 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5215 5216 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5217 5218 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5219 instance->passive = ctrl_info->cluster.passive; 5220 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5221 instance->UnevenSpanSupport = 5222 ctrl_info->adapterOperations2.supportUnevenSpans; 5223 if (instance->UnevenSpanSupport) { 5224 struct fusion_context *fusion = instance->ctrl_context; 5225 if (MR_ValidateMapInfo(instance)) 5226 fusion->fast_path_io = 1; 5227 else 5228 fusion->fast_path_io = 0; 5229 5230 } 5231 if (ctrl_info->host_interface.SRIOV) { 5232 instance->requestorId = ctrl_info->iov.requestorId; 5233 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5234 if (!ctrl_info->adapterOperations2.activePassive) 5235 instance->PlasmaFW111 = 1; 5236 5237 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5238 instance->PlasmaFW111 ? 
"1.11" : "new"); 5239 5240 if (instance->PlasmaFW111) { 5241 iovPtr = (struct IOV_111 *) 5242 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5243 instance->requestorId = iovPtr->requestorId; 5244 } 5245 } 5246 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5247 instance->requestorId); 5248 } 5249 5250 instance->crash_dump_fw_support = 5251 ctrl_info->adapterOperations3.supportCrashDump; 5252 instance->crash_dump_drv_support = 5253 (instance->crash_dump_fw_support && 5254 instance->crash_dump_buf); 5255 if (instance->crash_dump_drv_support) 5256 megasas_set_crash_dump_params(instance, 5257 MR_CRASH_BUF_TURN_OFF); 5258 5259 else { 5260 if (instance->crash_dump_buf) 5261 pci_free_consistent(instance->pdev, 5262 CRASH_DMA_BUF_SIZE, 5263 instance->crash_dump_buf, 5264 instance->crash_dump_h); 5265 instance->crash_dump_buf = NULL; 5266 } 5267 5268 5269 dev_info(&instance->pdev->dev, 5270 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5271 le16_to_cpu(ctrl_info->pci.vendor_id), 5272 le16_to_cpu(ctrl_info->pci.device_id), 5273 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5274 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5275 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5276 instance->UnevenSpanSupport ? "yes" : "no"); 5277 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5278 instance->crash_dump_drv_support ? "yes" : "no"); 5279 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5280 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5281 5282 5283 instance->max_sectors_per_req = instance->max_num_sge * 5284 SGE_BUFFER_SIZE / 512; 5285 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5286 instance->max_sectors_per_req = tmp_sectors; 5287 5288 /* Check for valid throttlequeuedepth module parameter */ 5289 if (throttlequeuedepth && 5290 throttlequeuedepth <= instance->max_scsi_cmds) 5291 instance->throttlequeuedepth = throttlequeuedepth; 5292 else 5293 instance->throttlequeuedepth = 5294 MEGASAS_THROTTLE_QUEUE_DEPTH; 5295 5296 if (resetwaittime > MEGASAS_RESET_WAIT_TIME) 5297 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5298 5299 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5300 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5301 5302 /* Launch SR-IOV heartbeat timer */ 5303 if (instance->requestorId) { 5304 if (!megasas_sriov_start_heartbeat(instance, 1)) 5305 megasas_start_timer(instance, 5306 &instance->sriov_heartbeat_timer, 5307 megasas_sriov_heartbeat_handler, 5308 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 5309 else 5310 instance->skip_heartbeat_timer_del = 1; 5311 } 5312 5313 return 0; 5314 5315 fail_get_pd_list: 5316 instance->instancet->disable_intr(instance); 5317 fail_init_adapter: 5318 megasas_destroy_irqs(instance); 5319 fail_setup_irqs: 5320 if (instance->msix_vectors) 5321 pci_disable_msix(instance->pdev); 5322 instance->msix_vectors = 0; 5323 fail_ready_state: 5324 kfree(instance->ctrl_info); 5325 instance->ctrl_info = NULL; 5326 iounmap(instance->reg_set); 5327 5328 fail_ioremap: 5329 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5330 5331 return -EINVAL; 5332 } 5333 5334 /** 5335 * megasas_release_mfi - Reverses the FW initialization 5336 * @instance: Adapter soft state 5337 */ 5338 static void megasas_release_mfi(struct megasas_instance *instance) 5339 { 5340 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5341 5342 if (instance->reply_queue) 5343 pci_free_consistent(instance->pdev, reply_q_sz, 5344 instance->reply_queue, instance->reply_queue_h); 5345 5346 
megasas_free_cmds(instance); 5347 5348 iounmap(instance->reg_set); 5349 5350 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5351 } 5352 5353 /** 5354 * megasas_get_seq_num - Gets latest event sequence numbers 5355 * @instance: Adapter soft state 5356 * @eli: FW event log sequence numbers information 5357 * 5358 * FW maintains a log of all events in a non-volatile area. Upper layers would 5359 * usually find out the latest sequence number of the events, the seq number at 5360 * the boot etc. They would "read" all the events below the latest seq number 5361 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5362 * number), they would subsribe to AEN (asynchronous event notification) and 5363 * wait for the events to happen. 5364 */ 5365 static int 5366 megasas_get_seq_num(struct megasas_instance *instance, 5367 struct megasas_evt_log_info *eli) 5368 { 5369 struct megasas_cmd *cmd; 5370 struct megasas_dcmd_frame *dcmd; 5371 struct megasas_evt_log_info *el_info; 5372 dma_addr_t el_info_h = 0; 5373 5374 cmd = megasas_get_cmd(instance); 5375 5376 if (!cmd) { 5377 return -ENOMEM; 5378 } 5379 5380 dcmd = &cmd->frame->dcmd; 5381 el_info = pci_alloc_consistent(instance->pdev, 5382 sizeof(struct megasas_evt_log_info), 5383 &el_info_h); 5384 5385 if (!el_info) { 5386 megasas_return_cmd(instance, cmd); 5387 return -ENOMEM; 5388 } 5389 5390 memset(el_info, 0, sizeof(*el_info)); 5391 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5392 5393 dcmd->cmd = MFI_CMD_DCMD; 5394 dcmd->cmd_status = 0x0; 5395 dcmd->sge_count = 1; 5396 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5397 dcmd->timeout = 0; 5398 dcmd->pad_0 = 0; 5399 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5400 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5401 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); 5402 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5403 5404 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) == 5405 DCMD_SUCCESS) { 5406 /* 5407 * Copy the data back into callers buffer 5408 */ 5409 eli->newest_seq_num = el_info->newest_seq_num; 5410 eli->oldest_seq_num = el_info->oldest_seq_num; 5411 eli->clear_seq_num = el_info->clear_seq_num; 5412 eli->shutdown_seq_num = el_info->shutdown_seq_num; 5413 eli->boot_seq_num = el_info->boot_seq_num; 5414 } else 5415 dev_err(&instance->pdev->dev, "DCMD failed " 5416 "from %s\n", __func__); 5417 5418 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 5419 el_info, el_info_h); 5420 5421 megasas_return_cmd(instance, cmd); 5422 5423 return 0; 5424 } 5425 5426 /** 5427 * megasas_register_aen - Registers for asynchronous event notification 5428 * @instance: Adapter soft state 5429 * @seq_num: The starting sequence number 5430 * @class_locale: Class of the event 5431 * 5432 * This function subscribes for AEN for events beyond the @seq_num. It requests 5433 * to be notified if and only if the event is of type @class_locale 5434 */ 5435 static int 5436 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 5437 u32 class_locale_word) 5438 { 5439 int ret_val; 5440 struct megasas_cmd *cmd; 5441 struct megasas_dcmd_frame *dcmd; 5442 union megasas_evt_class_locale curr_aen; 5443 union megasas_evt_class_locale prev_aen; 5444 5445 /* 5446 * If there an AEN pending already (aen_cmd), check if the 5447 * class_locale of that pending AEN is inclusive of the new 5448 * AEN request we currently have. 
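 * (Hedged example: a pending registration for class PROGRESS with locale
 * MR_EVT_LOCALE_ALL already covers a later request for any higher class and
 * any locale bits, because lower class values are supersets and the locale
 * word is a bitmap; see the prev_aen/curr_aen checks below.)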
If it is, then we don't have 5449 * to do anything. In other words, whichever events the current 5450 * AEN request is subscribing to, have already been subscribed 5451 * to. 5452 * 5453 * If the old_cmd is _not_ inclusive, then we have to abort 5454 * that command, form a class_locale that is superset of both 5455 * old and current and re-issue to the FW 5456 */ 5457 5458 curr_aen.word = class_locale_word; 5459 5460 if (instance->aen_cmd) { 5461 5462 prev_aen.word = 5463 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 5464 5465 /* 5466 * A class whose enum value is smaller is inclusive of all 5467 * higher values. If a PROGRESS (= -1) was previously 5468 * registered, then a new registration requests for higher 5469 * classes need not be sent to FW. They are automatically 5470 * included. 5471 * 5472 * Locale numbers don't have such hierarchy. They are bitmap 5473 * values 5474 */ 5475 if ((prev_aen.members.class <= curr_aen.members.class) && 5476 !((prev_aen.members.locale & curr_aen.members.locale) ^ 5477 curr_aen.members.locale)) { 5478 /* 5479 * Previously issued event registration includes 5480 * current request. Nothing to do. 5481 */ 5482 return 0; 5483 } else { 5484 curr_aen.members.locale |= prev_aen.members.locale; 5485 5486 if (prev_aen.members.class < curr_aen.members.class) 5487 curr_aen.members.class = prev_aen.members.class; 5488 5489 instance->aen_cmd->abort_aen = 1; 5490 ret_val = megasas_issue_blocked_abort_cmd(instance, 5491 instance-> 5492 aen_cmd, 30); 5493 5494 if (ret_val) { 5495 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 5496 "previous AEN command\n"); 5497 return ret_val; 5498 } 5499 } 5500 } 5501 5502 cmd = megasas_get_cmd(instance); 5503 5504 if (!cmd) 5505 return -ENOMEM; 5506 5507 dcmd = &cmd->frame->dcmd; 5508 5509 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 5510 5511 /* 5512 * Prepare DCMD for aen registration 5513 */ 5514 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5515 5516 dcmd->cmd = MFI_CMD_DCMD; 5517 dcmd->cmd_status = 0x0; 5518 dcmd->sge_count = 1; 5519 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5520 dcmd->timeout = 0; 5521 dcmd->pad_0 = 0; 5522 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5523 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 5524 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5525 instance->last_seq_num = seq_num; 5526 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5527 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); 5528 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5529 5530 if (instance->aen_cmd != NULL) { 5531 megasas_return_cmd(instance, cmd); 5532 return 0; 5533 } 5534 5535 /* 5536 * Store reference to the cmd used to register for AEN. 
When an 5537 * application wants us to register for AEN, we have to abort this 5538 * cmd and re-register with a new EVENT LOCALE supplied by that app 5539 */ 5540 instance->aen_cmd = cmd; 5541 5542 /* 5543 * Issue the aen registration frame 5544 */ 5545 instance->instancet->issue_dcmd(instance, cmd); 5546 5547 return 0; 5548 } 5549 5550 /** 5551 * megasas_start_aen - Subscribes to AEN during driver load time 5552 * @instance: Adapter soft state 5553 */ 5554 static int megasas_start_aen(struct megasas_instance *instance) 5555 { 5556 struct megasas_evt_log_info eli; 5557 union megasas_evt_class_locale class_locale; 5558 5559 /* 5560 * Get the latest sequence number from FW 5561 */ 5562 memset(&eli, 0, sizeof(eli)); 5563 5564 if (megasas_get_seq_num(instance, &eli)) 5565 return -1; 5566 5567 /* 5568 * Register AEN with FW for latest sequence number plus 1 5569 */ 5570 class_locale.members.reserved = 0; 5571 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5572 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5573 5574 return megasas_register_aen(instance, 5575 le32_to_cpu(eli.newest_seq_num) + 1, 5576 class_locale.word); 5577 } 5578 5579 /** 5580 * megasas_io_attach - Attaches this driver to SCSI mid-layer 5581 * @instance: Adapter soft state 5582 */ 5583 static int megasas_io_attach(struct megasas_instance *instance) 5584 { 5585 struct Scsi_Host *host = instance->host; 5586 5587 /* 5588 * Export parameters required by SCSI mid-layer 5589 */ 5590 host->irq = instance->pdev->irq; 5591 host->unique_id = instance->unique_id; 5592 host->can_queue = instance->max_scsi_cmds; 5593 host->this_id = instance->init_id; 5594 host->sg_tablesize = instance->max_num_sge; 5595 5596 if (instance->fw_support_ieee) 5597 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 5598 5599 /* 5600 * Check if the module parameter value for max_sectors can be used 5601 */ 5602 if (max_sectors && max_sectors < instance->max_sectors_per_req) 5603 instance->max_sectors_per_req = max_sectors; 5604 else { 5605 if (max_sectors) { 5606 if (((instance->pdev->device == 5607 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 5608 (instance->pdev->device == 5609 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 5610 (max_sectors <= MEGASAS_MAX_SECTORS)) { 5611 instance->max_sectors_per_req = max_sectors; 5612 } else { 5613 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 5614 "and <= %d (or < 1MB for GEN2 controller)\n", 5615 instance->max_sectors_per_req); 5616 } 5617 } 5618 } 5619 5620 host->max_sectors = instance->max_sectors_per_req; 5621 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 5622 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 5623 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 5624 host->max_lun = MEGASAS_MAX_LUN; 5625 host->max_cmd_len = 16; 5626 5627 /* 5628 * Notify the mid-layer about the new controller 5629 */ 5630 if (scsi_add_host(host, &instance->pdev->dev)) { 5631 dev_err(&instance->pdev->dev, 5632 "Failed to add host from %s %d\n", 5633 __func__, __LINE__); 5634 return -ENODEV; 5635 } 5636 5637 return 0; 5638 } 5639 5640 static int 5641 megasas_set_dma_mask(struct pci_dev *pdev) 5642 { 5643 /* 5644 * All our controllers are capable of performing 64-bit DMA 5645 */ 5646 if (IS_DMA64) { 5647 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 5648 5649 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5650 goto fail_set_dma_mask; 5651 } 5652 } else { 5653 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5654 goto fail_set_dma_mask; 5655 } 5656 /* 5657 * Ensure that all data structures are allocated in 32-bit 5658 * memory. 
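 *
 * Note that the coherent mask is left at 32 bits even when the streaming
 * mask above was set to 64 bits. On failure this helper returns 1 (not a
 * negative errno); megasas_probe_one() only checks for a non-zero result.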
5659 */ 5660 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 5661 /* Try 32bit DMA mask and 32 bit Consistent dma mask */ 5662 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 5663 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 5664 dev_info(&pdev->dev, "set 32bit DMA mask" 5665 "and 32 bit consistent mask\n"); 5666 else 5667 goto fail_set_dma_mask; 5668 } 5669 5670 return 0; 5671 5672 fail_set_dma_mask: 5673 return 1; 5674 } 5675 5676 /** 5677 * megasas_probe_one - PCI hotplug entry point 5678 * @pdev: PCI device structure 5679 * @id: PCI ids of supported hotplugged adapter 5680 */ 5681 static int megasas_probe_one(struct pci_dev *pdev, 5682 const struct pci_device_id *id) 5683 { 5684 int rval, pos; 5685 struct Scsi_Host *host; 5686 struct megasas_instance *instance; 5687 u16 control = 0; 5688 struct fusion_context *fusion = NULL; 5689 5690 /* Reset MSI-X in the kdump kernel */ 5691 if (reset_devices) { 5692 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 5693 if (pos) { 5694 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 5695 &control); 5696 if (control & PCI_MSIX_FLAGS_ENABLE) { 5697 dev_info(&pdev->dev, "resetting MSI-X\n"); 5698 pci_write_config_word(pdev, 5699 pos + PCI_MSIX_FLAGS, 5700 control & 5701 ~PCI_MSIX_FLAGS_ENABLE); 5702 } 5703 } 5704 } 5705 5706 /* 5707 * PCI prepping: enable device set bus mastering and dma mask 5708 */ 5709 rval = pci_enable_device_mem(pdev); 5710 5711 if (rval) { 5712 return rval; 5713 } 5714 5715 pci_set_master(pdev); 5716 5717 if (megasas_set_dma_mask(pdev)) 5718 goto fail_set_dma_mask; 5719 5720 host = scsi_host_alloc(&megasas_template, 5721 sizeof(struct megasas_instance)); 5722 5723 if (!host) { 5724 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 5725 goto fail_alloc_instance; 5726 } 5727 5728 instance = (struct megasas_instance *)host->hostdata; 5729 memset(instance, 0, sizeof(*instance)); 5730 atomic_set(&instance->fw_reset_no_pci_access, 0); 5731 instance->pdev = pdev; 5732 5733 switch (instance->pdev->device) { 5734 case PCI_DEVICE_ID_LSI_FUSION: 5735 case PCI_DEVICE_ID_LSI_PLASMA: 5736 case PCI_DEVICE_ID_LSI_INVADER: 5737 case PCI_DEVICE_ID_LSI_FURY: 5738 case PCI_DEVICE_ID_LSI_INTRUDER: 5739 case PCI_DEVICE_ID_LSI_INTRUDER_24: 5740 case PCI_DEVICE_ID_LSI_CUTLASS_52: 5741 case PCI_DEVICE_ID_LSI_CUTLASS_53: 5742 { 5743 instance->ctrl_context_pages = 5744 get_order(sizeof(struct fusion_context)); 5745 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, 5746 instance->ctrl_context_pages); 5747 if (!instance->ctrl_context) { 5748 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " 5749 "memory for Fusion context info\n"); 5750 goto fail_alloc_dma_buf; 5751 } 5752 fusion = instance->ctrl_context; 5753 memset(fusion, 0, 5754 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); 5755 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 5756 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) 5757 fusion->adapter_type = THUNDERBOLT_SERIES; 5758 else 5759 fusion->adapter_type = INVADER_SERIES; 5760 } 5761 break; 5762 default: /* For all other supported controllers */ 5763 5764 instance->producer = 5765 pci_alloc_consistent(pdev, sizeof(u32), 5766 &instance->producer_h); 5767 instance->consumer = 5768 pci_alloc_consistent(pdev, sizeof(u32), 5769 &instance->consumer_h); 5770 5771 if (!instance->producer || !instance->consumer) { 5772 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " 5773 "memory for producer, consumer\n"); 5774 goto fail_alloc_dma_buf; 5775 } 5776 5777 
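		/*
		 * Illustrative sketch (not functional driver code): for these
		 * legacy MFI-style controllers, the producer and consumer
		 * words allocated above are the reply-ring indices shared
		 * with firmware; both are cleared below so the first
		 * completion starts at slot 0. The completion tasklet earlier
		 * in this file drains the ring roughly like this, assuming
		 * reply_q_sz is the ring size:
		 *
		 *	while (consumer != producer) {
		 *		context = le32_to_cpu(instance->reply_queue[consumer]);
		 *		megasas_complete_cmd(instance,
		 *				     instance->cmd_list[context],
		 *				     DID_OK);
		 *		if (++consumer == reply_q_sz)
		 *			consumer = 0;
		 *	}
		 *	*instance->consumer = cpu_to_le32(producer);
		 */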
*instance->producer = 0; 5778 *instance->consumer = 0; 5779 break; 5780 } 5781 5782 /* Crash dump feature related initialisation*/ 5783 instance->drv_buf_index = 0; 5784 instance->drv_buf_alloc = 0; 5785 instance->crash_dump_fw_support = 0; 5786 instance->crash_dump_app_support = 0; 5787 instance->fw_crash_state = UNAVAILABLE; 5788 spin_lock_init(&instance->crashdump_lock); 5789 instance->crash_dump_buf = NULL; 5790 5791 megasas_poll_wait_aen = 0; 5792 instance->flag_ieee = 0; 5793 instance->ev = NULL; 5794 instance->issuepend_done = 1; 5795 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 5796 instance->is_imr = 0; 5797 5798 instance->evt_detail = pci_alloc_consistent(pdev, 5799 sizeof(struct 5800 megasas_evt_detail), 5801 &instance->evt_detail_h); 5802 5803 if (!instance->evt_detail) { 5804 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for " 5805 "event detail structure\n"); 5806 goto fail_alloc_dma_buf; 5807 } 5808 5809 if (!reset_devices) { 5810 instance->system_info_buf = pci_zalloc_consistent(pdev, 5811 sizeof(struct MR_DRV_SYSTEM_INFO), 5812 &instance->system_info_h); 5813 if (!instance->system_info_buf) 5814 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); 5815 5816 instance->pd_info = pci_alloc_consistent(pdev, 5817 sizeof(struct MR_PD_INFO), &instance->pd_info_h); 5818 5819 if (!instance->pd_info) 5820 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 5821 5822 instance->crash_dump_buf = pci_alloc_consistent(pdev, 5823 CRASH_DMA_BUF_SIZE, 5824 &instance->crash_dump_h); 5825 if (!instance->crash_dump_buf) 5826 dev_err(&pdev->dev, "Can't allocate Firmware " 5827 "crash dump DMA buffer\n"); 5828 } 5829 5830 /* 5831 * Initialize locks and queues 5832 */ 5833 INIT_LIST_HEAD(&instance->cmd_pool); 5834 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 5835 5836 atomic_set(&instance->fw_outstanding,0); 5837 5838 init_waitqueue_head(&instance->int_cmd_wait_q); 5839 init_waitqueue_head(&instance->abort_cmd_wait_q); 5840 5841 spin_lock_init(&instance->mfi_pool_lock); 5842 spin_lock_init(&instance->hba_lock); 5843 spin_lock_init(&instance->completion_lock); 5844 5845 mutex_init(&instance->reset_mutex); 5846 mutex_init(&instance->hba_mutex); 5847 5848 /* 5849 * Initialize PCI related and misc parameters 5850 */ 5851 instance->host = host; 5852 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 5853 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 5854 instance->ctrl_info = NULL; 5855 5856 5857 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5858 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 5859 instance->flag_ieee = 1; 5860 5861 megasas_dbg_lvl = 0; 5862 instance->flag = 0; 5863 instance->unload = 1; 5864 instance->last_time = 0; 5865 instance->disableOnlineCtrlReset = 1; 5866 instance->UnevenSpanSupport = 0; 5867 5868 if (instance->ctrl_context) { 5869 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 5870 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); 5871 } else 5872 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 5873 5874 /* 5875 * Initialize MFI Firmware 5876 */ 5877 if (megasas_init_fw(instance)) 5878 goto fail_init_mfi; 5879 5880 if (instance->requestorId) { 5881 if (instance->PlasmaFW111) { 5882 instance->vf_affiliation_111 = 5883 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 5884 &instance->vf_affiliation_111_h); 5885 if (!instance->vf_affiliation_111) 5886 dev_warn(&pdev->dev, "Can't allocate " 5887 "memory for VF 
affiliation buffer\n"); 5888 } else { 5889 instance->vf_affiliation = 5890 pci_alloc_consistent(pdev, 5891 (MAX_LOGICAL_DRIVES + 1) * 5892 sizeof(struct MR_LD_VF_AFFILIATION), 5893 &instance->vf_affiliation_h); 5894 if (!instance->vf_affiliation) 5895 dev_warn(&pdev->dev, "Can't allocate " 5896 "memory for VF affiliation buffer\n"); 5897 } 5898 } 5899 5900 /* 5901 * Store instance in PCI softstate 5902 */ 5903 pci_set_drvdata(pdev, instance); 5904 5905 /* 5906 * Add this controller to megasas_mgmt_info structure so that it 5907 * can be exported to management applications 5908 */ 5909 megasas_mgmt_info.count++; 5910 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 5911 megasas_mgmt_info.max_index++; 5912 5913 /* 5914 * Register with SCSI mid-layer 5915 */ 5916 if (megasas_io_attach(instance)) 5917 goto fail_io_attach; 5918 5919 instance->unload = 0; 5920 /* 5921 * Trigger SCSI to scan our drives 5922 */ 5923 scsi_scan_host(host); 5924 5925 /* 5926 * Initiate AEN (Asynchronous Event Notification) 5927 */ 5928 if (megasas_start_aen(instance)) { 5929 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 5930 goto fail_start_aen; 5931 } 5932 5933 /* Get current SR-IOV LD/VF affiliation */ 5934 if (instance->requestorId) 5935 megasas_get_ld_vf_affiliation(instance, 1); 5936 5937 return 0; 5938 5939 fail_start_aen: 5940 fail_io_attach: 5941 megasas_mgmt_info.count--; 5942 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 5943 megasas_mgmt_info.max_index--; 5944 5945 instance->instancet->disable_intr(instance); 5946 megasas_destroy_irqs(instance); 5947 5948 if (instance->ctrl_context) 5949 megasas_release_fusion(instance); 5950 else 5951 megasas_release_mfi(instance); 5952 if (instance->msix_vectors) 5953 pci_disable_msix(instance->pdev); 5954 fail_init_mfi: 5955 fail_alloc_dma_buf: 5956 if (instance->evt_detail) 5957 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5958 instance->evt_detail, 5959 instance->evt_detail_h); 5960 5961 if (instance->pd_info) 5962 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 5963 instance->pd_info, 5964 instance->pd_info_h); 5965 if (instance->producer) 5966 pci_free_consistent(pdev, sizeof(u32), instance->producer, 5967 instance->producer_h); 5968 if (instance->consumer) 5969 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 5970 instance->consumer_h); 5971 scsi_host_put(host); 5972 5973 fail_alloc_instance: 5974 fail_set_dma_mask: 5975 pci_disable_device(pdev); 5976 5977 return -ENODEV; 5978 } 5979 5980 /** 5981 * megasas_flush_cache - Requests FW to flush all its caches 5982 * @instance: Adapter soft state 5983 */ 5984 static void megasas_flush_cache(struct megasas_instance *instance) 5985 { 5986 struct megasas_cmd *cmd; 5987 struct megasas_dcmd_frame *dcmd; 5988 5989 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 5990 return; 5991 5992 cmd = megasas_get_cmd(instance); 5993 5994 if (!cmd) 5995 return; 5996 5997 dcmd = &cmd->frame->dcmd; 5998 5999 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6000 6001 dcmd->cmd = MFI_CMD_DCMD; 6002 dcmd->cmd_status = 0x0; 6003 dcmd->sge_count = 0; 6004 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6005 dcmd->timeout = 0; 6006 dcmd->pad_0 = 0; 6007 dcmd->data_xfer_len = 0; 6008 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6009 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6010 6011 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6012 != DCMD_SUCCESS) { 6013 dev_err(&instance->pdev->dev, 6014 "return from %s 
%d\n", __func__, __LINE__); 6015 return; 6016 } 6017 6018 megasas_return_cmd(instance, cmd); 6019 } 6020 6021 /** 6022 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6023 * @instance: Adapter soft state 6024 * @opcode: Shutdown/Hibernate 6025 */ 6026 static void megasas_shutdown_controller(struct megasas_instance *instance, 6027 u32 opcode) 6028 { 6029 struct megasas_cmd *cmd; 6030 struct megasas_dcmd_frame *dcmd; 6031 6032 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6033 return; 6034 6035 cmd = megasas_get_cmd(instance); 6036 6037 if (!cmd) 6038 return; 6039 6040 if (instance->aen_cmd) 6041 megasas_issue_blocked_abort_cmd(instance, 6042 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 6043 if (instance->map_update_cmd) 6044 megasas_issue_blocked_abort_cmd(instance, 6045 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 6046 if (instance->jbod_seq_cmd) 6047 megasas_issue_blocked_abort_cmd(instance, 6048 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 6049 6050 dcmd = &cmd->frame->dcmd; 6051 6052 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6053 6054 dcmd->cmd = MFI_CMD_DCMD; 6055 dcmd->cmd_status = 0x0; 6056 dcmd->sge_count = 0; 6057 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6058 dcmd->timeout = 0; 6059 dcmd->pad_0 = 0; 6060 dcmd->data_xfer_len = 0; 6061 dcmd->opcode = cpu_to_le32(opcode); 6062 6063 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6064 != DCMD_SUCCESS) { 6065 dev_err(&instance->pdev->dev, 6066 "return from %s %d\n", __func__, __LINE__); 6067 return; 6068 } 6069 6070 megasas_return_cmd(instance, cmd); 6071 } 6072 6073 #ifdef CONFIG_PM 6074 /** 6075 * megasas_suspend - driver suspend entry point 6076 * @pdev: PCI device structure 6077 * @state: PCI power state to suspend routine 6078 */ 6079 static int 6080 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 6081 { 6082 struct Scsi_Host *host; 6083 struct megasas_instance *instance; 6084 6085 instance = pci_get_drvdata(pdev); 6086 host = instance->host; 6087 instance->unload = 1; 6088 6089 /* Shutdown SR-IOV heartbeat timer */ 6090 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6091 del_timer_sync(&instance->sriov_heartbeat_timer); 6092 6093 megasas_flush_cache(instance); 6094 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6095 6096 /* cancel the delayed work if this work still in queue */ 6097 if (instance->ev != NULL) { 6098 struct megasas_aen_event *ev = instance->ev; 6099 cancel_delayed_work_sync(&ev->hotplug_work); 6100 instance->ev = NULL; 6101 } 6102 6103 tasklet_kill(&instance->isr_tasklet); 6104 6105 pci_set_drvdata(instance->pdev, instance); 6106 instance->instancet->disable_intr(instance); 6107 6108 megasas_destroy_irqs(instance); 6109 6110 if (instance->msix_vectors) 6111 pci_disable_msix(instance->pdev); 6112 6113 pci_save_state(pdev); 6114 pci_disable_device(pdev); 6115 6116 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 6117 6118 return 0; 6119 } 6120 6121 /** 6122 * megasas_resume- driver resume entry point 6123 * @pdev: PCI device structure 6124 */ 6125 static int 6126 megasas_resume(struct pci_dev *pdev) 6127 { 6128 int rval; 6129 struct Scsi_Host *host; 6130 struct megasas_instance *instance; 6131 6132 instance = pci_get_drvdata(pdev); 6133 host = instance->host; 6134 pci_set_power_state(pdev, PCI_D0); 6135 pci_enable_wake(pdev, PCI_D0, 0); 6136 pci_restore_state(pdev); 6137 6138 /* 6139 * PCI prepping: enable device set bus mastering and dma mask 6140 */ 6141 rval = pci_enable_device_mem(pdev); 
6142 6143 if (rval) { 6144 dev_err(&pdev->dev, "Enable device failed\n"); 6145 return rval; 6146 } 6147 6148 pci_set_master(pdev); 6149 6150 if (megasas_set_dma_mask(pdev)) 6151 goto fail_set_dma_mask; 6152 6153 /* 6154 * Initialize MFI Firmware 6155 */ 6156 6157 atomic_set(&instance->fw_outstanding, 0); 6158 6159 /* 6160 * We expect the FW state to be READY 6161 */ 6162 if (megasas_transition_to_ready(instance, 0)) 6163 goto fail_ready_state; 6164 6165 /* Now re-enable MSI-X */ 6166 if (instance->msix_vectors && 6167 pci_enable_msix_exact(instance->pdev, instance->msixentry, 6168 instance->msix_vectors)) 6169 goto fail_reenable_msix; 6170 6171 if (instance->ctrl_context) { 6172 megasas_reset_reply_desc(instance); 6173 if (megasas_ioc_init_fusion(instance)) { 6174 megasas_free_cmds(instance); 6175 megasas_free_cmds_fusion(instance); 6176 goto fail_init_mfi; 6177 } 6178 if (!megasas_get_map_info(instance)) 6179 megasas_sync_map_info(instance); 6180 } else { 6181 *instance->producer = 0; 6182 *instance->consumer = 0; 6183 if (megasas_issue_init_mfi(instance)) 6184 goto fail_init_mfi; 6185 } 6186 6187 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6188 (unsigned long)instance); 6189 6190 if (instance->msix_vectors ? 6191 megasas_setup_irqs_msix(instance, 0) : 6192 megasas_setup_irqs_ioapic(instance)) 6193 goto fail_init_mfi; 6194 6195 /* Re-launch SR-IOV heartbeat timer */ 6196 if (instance->requestorId) { 6197 if (!megasas_sriov_start_heartbeat(instance, 0)) 6198 megasas_start_timer(instance, 6199 &instance->sriov_heartbeat_timer, 6200 megasas_sriov_heartbeat_handler, 6201 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 6202 else { 6203 instance->skip_heartbeat_timer_del = 1; 6204 goto fail_init_mfi; 6205 } 6206 } 6207 6208 instance->instancet->enable_intr(instance); 6209 megasas_setup_jbod_map(instance); 6210 instance->unload = 0; 6211 6212 /* 6213 * Initiate AEN (Asynchronous Event Notification) 6214 */ 6215 if (megasas_start_aen(instance)) 6216 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 6217 6218 return 0; 6219 6220 fail_init_mfi: 6221 if (instance->evt_detail) 6222 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6223 instance->evt_detail, 6224 instance->evt_detail_h); 6225 6226 if (instance->pd_info) 6227 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6228 instance->pd_info, 6229 instance->pd_info_h); 6230 if (instance->producer) 6231 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6232 instance->producer_h); 6233 if (instance->consumer) 6234 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 6235 instance->consumer_h); 6236 scsi_host_put(host); 6237 6238 fail_set_dma_mask: 6239 fail_ready_state: 6240 fail_reenable_msix: 6241 6242 pci_disable_device(pdev); 6243 6244 return -ENODEV; 6245 } 6246 #else 6247 #define megasas_suspend NULL 6248 #define megasas_resume NULL 6249 #endif 6250 6251 /** 6252 * megasas_detach_one - PCI hot"un"plug entry point 6253 * @pdev: PCI device structure 6254 */ 6255 static void megasas_detach_one(struct pci_dev *pdev) 6256 { 6257 int i; 6258 struct Scsi_Host *host; 6259 struct megasas_instance *instance; 6260 struct fusion_context *fusion; 6261 u32 pd_seq_map_sz; 6262 6263 instance = pci_get_drvdata(pdev); 6264 instance->unload = 1; 6265 host = instance->host; 6266 fusion = instance->ctrl_context; 6267 6268 /* Shutdown SR-IOV heartbeat timer */ 6269 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6270 del_timer_sync(&instance->sriov_heartbeat_timer); 6271 6272 if 
(instance->fw_crash_state != UNAVAILABLE) 6273 megasas_free_host_crash_buffer(instance); 6274 scsi_remove_host(instance->host); 6275 megasas_flush_cache(instance); 6276 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6277 6278 /* cancel the delayed work if this work still in queue*/ 6279 if (instance->ev != NULL) { 6280 struct megasas_aen_event *ev = instance->ev; 6281 cancel_delayed_work_sync(&ev->hotplug_work); 6282 instance->ev = NULL; 6283 } 6284 6285 /* cancel all wait events */ 6286 wake_up_all(&instance->int_cmd_wait_q); 6287 6288 tasklet_kill(&instance->isr_tasklet); 6289 6290 /* 6291 * Take the instance off the instance array. Note that we will not 6292 * decrement the max_index. We let this array be sparse array 6293 */ 6294 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6295 if (megasas_mgmt_info.instance[i] == instance) { 6296 megasas_mgmt_info.count--; 6297 megasas_mgmt_info.instance[i] = NULL; 6298 6299 break; 6300 } 6301 } 6302 6303 instance->instancet->disable_intr(instance); 6304 6305 megasas_destroy_irqs(instance); 6306 6307 if (instance->msix_vectors) 6308 pci_disable_msix(instance->pdev); 6309 6310 if (instance->ctrl_context) { 6311 megasas_release_fusion(instance); 6312 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6313 (sizeof(struct MR_PD_CFG_SEQ) * 6314 (MAX_PHYSICAL_DEVICES - 1)); 6315 for (i = 0; i < 2 ; i++) { 6316 if (fusion->ld_map[i]) 6317 dma_free_coherent(&instance->pdev->dev, 6318 fusion->max_map_sz, 6319 fusion->ld_map[i], 6320 fusion->ld_map_phys[i]); 6321 if (fusion->ld_drv_map[i]) 6322 free_pages((ulong)fusion->ld_drv_map[i], 6323 fusion->drv_map_pages); 6324 if (fusion->pd_seq_sync[i]) 6325 dma_free_coherent(&instance->pdev->dev, 6326 pd_seq_map_sz, 6327 fusion->pd_seq_sync[i], 6328 fusion->pd_seq_phys[i]); 6329 } 6330 free_pages((ulong)instance->ctrl_context, 6331 instance->ctrl_context_pages); 6332 } else { 6333 megasas_release_mfi(instance); 6334 pci_free_consistent(pdev, sizeof(u32), 6335 instance->producer, 6336 instance->producer_h); 6337 pci_free_consistent(pdev, sizeof(u32), 6338 instance->consumer, 6339 instance->consumer_h); 6340 } 6341 6342 kfree(instance->ctrl_info); 6343 6344 if (instance->evt_detail) 6345 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6346 instance->evt_detail, instance->evt_detail_h); 6347 6348 if (instance->pd_info) 6349 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6350 instance->pd_info, 6351 instance->pd_info_h); 6352 if (instance->vf_affiliation) 6353 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6354 sizeof(struct MR_LD_VF_AFFILIATION), 6355 instance->vf_affiliation, 6356 instance->vf_affiliation_h); 6357 6358 if (instance->vf_affiliation_111) 6359 pci_free_consistent(pdev, 6360 sizeof(struct MR_LD_VF_AFFILIATION_111), 6361 instance->vf_affiliation_111, 6362 instance->vf_affiliation_111_h); 6363 6364 if (instance->hb_host_mem) 6365 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), 6366 instance->hb_host_mem, 6367 instance->hb_host_mem_h); 6368 6369 if (instance->crash_dump_buf) 6370 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 6371 instance->crash_dump_buf, instance->crash_dump_h); 6372 6373 if (instance->system_info_buf) 6374 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), 6375 instance->system_info_buf, instance->system_info_h); 6376 6377 scsi_host_put(host); 6378 6379 pci_disable_device(pdev); 6380 } 6381 6382 /** 6383 * megasas_shutdown - Shutdown entry point 6384 * @device: Generic device structure 6385 */ 6386 static void 
megasas_shutdown(struct pci_dev *pdev) 6387 { 6388 struct megasas_instance *instance = pci_get_drvdata(pdev); 6389 6390 instance->unload = 1; 6391 megasas_flush_cache(instance); 6392 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6393 instance->instancet->disable_intr(instance); 6394 megasas_destroy_irqs(instance); 6395 6396 if (instance->msix_vectors) 6397 pci_disable_msix(instance->pdev); 6398 } 6399 6400 /** 6401 * megasas_mgmt_open - char node "open" entry point 6402 */ 6403 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 6404 { 6405 /* 6406 * Allow only those users with admin rights 6407 */ 6408 if (!capable(CAP_SYS_ADMIN)) 6409 return -EACCES; 6410 6411 return 0; 6412 } 6413 6414 /** 6415 * megasas_mgmt_fasync - Async notifier registration from applications 6416 * 6417 * This function adds the calling process to a driver global queue. When an 6418 * event occurs, SIGIO will be sent to all processes in this queue. 6419 */ 6420 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 6421 { 6422 int rc; 6423 6424 mutex_lock(&megasas_async_queue_mutex); 6425 6426 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 6427 6428 mutex_unlock(&megasas_async_queue_mutex); 6429 6430 if (rc >= 0) { 6431 /* For sanity check when we get ioctl */ 6432 filep->private_data = filep; 6433 return 0; 6434 } 6435 6436 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 6437 6438 return rc; 6439 } 6440 6441 /** 6442 * megasas_mgmt_poll - char node "poll" entry point 6443 * */ 6444 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) 6445 { 6446 unsigned int mask; 6447 unsigned long flags; 6448 6449 poll_wait(file, &megasas_poll_wait, wait); 6450 spin_lock_irqsave(&poll_aen_lock, flags); 6451 if (megasas_poll_wait_aen) 6452 mask = (POLLIN | POLLRDNORM); 6453 else 6454 mask = 0; 6455 megasas_poll_wait_aen = 0; 6456 spin_unlock_irqrestore(&poll_aen_lock, flags); 6457 return mask; 6458 } 6459 6460 /* 6461 * megasas_set_crash_dump_params_ioctl: 6462 * Send CRASH_DUMP_MODE DCMD to all controllers 6463 * @cmd: MFI command frame 6464 */ 6465 6466 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 6467 { 6468 struct megasas_instance *local_instance; 6469 int i, error = 0; 6470 int crash_support; 6471 6472 crash_support = cmd->frame->dcmd.mbox.w[0]; 6473 6474 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6475 local_instance = megasas_mgmt_info.instance[i]; 6476 if (local_instance && local_instance->crash_dump_drv_support) { 6477 if ((atomic_read(&local_instance->adprecovery) == 6478 MEGASAS_HBA_OPERATIONAL) && 6479 !megasas_set_crash_dump_params(local_instance, 6480 crash_support)) { 6481 local_instance->crash_dump_app_support = 6482 crash_support; 6483 dev_info(&local_instance->pdev->dev, 6484 "Application firmware crash " 6485 "dump mode set success\n"); 6486 error = 0; 6487 } else { 6488 dev_info(&local_instance->pdev->dev, 6489 "Application firmware crash " 6490 "dump mode set failed\n"); 6491 error = -1; 6492 } 6493 } 6494 } 6495 return error; 6496 } 6497 6498 /** 6499 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 6500 * @instance: Adapter soft state 6501 * @argp: User's ioctl packet 6502 */ 6503 static int 6504 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 6505 struct megasas_iocpacket __user * user_ioc, 6506 struct megasas_iocpacket *ioc) 6507 { 6508 struct megasas_sge32 *kern_sge32; 6509 struct megasas_cmd *cmd; 6510 void *kbuff_arr[MAX_IOCTL_SGE]; 6511 dma_addr_t 
buf_handle = 0;
	int error = 0, i;
	void *sense = NULL;
	dma_addr_t sense_handle;
	unsigned long *sense_ptr;

	memset(kbuff_arr, 0, sizeof(kbuff_arr));

	if (ioc->sge_count > MAX_IOCTL_SGE) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
			ioc->sge_count, MAX_IOCTL_SGE);
		return -EINVAL;
	}

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
		return -ENOMEM;
	}

	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two
	 * frames into our cmd's frames. cmd->frame's context will get
	 * overwritten when we copy from user's frames. So set that value
	 * alone separately
	 */
	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
	cmd->frame->hdr.pad_0 = 0;
	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
					       MFI_FRAME_SGL64 |
					       MFI_FRAME_SENSE64));

	if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
		error = megasas_set_crash_dump_params_ioctl(cmd);
		megasas_return_cmd(instance, cmd);
		return error;
	}

	/*
	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes
	 * etc. are accomplished through different kinds of MFI frames. The
	 * driver needs to care only about substituting user buffers with
	 * kernel buffers in SGLs. The location of the SGL is embedded in
	 * the struct iocpacket itself.
	 */
	kern_sge32 = (struct megasas_sge32 *)
	    ((unsigned long)cmd->frame + ioc->sgl_off);

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < ioc->sge_count; i++) {
		if (!ioc->sgl[i].iov_len)
			continue;

		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
						  ioc->sgl[i].iov_len,
						  &buf_handle, GFP_KERNEL);
		if (!kbuff_arr[i]) {
			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
				   "kernel SGL buffer for IOCTL\n");
			error = -ENOMEM;
			goto out;
		}

		/*
		 * We don't change the dma_coherent_mask, so
		 * dma_alloc_coherent only returns 32bit addresses
		 */
		kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
		kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);

		/*
		 * We created a kernel buffer corresponding to the
		 * user buffer.
Now copy in from the user buffer 6588 */ 6589 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 6590 (u32) (ioc->sgl[i].iov_len))) { 6591 error = -EFAULT; 6592 goto out; 6593 } 6594 } 6595 6596 if (ioc->sense_len) { 6597 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 6598 &sense_handle, GFP_KERNEL); 6599 if (!sense) { 6600 error = -ENOMEM; 6601 goto out; 6602 } 6603 6604 sense_ptr = 6605 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 6606 *sense_ptr = cpu_to_le32(sense_handle); 6607 } 6608 6609 /* 6610 * Set the sync_cmd flag so that the ISR knows not to complete this 6611 * cmd to the SCSI mid-layer 6612 */ 6613 cmd->sync_cmd = 1; 6614 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 6615 cmd->sync_cmd = 0; 6616 dev_err(&instance->pdev->dev, 6617 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", 6618 __func__, __LINE__, cmd->frame->dcmd.opcode, 6619 cmd->cmd_status_drv); 6620 return -EBUSY; 6621 } 6622 6623 cmd->sync_cmd = 0; 6624 6625 if (instance->unload == 1) { 6626 dev_info(&instance->pdev->dev, "Driver unload is in progress " 6627 "don't submit data to application\n"); 6628 goto out; 6629 } 6630 /* 6631 * copy out the kernel buffers to user buffers 6632 */ 6633 for (i = 0; i < ioc->sge_count; i++) { 6634 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 6635 ioc->sgl[i].iov_len)) { 6636 error = -EFAULT; 6637 goto out; 6638 } 6639 } 6640 6641 /* 6642 * copy out the sense 6643 */ 6644 if (ioc->sense_len) { 6645 /* 6646 * sense_ptr points to the location that has the user 6647 * sense buffer address 6648 */ 6649 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 6650 ioc->sense_off); 6651 6652 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 6653 sense, ioc->sense_len)) { 6654 dev_err(&instance->pdev->dev, "Failed to copy out to user " 6655 "sense data\n"); 6656 error = -EFAULT; 6657 goto out; 6658 } 6659 } 6660 6661 /* 6662 * copy the status codes returned by the fw 6663 */ 6664 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 6665 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 6666 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 6667 error = -EFAULT; 6668 } 6669 6670 out: 6671 if (sense) { 6672 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 6673 sense, sense_handle); 6674 } 6675 6676 for (i = 0; i < ioc->sge_count; i++) { 6677 if (kbuff_arr[i]) { 6678 dma_free_coherent(&instance->pdev->dev, 6679 le32_to_cpu(kern_sge32[i].length), 6680 kbuff_arr[i], 6681 le32_to_cpu(kern_sge32[i].phys_addr)); 6682 kbuff_arr[i] = NULL; 6683 } 6684 } 6685 6686 megasas_return_cmd(instance, cmd); 6687 return error; 6688 } 6689 6690 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 6691 { 6692 struct megasas_iocpacket __user *user_ioc = 6693 (struct megasas_iocpacket __user *)arg; 6694 struct megasas_iocpacket *ioc; 6695 struct megasas_instance *instance; 6696 int error; 6697 int i; 6698 unsigned long flags; 6699 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 6700 6701 ioc = memdup_user(user_ioc, sizeof(*ioc)); 6702 if (IS_ERR(ioc)) 6703 return PTR_ERR(ioc); 6704 6705 instance = megasas_lookup_instance(ioc->host_no); 6706 if (!instance) { 6707 error = -ENODEV; 6708 goto out_kfree_ioc; 6709 } 6710 6711 /* Adjust ioctl wait time for VF mode */ 6712 if (instance->requestorId) 6713 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 6714 6715 /* Block ioctls in VF mode */ 6716 if (instance->requestorId && !allow_vf_ioctls) { 6717 error = -ENODEV; 6718 goto 
out_kfree_ioc;
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Controller in crit error\n");
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (instance->unload == 1) {
		error = -ENODEV;
		goto out_kfree_ioc;
	}

	if (down_interruptible(&instance->ioctl_sem)) {
		error = -ERESTARTSYS;
		goto out_kfree_ioc;
	}

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "waiting "
				"for controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		dev_err(&instance->pdev->dev, "timed out while "
			"waiting for HBA to recover\n");
		error = -ENODEV;
		goto out_up;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
	up(&instance->ioctl_sem);

out_kfree_ioc:
	kfree(ioc);
	return error;
}

static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
{
	struct megasas_instance *instance;
	struct megasas_aen aen;
	int error;
	int i;
	unsigned long flags;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;

	if (file->private_data != file) {
		printk(KERN_DEBUG "megasas: fasync_helper was not "
		       "called first\n");
		return -EINVAL;
	}

	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
		return -EFAULT;

	instance = megasas_lookup_instance(aen.host_no);

	if (!instance)
		return -ENODEV;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		return -ENODEV;
	}

	if (instance->unload == 1) {
		return -ENODEV;
	}

	for (i = 0; i < wait_time; i++) {

		spin_lock_irqsave(&instance->hba_lock, flags);
		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
			spin_unlock_irqrestore(&instance->hba_lock,
						flags);
			break;
		}

		spin_unlock_irqrestore(&instance->hba_lock, flags);

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "waiting for "
				"controller reset to finish\n");
		}

		msleep(1000);
	}

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		dev_err(&instance->pdev->dev, "timed out while waiting "
			"for HBA to recover\n");
		return -ENODEV;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, aen.seq_num,
				     aen.class_locale_word);
	mutex_unlock(&instance->reset_mutex);
	return error;
}

/**
 * megasas_mgmt_ioctl - char node ioctl entry point
 */
static long
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case
MEGASAS_IOC_FIRMWARE: 6848 return megasas_mgmt_ioctl_fw(file, arg); 6849 6850 case MEGASAS_IOC_GET_AEN: 6851 return megasas_mgmt_ioctl_aen(file, arg); 6852 } 6853 6854 return -ENOTTY; 6855 } 6856 6857 #ifdef CONFIG_COMPAT 6858 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 6859 { 6860 struct compat_megasas_iocpacket __user *cioc = 6861 (struct compat_megasas_iocpacket __user *)arg; 6862 struct megasas_iocpacket __user *ioc = 6863 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 6864 int i; 6865 int error = 0; 6866 compat_uptr_t ptr; 6867 u32 local_sense_off; 6868 u32 local_sense_len; 6869 u32 user_sense_off; 6870 6871 if (clear_user(ioc, sizeof(*ioc))) 6872 return -EFAULT; 6873 6874 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 6875 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 6876 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 6877 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 6878 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 6879 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 6880 return -EFAULT; 6881 6882 /* 6883 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 6884 * sense_len is not null, so prepare the 64bit value under 6885 * the same condition. 6886 */ 6887 if (get_user(local_sense_off, &ioc->sense_off) || 6888 get_user(local_sense_len, &ioc->sense_len) || 6889 get_user(user_sense_off, &cioc->sense_off)) 6890 return -EFAULT; 6891 6892 if (local_sense_len) { 6893 void __user **sense_ioc_ptr = 6894 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 6895 compat_uptr_t *sense_cioc_ptr = 6896 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 6897 if (get_user(ptr, sense_cioc_ptr) || 6898 put_user(compat_ptr(ptr), sense_ioc_ptr)) 6899 return -EFAULT; 6900 } 6901 6902 for (i = 0; i < MAX_IOCTL_SGE; i++) { 6903 if (get_user(ptr, &cioc->sgl[i].iov_base) || 6904 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 6905 copy_in_user(&ioc->sgl[i].iov_len, 6906 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 6907 return -EFAULT; 6908 } 6909 6910 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 6911 6912 if (copy_in_user(&cioc->frame.hdr.cmd_status, 6913 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 6914 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 6915 return -EFAULT; 6916 } 6917 return error; 6918 } 6919 6920 static long 6921 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 6922 unsigned long arg) 6923 { 6924 switch (cmd) { 6925 case MEGASAS_IOC_FIRMWARE32: 6926 return megasas_mgmt_compat_ioctl_fw(file, arg); 6927 case MEGASAS_IOC_GET_AEN: 6928 return megasas_mgmt_ioctl_aen(file, arg); 6929 } 6930 6931 return -ENOTTY; 6932 } 6933 #endif 6934 6935 /* 6936 * File operations structure for management interface 6937 */ 6938 static const struct file_operations megasas_mgmt_fops = { 6939 .owner = THIS_MODULE, 6940 .open = megasas_mgmt_open, 6941 .fasync = megasas_mgmt_fasync, 6942 .unlocked_ioctl = megasas_mgmt_ioctl, 6943 .poll = megasas_mgmt_poll, 6944 #ifdef CONFIG_COMPAT 6945 .compat_ioctl = megasas_mgmt_compat_ioctl, 6946 #endif 6947 .llseek = noop_llseek, 6948 }; 6949 6950 /* 6951 * PCI hotplug support registration structure 6952 */ 6953 static struct pci_driver megasas_pci_driver = { 6954 6955 .name = "megaraid_sas", 6956 .id_table = megasas_pci_table, 6957 .probe = megasas_probe_one, 6958 .remove = megasas_detach_one, 6959 .suspend = megasas_suspend, 6960 .resume = 
megasas_resume, 6961 .shutdown = megasas_shutdown, 6962 }; 6963 6964 /* 6965 * Sysfs driver attributes 6966 */ 6967 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) 6968 { 6969 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 6970 MEGASAS_VERSION); 6971 } 6972 6973 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 6974 6975 static ssize_t 6976 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) 6977 { 6978 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 6979 MEGASAS_RELDATE); 6980 } 6981 6982 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); 6983 6984 static ssize_t 6985 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 6986 { 6987 return sprintf(buf, "%u\n", support_poll_for_event); 6988 } 6989 6990 static DRIVER_ATTR(support_poll_for_event, S_IRUGO, 6991 megasas_sysfs_show_support_poll_for_event, NULL); 6992 6993 static ssize_t 6994 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) 6995 { 6996 return sprintf(buf, "%u\n", support_device_change); 6997 } 6998 6999 static DRIVER_ATTR(support_device_change, S_IRUGO, 7000 megasas_sysfs_show_support_device_change, NULL); 7001 7002 static ssize_t 7003 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 7004 { 7005 return sprintf(buf, "%u\n", megasas_dbg_lvl); 7006 } 7007 7008 static ssize_t 7009 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) 7010 { 7011 int retval = count; 7012 7013 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 7014 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 7015 retval = -EINVAL; 7016 } 7017 return retval; 7018 } 7019 7020 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 7021 megasas_sysfs_set_dbg_lvl); 7022 7023 static void 7024 megasas_aen_polling(struct work_struct *work) 7025 { 7026 struct megasas_aen_event *ev = 7027 container_of(work, struct megasas_aen_event, hotplug_work.work); 7028 struct megasas_instance *instance = ev->instance; 7029 union megasas_evt_class_locale class_locale; 7030 struct Scsi_Host *host; 7031 struct scsi_device *sdev1; 7032 u16 pd_index = 0; 7033 u16 ld_index = 0; 7034 int i, j, doscan = 0; 7035 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 7036 int error; 7037 u8 dcmd_ret = DCMD_SUCCESS; 7038 7039 if (!instance) { 7040 printk(KERN_ERR "invalid instance!\n"); 7041 kfree(ev); 7042 return; 7043 } 7044 7045 /* Adjust event workqueue thread wait time for VF mode */ 7046 if (instance->requestorId) 7047 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 7048 7049 /* Don't run the event workqueue thread if OCR is running */ 7050 mutex_lock(&instance->reset_mutex); 7051 7052 instance->ev = NULL; 7053 host = instance->host; 7054 if (instance->evt_detail) { 7055 megasas_decode_evt(instance); 7056 7057 switch (le32_to_cpu(instance->evt_detail->code)) { 7058 7059 case MR_EVT_PD_INSERTED: 7060 case MR_EVT_PD_REMOVED: 7061 dcmd_ret = megasas_get_pd_list(instance); 7062 if (dcmd_ret == DCMD_SUCCESS) 7063 doscan = SCAN_PD_CHANNEL; 7064 break; 7065 7066 case MR_EVT_LD_OFFLINE: 7067 case MR_EVT_CFG_CLEARED: 7068 case MR_EVT_LD_DELETED: 7069 case MR_EVT_LD_CREATED: 7070 if (!instance->requestorId || 7071 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7072 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7073 7074 if (dcmd_ret == DCMD_SUCCESS) 7075 doscan = SCAN_VD_CHANNEL; 7076 7077 break; 7078 7079 case 
MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 7080 case MR_EVT_FOREIGN_CFG_IMPORTED: 7081 case MR_EVT_LD_STATE_CHANGE: 7082 dcmd_ret = megasas_get_pd_list(instance); 7083 7084 if (dcmd_ret != DCMD_SUCCESS) 7085 break; 7086 7087 if (!instance->requestorId || 7088 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7089 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7090 7091 if (dcmd_ret != DCMD_SUCCESS) 7092 break; 7093 7094 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; 7095 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 7096 instance->host->host_no); 7097 break; 7098 7099 case MR_EVT_CTRL_PROP_CHANGED: 7100 dcmd_ret = megasas_get_ctrl_info(instance); 7101 break; 7102 default: 7103 doscan = 0; 7104 break; 7105 } 7106 } else { 7107 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 7108 mutex_unlock(&instance->reset_mutex); 7109 kfree(ev); 7110 return; 7111 } 7112 7113 mutex_unlock(&instance->reset_mutex); 7114 7115 if (doscan & SCAN_PD_CHANNEL) { 7116 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 7117 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7118 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 7119 sdev1 = scsi_device_lookup(host, i, j, 0); 7120 if (instance->pd_list[pd_index].driveState == 7121 MR_PD_STATE_SYSTEM) { 7122 if (!sdev1) 7123 scsi_add_device(host, i, j, 0); 7124 else 7125 scsi_device_put(sdev1); 7126 } else { 7127 if (sdev1) { 7128 scsi_remove_device(sdev1); 7129 scsi_device_put(sdev1); 7130 } 7131 } 7132 } 7133 } 7134 } 7135 7136 if (doscan & SCAN_VD_CHANNEL) { 7137 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 7138 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7139 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 7140 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7141 if (instance->ld_ids[ld_index] != 0xff) { 7142 if (!sdev1) 7143 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7144 else 7145 scsi_device_put(sdev1); 7146 } else { 7147 if (sdev1) { 7148 scsi_remove_device(sdev1); 7149 scsi_device_put(sdev1); 7150 } 7151 } 7152 } 7153 } 7154 } 7155 7156 if (dcmd_ret == DCMD_SUCCESS) 7157 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 7158 else 7159 seq_num = instance->last_seq_num; 7160 7161 /* Register AEN with FW for latest sequence number plus 1 */ 7162 class_locale.members.reserved = 0; 7163 class_locale.members.locale = MR_EVT_LOCALE_ALL; 7164 class_locale.members.class = MR_EVT_CLASS_DEBUG; 7165 7166 if (instance->aen_cmd != NULL) { 7167 kfree(ev); 7168 return; 7169 } 7170 7171 mutex_lock(&instance->reset_mutex); 7172 error = megasas_register_aen(instance, seq_num, 7173 class_locale.word); 7174 if (error) 7175 dev_err(&instance->pdev->dev, 7176 "register aen failed error %x\n", error); 7177 7178 mutex_unlock(&instance->reset_mutex); 7179 kfree(ev); 7180 } 7181 7182 /** 7183 * megasas_init - Driver load entry point 7184 */ 7185 static int __init megasas_init(void) 7186 { 7187 int rval; 7188 7189 /* 7190 * Booted in kdump kernel, minimize memory footprints by 7191 * disabling few features 7192 */ 7193 if (reset_devices) { 7194 msix_vectors = 1; 7195 rdpq_enable = 0; 7196 dual_qdepth_disable = 1; 7197 } 7198 7199 /* 7200 * Announce driver version and other information 7201 */ 7202 pr_info("megasas: %s\n", MEGASAS_VERSION); 7203 7204 spin_lock_init(&poll_aen_lock); 7205 7206 support_poll_for_event = 2; 7207 support_device_change = 1; 7208 7209 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 7210 7211 /* 7212 * Register character device node 7213 
*/ 7214 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 7215 7216 if (rval < 0) { 7217 printk(KERN_DEBUG "megasas: failed to open device node\n"); 7218 return rval; 7219 } 7220 7221 megasas_mgmt_majorno = rval; 7222 7223 /* 7224 * Register ourselves as PCI hotplug module 7225 */ 7226 rval = pci_register_driver(&megasas_pci_driver); 7227 7228 if (rval) { 7229 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 7230 goto err_pcidrv; 7231 } 7232 7233 rval = driver_create_file(&megasas_pci_driver.driver, 7234 &driver_attr_version); 7235 if (rval) 7236 goto err_dcf_attr_ver; 7237 7238 rval = driver_create_file(&megasas_pci_driver.driver, 7239 &driver_attr_release_date); 7240 if (rval) 7241 goto err_dcf_rel_date; 7242 7243 rval = driver_create_file(&megasas_pci_driver.driver, 7244 &driver_attr_support_poll_for_event); 7245 if (rval) 7246 goto err_dcf_support_poll_for_event; 7247 7248 rval = driver_create_file(&megasas_pci_driver.driver, 7249 &driver_attr_dbg_lvl); 7250 if (rval) 7251 goto err_dcf_dbg_lvl; 7252 rval = driver_create_file(&megasas_pci_driver.driver, 7253 &driver_attr_support_device_change); 7254 if (rval) 7255 goto err_dcf_support_device_change; 7256 7257 return rval; 7258 7259 err_dcf_support_device_change: 7260 driver_remove_file(&megasas_pci_driver.driver, 7261 &driver_attr_dbg_lvl); 7262 err_dcf_dbg_lvl: 7263 driver_remove_file(&megasas_pci_driver.driver, 7264 &driver_attr_support_poll_for_event); 7265 err_dcf_support_poll_for_event: 7266 driver_remove_file(&megasas_pci_driver.driver, 7267 &driver_attr_release_date); 7268 err_dcf_rel_date: 7269 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7270 err_dcf_attr_ver: 7271 pci_unregister_driver(&megasas_pci_driver); 7272 err_pcidrv: 7273 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7274 return rval; 7275 } 7276 7277 /** 7278 * megasas_exit - Driver unload entry point 7279 */ 7280 static void __exit megasas_exit(void) 7281 { 7282 driver_remove_file(&megasas_pci_driver.driver, 7283 &driver_attr_dbg_lvl); 7284 driver_remove_file(&megasas_pci_driver.driver, 7285 &driver_attr_support_poll_for_event); 7286 driver_remove_file(&megasas_pci_driver.driver, 7287 &driver_attr_support_device_change); 7288 driver_remove_file(&megasas_pci_driver.driver, 7289 &driver_attr_release_date); 7290 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7291 7292 pci_unregister_driver(&megasas_pci_driver); 7293 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7294 } 7295 7296 module_init(megasas_init); 7297 module_exit(megasas_exit); 7298
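/*
 * Usage sketch (illustrative, not part of the driver): a management
 * application reaches megasas_mgmt_fw_ioctl() through the character node
 * registered in megasas_init(). Assuming udev has created a node for
 * megasas_mgmt_majorno (commonly /dev/megaraid_sas_ioctl_node), the flow is
 * roughly:
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *	struct megasas_iocpacket ioc = { 0 };
 *	// fill ioc.host_no, an MFI frame in ioc.frame.raw,
 *	// and ioc.sge_count/ioc.sgl[] with user buffers
 *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
 *
 * The driver bounces each SGL entry through a DMA-coherent kernel buffer,
 * issues the frame to firmware, and copies data, sense and status back.
 */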