1 /* 2 * linux/drivers/scsi/esas2r/esas2r_init.c 3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers 4 * 5 * Copyright (c) 2001-2013 ATTO Technology, Inc. 6 * (mailto:linuxdrivers@attotech.com)mpt3sas/mpt3sas_trigger_diag. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 2 11 * of the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * NO WARRANTY 19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 23 * solely responsible for determining the appropriateness of using and 24 * distributing the Program and assumes all risks associated with its 25 * exercise of rights under this Agreement, including but not limited to 26 * the risks and costs of program errors, damage to or loss of data, 27 * programs or equipment, and unavailability or interruption of operations. 28 * 29 * DISCLAIMER OF LIABILITY 30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 37 * 38 * You should have received a copy of the GNU General Public License 39 * along with this program; if not, write to the Free Software 40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 41 * USA. 42 */ 43 44 #include "esas2r.h" 45 46 static bool esas2r_initmem_alloc(struct esas2r_adapter *a, 47 struct esas2r_mem_desc *mem_desc, 48 u32 align) 49 { 50 mem_desc->esas2r_param = mem_desc->size + align; 51 mem_desc->virt_addr = NULL; 52 mem_desc->phys_addr = 0; 53 mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev, 54 (size_t)mem_desc-> 55 esas2r_param, 56 (dma_addr_t *)&mem_desc-> 57 phys_addr, 58 GFP_KERNEL); 59 60 if (mem_desc->esas2r_data == NULL) { 61 esas2r_log(ESAS2R_LOG_CRIT, 62 "failed to allocate %lu bytes of consistent memory!", 63 (long 64 unsigned 65 int)mem_desc->esas2r_param); 66 return false; 67 } 68 69 mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align); 70 mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align); 71 memset(mem_desc->virt_addr, 0, mem_desc->size); 72 return true; 73 } 74 75 static void esas2r_initmem_free(struct esas2r_adapter *a, 76 struct esas2r_mem_desc *mem_desc) 77 { 78 if (mem_desc->virt_addr == NULL) 79 return; 80 81 /* 82 * Careful! phys_addr and virt_addr may have been adjusted from the 83 * original allocation in order to return the desired alignment. That 84 * means we have to use the original address (in esas2r_data) and size 85 * (esas2r_param) and calculate the original physical address based on 86 * the difference between the requested and actual allocation size. 87 */ 88 if (mem_desc->phys_addr) { 89 int unalign = ((u8 *)mem_desc->virt_addr) - 90 ((u8 *)mem_desc->esas2r_data); 91 92 dma_free_coherent(&a->pcid->dev, 93 (size_t)mem_desc->esas2r_param, 94 mem_desc->esas2r_data, 95 (dma_addr_t)(mem_desc->phys_addr - unalign)); 96 } else { 97 kfree(mem_desc->esas2r_data); 98 } 99 100 mem_desc->virt_addr = NULL; 101 } 102 103 static bool alloc_vda_req(struct esas2r_adapter *a, 104 struct esas2r_request *rq) 105 { 106 struct esas2r_mem_desc *memdesc = kzalloc( 107 sizeof(struct esas2r_mem_desc), GFP_KERNEL); 108 109 if (memdesc == NULL) { 110 esas2r_hdebug("could not alloc mem for vda request memdesc\n"); 111 return false; 112 } 113 114 memdesc->size = sizeof(union atto_vda_req) + 115 ESAS2R_DATA_BUF_LEN; 116 117 if (!esas2r_initmem_alloc(a, memdesc, 256)) { 118 esas2r_hdebug("could not alloc mem for vda request\n"); 119 kfree(memdesc); 120 return false; 121 } 122 123 a->num_vrqs++; 124 list_add(&memdesc->next_desc, &a->vrq_mds_head); 125 126 rq->vrq_md = memdesc; 127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr; 128 rq->vrq->scsi.handle = a->num_vrqs; 129 130 return true; 131 } 132 133 static void esas2r_unmap_regions(struct esas2r_adapter *a) 134 { 135 if (a->regs) 136 iounmap((void __iomem *)a->regs); 137 138 a->regs = NULL; 139 140 pci_release_region(a->pcid, 2); 141 142 if (a->data_window) 143 iounmap((void __iomem *)a->data_window); 144 145 a->data_window = NULL; 146 147 pci_release_region(a->pcid, 0); 148 } 149 150 static int esas2r_map_regions(struct esas2r_adapter *a) 151 { 152 int error; 153 154 a->regs = NULL; 155 a->data_window = NULL; 156 157 error = pci_request_region(a->pcid, 2, a->name); 158 if (error != 0) { 159 esas2r_log(ESAS2R_LOG_CRIT, 160 "pci_request_region(2) failed, error %d", 161 error); 162 163 return error; 164 } 165 166 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), 167 pci_resource_len(a->pcid, 2)); 168 if (a->regs == NULL) { 169 esas2r_log(ESAS2R_LOG_CRIT, 170 "ioremap failed for regs mem region\n"); 171 pci_release_region(a->pcid, 2); 172 return -EFAULT; 173 } 174 175 error = pci_request_region(a->pcid, 0, a->name); 176 if (error != 0) { 177 esas2r_log(ESAS2R_LOG_CRIT, 178 "pci_request_region(2) failed, error %d", 179 error); 180 esas2r_unmap_regions(a); 181 return error; 182 } 183 184 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, 185 0), 186 pci_resource_len(a->pcid, 0)); 187 if (a->data_window == NULL) { 188 esas2r_log(ESAS2R_LOG_CRIT, 189 "ioremap failed for data_window mem region\n"); 190 esas2r_unmap_regions(a); 191 return -EFAULT; 192 } 193 194 return 0; 195 } 196 197 static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) 198 { 199 int i; 200 201 /* Set up interrupt mode based on the requested value */ 202 switch (intr_mode) { 203 case INTR_MODE_LEGACY: 204 use_legacy_interrupts: 205 a->intr_mode = INTR_MODE_LEGACY; 206 break; 207 208 case INTR_MODE_MSI: 209 i = pci_enable_msi(a->pcid); 210 if (i != 0) { 211 esas2r_log(ESAS2R_LOG_WARN, 212 "failed to enable MSI for adapter %d, " 213 "falling back to legacy interrupts " 214 "(err=%d)", a->index, 215 i); 216 goto use_legacy_interrupts; 217 } 218 a->intr_mode = INTR_MODE_MSI; 219 set_bit(AF2_MSI_ENABLED, &a->flags2); 220 break; 221 222 223 default: 224 esas2r_log(ESAS2R_LOG_WARN, 225 "unknown interrupt_mode %d requested, " 226 "falling back to legacy interrupt", 227 interrupt_mode); 228 goto use_legacy_interrupts; 229 } 230 } 231 232 static void esas2r_claim_interrupts(struct esas2r_adapter *a) 233 { 234 unsigned long flags = 0; 235 236 if (a->intr_mode == INTR_MODE_LEGACY) 237 flags |= IRQF_SHARED; 238 239 esas2r_log(ESAS2R_LOG_INFO, 240 "esas2r_claim_interrupts irq=%d (%p, %s, %lx)", 241 a->pcid->irq, a, a->name, flags); 242 243 if (request_irq(a->pcid->irq, 244 (a->intr_mode == 245 INTR_MODE_LEGACY) ? esas2r_interrupt : 246 esas2r_msi_interrupt, 247 flags, 248 a->name, 249 a)) { 250 esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", 251 a->pcid->irq); 252 return; 253 } 254 255 set_bit(AF2_IRQ_CLAIMED, &a->flags2); 256 esas2r_log(ESAS2R_LOG_INFO, 257 "claimed IRQ %d flags: 0x%lx", 258 a->pcid->irq, flags); 259 } 260 261 int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, 262 int index) 263 { 264 struct esas2r_adapter *a; 265 u64 bus_addr = 0; 266 int i; 267 void *next_uncached; 268 struct esas2r_request *first_request, *last_request; 269 bool dma64 = false; 270 271 if (index >= MAX_ADAPTERS) { 272 esas2r_log(ESAS2R_LOG_CRIT, 273 "tried to init invalid adapter index %u!", 274 index); 275 return 0; 276 } 277 278 if (esas2r_adapters[index]) { 279 esas2r_log(ESAS2R_LOG_CRIT, 280 "tried to init existing adapter index %u!", 281 index); 282 return 0; 283 } 284 285 a = (struct esas2r_adapter *)host->hostdata; 286 memset(a, 0, sizeof(struct esas2r_adapter)); 287 a->pcid = pcid; 288 a->host = host; 289 290 if (sizeof(dma_addr_t) > 4 && 291 dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) && 292 !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64))) 293 dma64 = true; 294 295 if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) { 296 esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask"); 297 esas2r_kill_adapter(index); 298 return 0; 299 } 300 301 esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev, 302 "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32"); 303 304 esas2r_adapters[index] = a; 305 sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); 306 esas2r_debug("new adapter %p, name %s", a, a->name); 307 spin_lock_init(&a->request_lock); 308 spin_lock_init(&a->fw_event_lock); 309 mutex_init(&a->fm_api_mutex); 310 mutex_init(&a->fs_api_mutex); 311 sema_init(&a->nvram_semaphore, 1); 312 313 esas2r_fw_event_off(a); 314 snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", 315 a->index); 316 a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); 317 318 init_waitqueue_head(&a->buffered_ioctl_waiter); 319 init_waitqueue_head(&a->nvram_waiter); 320 init_waitqueue_head(&a->fm_api_waiter); 321 init_waitqueue_head(&a->fs_api_waiter); 322 init_waitqueue_head(&a->vda_waiter); 323 324 INIT_LIST_HEAD(&a->general_req.req_list); 325 INIT_LIST_HEAD(&a->active_list); 326 INIT_LIST_HEAD(&a->defer_list); 327 INIT_LIST_HEAD(&a->free_sg_list_head); 328 INIT_LIST_HEAD(&a->avail_request); 329 INIT_LIST_HEAD(&a->vrq_mds_head); 330 INIT_LIST_HEAD(&a->fw_event_list); 331 332 first_request = (struct esas2r_request *)((u8 *)(a + 1)); 333 334 for (last_request = first_request, i = 1; i < num_requests; 335 last_request++, i++) { 336 INIT_LIST_HEAD(&last_request->req_list); 337 list_add_tail(&last_request->comp_list, &a->avail_request); 338 if (!alloc_vda_req(a, last_request)) { 339 esas2r_log(ESAS2R_LOG_CRIT, 340 "failed to allocate a VDA request!"); 341 esas2r_kill_adapter(index); 342 return 0; 343 } 344 } 345 346 esas2r_debug("requests: %p to %p (%d, %d)", first_request, 347 last_request, 348 sizeof(*first_request), 349 num_requests); 350 351 if (esas2r_map_regions(a) != 0) { 352 esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); 353 esas2r_kill_adapter(index); 354 return 0; 355 } 356 357 a->index = index; 358 359 /* interrupts will be disabled until we are done with init */ 360 atomic_inc(&a->dis_ints_cnt); 361 atomic_inc(&a->disable_cnt); 362 set_bit(AF_CHPRST_PENDING, &a->flags); 363 set_bit(AF_DISC_PENDING, &a->flags); 364 set_bit(AF_FIRST_INIT, &a->flags); 365 set_bit(AF_LEGACY_SGE_MODE, &a->flags); 366 367 a->init_msg = ESAS2R_INIT_MSG_START; 368 a->max_vdareq_size = 128; 369 a->build_sgl = esas2r_build_sg_list_sge; 370 371 esas2r_setup_interrupts(a, interrupt_mode); 372 373 a->uncached_size = esas2r_get_uncached_size(a); 374 a->uncached = dma_alloc_coherent(&pcid->dev, 375 (size_t)a->uncached_size, 376 (dma_addr_t *)&bus_addr, 377 GFP_KERNEL); 378 if (a->uncached == NULL) { 379 esas2r_log(ESAS2R_LOG_CRIT, 380 "failed to allocate %d bytes of consistent memory!", 381 a->uncached_size); 382 esas2r_kill_adapter(index); 383 return 0; 384 } 385 386 a->uncached_phys = bus_addr; 387 388 esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", 389 a->uncached_size, 390 a->uncached, 391 upper_32_bits(bus_addr), 392 lower_32_bits(bus_addr)); 393 memset(a->uncached, 0, a->uncached_size); 394 next_uncached = a->uncached; 395 396 if (!esas2r_init_adapter_struct(a, 397 &next_uncached)) { 398 esas2r_log(ESAS2R_LOG_CRIT, 399 "failed to initialize adapter structure (2)!"); 400 esas2r_kill_adapter(index); 401 return 0; 402 } 403 404 tasklet_init(&a->tasklet, 405 esas2r_adapter_tasklet, 406 (unsigned long)a); 407 408 /* 409 * Disable chip interrupts to prevent spurious interrupts 410 * until we claim the IRQ. 411 */ 412 esas2r_disable_chip_interrupts(a); 413 esas2r_check_adapter(a); 414 415 if (!esas2r_init_adapter_hw(a, true)) { 416 esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); 417 } else { 418 esas2r_debug("esas2r_init_adapter ok"); 419 } 420 421 esas2r_claim_interrupts(a); 422 423 if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) 424 esas2r_enable_chip_interrupts(a); 425 426 set_bit(AF2_INIT_DONE, &a->flags2); 427 if (!test_bit(AF_DEGRADED_MODE, &a->flags)) 428 esas2r_kickoff_timer(a); 429 esas2r_debug("esas2r_init_adapter done for %p (%d)", 430 a, a->disable_cnt); 431 432 return 1; 433 } 434 435 static void esas2r_adapter_power_down(struct esas2r_adapter *a, 436 int power_management) 437 { 438 struct esas2r_mem_desc *memdesc, *next; 439 440 if ((test_bit(AF2_INIT_DONE, &a->flags2)) 441 && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { 442 if (!power_management) { 443 del_timer_sync(&a->timer); 444 tasklet_kill(&a->tasklet); 445 } 446 esas2r_power_down(a); 447 448 /* 449 * There are versions of firmware that do not handle the sync 450 * cache command correctly. Stall here to ensure that the 451 * cache is lazily flushed. 452 */ 453 mdelay(500); 454 esas2r_debug("chip halted"); 455 } 456 457 /* Remove sysfs binary files */ 458 if (a->sysfs_fw_created) { 459 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); 460 a->sysfs_fw_created = 0; 461 } 462 463 if (a->sysfs_fs_created) { 464 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); 465 a->sysfs_fs_created = 0; 466 } 467 468 if (a->sysfs_vda_created) { 469 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); 470 a->sysfs_vda_created = 0; 471 } 472 473 if (a->sysfs_hw_created) { 474 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); 475 a->sysfs_hw_created = 0; 476 } 477 478 if (a->sysfs_live_nvram_created) { 479 sysfs_remove_bin_file(&a->host->shost_dev.kobj, 480 &bin_attr_live_nvram); 481 a->sysfs_live_nvram_created = 0; 482 } 483 484 if (a->sysfs_default_nvram_created) { 485 sysfs_remove_bin_file(&a->host->shost_dev.kobj, 486 &bin_attr_default_nvram); 487 a->sysfs_default_nvram_created = 0; 488 } 489 490 /* Clean up interrupts */ 491 if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { 492 esas2r_log_dev(ESAS2R_LOG_INFO, 493 &(a->pcid->dev), 494 "free_irq(%d) called", a->pcid->irq); 495 496 free_irq(a->pcid->irq, a); 497 esas2r_debug("IRQ released"); 498 clear_bit(AF2_IRQ_CLAIMED, &a->flags2); 499 } 500 501 if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { 502 pci_disable_msi(a->pcid); 503 clear_bit(AF2_MSI_ENABLED, &a->flags2); 504 esas2r_debug("MSI disabled"); 505 } 506 507 if (a->inbound_list_md.virt_addr) 508 esas2r_initmem_free(a, &a->inbound_list_md); 509 510 if (a->outbound_list_md.virt_addr) 511 esas2r_initmem_free(a, &a->outbound_list_md); 512 513 list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, 514 next_desc) { 515 esas2r_initmem_free(a, memdesc); 516 } 517 518 /* Following frees everything allocated via alloc_vda_req */ 519 list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { 520 esas2r_initmem_free(a, memdesc); 521 list_del(&memdesc->next_desc); 522 kfree(memdesc); 523 } 524 525 kfree(a->first_ae_req); 526 a->first_ae_req = NULL; 527 528 kfree(a->sg_list_mds); 529 a->sg_list_mds = NULL; 530 531 kfree(a->req_table); 532 a->req_table = NULL; 533 534 if (a->regs) { 535 esas2r_unmap_regions(a); 536 a->regs = NULL; 537 a->data_window = NULL; 538 esas2r_debug("regions unmapped"); 539 } 540 } 541 542 /* Release/free allocated resources for specified adapters. */ 543 void esas2r_kill_adapter(int i) 544 { 545 struct esas2r_adapter *a = esas2r_adapters[i]; 546 547 if (a) { 548 unsigned long flags; 549 struct workqueue_struct *wq; 550 esas2r_debug("killing adapter %p [%d] ", a, i); 551 esas2r_fw_event_off(a); 552 esas2r_adapter_power_down(a, 0); 553 if (esas2r_buffered_ioctl && 554 (a->pcid == esas2r_buffered_ioctl_pcid)) { 555 dma_free_coherent(&a->pcid->dev, 556 (size_t)esas2r_buffered_ioctl_size, 557 esas2r_buffered_ioctl, 558 esas2r_buffered_ioctl_addr); 559 esas2r_buffered_ioctl = NULL; 560 } 561 562 if (a->vda_buffer) { 563 dma_free_coherent(&a->pcid->dev, 564 (size_t)VDA_MAX_BUFFER_SIZE, 565 a->vda_buffer, 566 (dma_addr_t)a->ppvda_buffer); 567 a->vda_buffer = NULL; 568 } 569 if (a->fs_api_buffer) { 570 dma_free_coherent(&a->pcid->dev, 571 (size_t)a->fs_api_buffer_size, 572 a->fs_api_buffer, 573 (dma_addr_t)a->ppfs_api_buffer); 574 a->fs_api_buffer = NULL; 575 } 576 577 kfree(a->local_atto_ioctl); 578 a->local_atto_ioctl = NULL; 579 580 spin_lock_irqsave(&a->fw_event_lock, flags); 581 wq = a->fw_event_q; 582 a->fw_event_q = NULL; 583 spin_unlock_irqrestore(&a->fw_event_lock, flags); 584 if (wq) 585 destroy_workqueue(wq); 586 587 if (a->uncached) { 588 dma_free_coherent(&a->pcid->dev, 589 (size_t)a->uncached_size, 590 a->uncached, 591 (dma_addr_t)a->uncached_phys); 592 a->uncached = NULL; 593 esas2r_debug("uncached area freed"); 594 } 595 596 esas2r_log_dev(ESAS2R_LOG_INFO, 597 &(a->pcid->dev), 598 "pci_disable_device() called. msix_enabled: %d " 599 "msi_enabled: %d irq: %d pin: %d", 600 a->pcid->msix_enabled, 601 a->pcid->msi_enabled, 602 a->pcid->irq, 603 a->pcid->pin); 604 605 esas2r_log_dev(ESAS2R_LOG_INFO, 606 &(a->pcid->dev), 607 "before pci_disable_device() enable_cnt: %d", 608 a->pcid->enable_cnt.counter); 609 610 pci_disable_device(a->pcid); 611 esas2r_log_dev(ESAS2R_LOG_INFO, 612 &(a->pcid->dev), 613 "after pci_disable_device() enable_cnt: %d", 614 a->pcid->enable_cnt.counter); 615 616 esas2r_log_dev(ESAS2R_LOG_INFO, 617 &(a->pcid->dev), 618 "pci_set_drv_data(%p, NULL) called", 619 a->pcid); 620 621 pci_set_drvdata(a->pcid, NULL); 622 esas2r_adapters[i] = NULL; 623 624 if (test_bit(AF2_INIT_DONE, &a->flags2)) { 625 clear_bit(AF2_INIT_DONE, &a->flags2); 626 627 set_bit(AF_DEGRADED_MODE, &a->flags); 628 629 esas2r_log_dev(ESAS2R_LOG_INFO, 630 &(a->host->shost_gendev), 631 "scsi_remove_host() called"); 632 633 scsi_remove_host(a->host); 634 635 esas2r_log_dev(ESAS2R_LOG_INFO, 636 &(a->host->shost_gendev), 637 "scsi_host_put() called"); 638 639 scsi_host_put(a->host); 640 } 641 } 642 } 643 644 int esas2r_suspend(struct pci_dev *pdev, pm_message_t state) 645 { 646 struct Scsi_Host *host = pci_get_drvdata(pdev); 647 u32 device_state; 648 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; 649 650 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()"); 651 if (!a) 652 return -ENODEV; 653 654 esas2r_adapter_power_down(a, 1); 655 device_state = pci_choose_state(pdev, state); 656 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 657 "pci_save_state() called"); 658 pci_save_state(pdev); 659 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 660 "pci_disable_device() called"); 661 pci_disable_device(pdev); 662 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 663 "pci_set_power_state() called"); 664 pci_set_power_state(pdev, device_state); 665 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0"); 666 return 0; 667 } 668 669 int esas2r_resume(struct pci_dev *pdev) 670 { 671 struct Scsi_Host *host = pci_get_drvdata(pdev); 672 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; 673 int rez; 674 675 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()"); 676 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 677 "pci_set_power_state(PCI_D0) " 678 "called"); 679 pci_set_power_state(pdev, PCI_D0); 680 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 681 "pci_enable_wake(PCI_D0, 0) " 682 "called"); 683 pci_enable_wake(pdev, PCI_D0, 0); 684 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 685 "pci_restore_state() called"); 686 pci_restore_state(pdev); 687 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), 688 "pci_enable_device() called"); 689 rez = pci_enable_device(pdev); 690 pci_set_master(pdev); 691 692 if (!a) { 693 rez = -ENODEV; 694 goto error_exit; 695 } 696 697 if (esas2r_map_regions(a) != 0) { 698 esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!"); 699 rez = -ENOMEM; 700 goto error_exit; 701 } 702 703 /* Set up interupt mode */ 704 esas2r_setup_interrupts(a, a->intr_mode); 705 706 /* 707 * Disable chip interrupts to prevent spurious interrupts until we 708 * claim the IRQ. 709 */ 710 esas2r_disable_chip_interrupts(a); 711 if (!esas2r_power_up(a, true)) { 712 esas2r_debug("yikes, esas2r_power_up failed"); 713 rez = -ENOMEM; 714 goto error_exit; 715 } 716 717 esas2r_claim_interrupts(a); 718 719 if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { 720 /* 721 * Now that system interrupt(s) are claimed, we can enable 722 * chip interrupts. 723 */ 724 esas2r_enable_chip_interrupts(a); 725 esas2r_kickoff_timer(a); 726 } else { 727 esas2r_debug("yikes, unable to claim IRQ"); 728 esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!"); 729 rez = -ENOMEM; 730 goto error_exit; 731 } 732 733 error_exit: 734 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d", 735 rez); 736 return rez; 737 } 738 739 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) 740 { 741 set_bit(AF_DEGRADED_MODE, &a->flags); 742 esas2r_log(ESAS2R_LOG_CRIT, 743 "setting adapter to degraded mode: %s\n", error_str); 744 return false; 745 } 746 747 u32 esas2r_get_uncached_size(struct esas2r_adapter *a) 748 { 749 return sizeof(struct esas2r_sas_nvram) 750 + ALIGN(ESAS2R_DISC_BUF_LEN, 8) 751 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */ 752 + 8 753 + (num_sg_lists * (u16)sgl_page_size) 754 + ALIGN((num_requests + num_ae_requests + 1 + 755 ESAS2R_LIST_EXTRA) * 756 sizeof(struct esas2r_inbound_list_source_entry), 757 8) 758 + ALIGN((num_requests + num_ae_requests + 1 + 759 ESAS2R_LIST_EXTRA) * 760 sizeof(struct atto_vda_ob_rsp), 8) 761 + 256; /* VDA request and buffer align */ 762 } 763 764 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) 765 { 766 if (pci_is_pcie(a->pcid)) { 767 u16 devcontrol; 768 769 pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol); 770 771 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 772 PCI_EXP_DEVCTL_READRQ_512B) { 773 esas2r_log(ESAS2R_LOG_INFO, 774 "max read request size > 512B"); 775 776 devcontrol &= ~PCI_EXP_DEVCTL_READRQ; 777 devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; 778 pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL, 779 devcontrol); 780 } 781 } 782 } 783 784 /* 785 * Determine the organization of the uncached data area and 786 * finish initializing the adapter structure 787 */ 788 bool esas2r_init_adapter_struct(struct esas2r_adapter *a, 789 void **uncached_area) 790 { 791 u32 i; 792 u8 *high; 793 struct esas2r_inbound_list_source_entry *element; 794 struct esas2r_request *rq; 795 struct esas2r_mem_desc *sgl; 796 797 spin_lock_init(&a->sg_list_lock); 798 spin_lock_init(&a->mem_lock); 799 spin_lock_init(&a->queue_lock); 800 801 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS]; 802 803 if (!alloc_vda_req(a, &a->general_req)) { 804 esas2r_hdebug( 805 "failed to allocate a VDA request for the general req!"); 806 return false; 807 } 808 809 /* allocate requests for asynchronous events */ 810 a->first_ae_req = 811 kcalloc(num_ae_requests, sizeof(struct esas2r_request), 812 GFP_KERNEL); 813 814 if (a->first_ae_req == NULL) { 815 esas2r_log(ESAS2R_LOG_CRIT, 816 "failed to allocate memory for asynchronous events"); 817 return false; 818 } 819 820 /* allocate the S/G list memory descriptors */ 821 a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc), 822 GFP_KERNEL); 823 824 if (a->sg_list_mds == NULL) { 825 esas2r_log(ESAS2R_LOG_CRIT, 826 "failed to allocate memory for s/g list descriptors"); 827 return false; 828 } 829 830 /* allocate the request table */ 831 a->req_table = 832 kcalloc(num_requests + num_ae_requests + 1, 833 sizeof(struct esas2r_request *), 834 GFP_KERNEL); 835 836 if (a->req_table == NULL) { 837 esas2r_log(ESAS2R_LOG_CRIT, 838 "failed to allocate memory for the request table"); 839 return false; 840 } 841 842 /* initialize PCI configuration space */ 843 esas2r_init_pci_cfg_space(a); 844 845 /* 846 * the thunder_stream boards all have a serial flash part that has a 847 * different base address on the AHB bus. 848 */ 849 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID) 850 && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) 851 a->flags2 |= AF2_THUNDERBOLT; 852 853 if (test_bit(AF2_THUNDERBOLT, &a->flags2)) 854 a->flags2 |= AF2_SERIAL_FLASH; 855 856 if (a->pcid->subsystem_device == ATTO_TLSH_1068) 857 a->flags2 |= AF2_THUNDERLINK; 858 859 /* Uncached Area */ 860 high = (u8 *)*uncached_area; 861 862 /* initialize the scatter/gather table pages */ 863 864 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) { 865 sgl->size = sgl_page_size; 866 867 list_add_tail(&sgl->next_desc, &a->free_sg_list_head); 868 869 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { 870 /* Allow the driver to load if the minimum count met. */ 871 if (i < NUM_SGL_MIN) 872 return false; 873 break; 874 } 875 } 876 877 /* compute the size of the lists */ 878 a->list_size = num_requests + ESAS2R_LIST_EXTRA; 879 880 /* allocate the inbound list */ 881 a->inbound_list_md.size = a->list_size * 882 sizeof(struct 883 esas2r_inbound_list_source_entry); 884 885 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { 886 esas2r_hdebug("failed to allocate IB list"); 887 return false; 888 } 889 890 /* allocate the outbound list */ 891 a->outbound_list_md.size = a->list_size * 892 sizeof(struct atto_vda_ob_rsp); 893 894 if (!esas2r_initmem_alloc(a, &a->outbound_list_md, 895 ESAS2R_LIST_ALIGN)) { 896 esas2r_hdebug("failed to allocate IB list"); 897 return false; 898 } 899 900 /* allocate the NVRAM structure */ 901 a->nvram = (struct esas2r_sas_nvram *)high; 902 high += sizeof(struct esas2r_sas_nvram); 903 904 /* allocate the discovery buffer */ 905 a->disc_buffer = high; 906 high += ESAS2R_DISC_BUF_LEN; 907 high = PTR_ALIGN(high, 8); 908 909 /* allocate the outbound list copy pointer */ 910 a->outbound_copy = (u32 volatile *)high; 911 high += sizeof(u32); 912 913 if (!test_bit(AF_NVR_VALID, &a->flags)) 914 esas2r_nvram_set_defaults(a); 915 916 /* update the caller's uncached memory area pointer */ 917 *uncached_area = (void *)high; 918 919 /* initialize the allocated memory */ 920 if (test_bit(AF_FIRST_INIT, &a->flags)) { 921 esas2r_targ_db_initialize(a); 922 923 /* prime parts of the inbound list */ 924 element = 925 (struct esas2r_inbound_list_source_entry *)a-> 926 inbound_list_md. 927 virt_addr; 928 929 for (i = 0; i < a->list_size; i++) { 930 element->address = 0; 931 element->reserved = 0; 932 element->length = cpu_to_le32(HWILSE_INTERFACE_F0 933 | (sizeof(union 934 atto_vda_req) 935 / 936 sizeof(u32))); 937 element++; 938 } 939 940 /* init the AE requests */ 941 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, 942 i++) { 943 INIT_LIST_HEAD(&rq->req_list); 944 if (!alloc_vda_req(a, rq)) { 945 esas2r_hdebug( 946 "failed to allocate a VDA request!"); 947 return false; 948 } 949 950 esas2r_rq_init_request(rq, a); 951 952 /* override the completion function */ 953 rq->comp_cb = esas2r_ae_complete; 954 } 955 } 956 957 return true; 958 } 959 960 /* This code will verify that the chip is operational. */ 961 bool esas2r_check_adapter(struct esas2r_adapter *a) 962 { 963 u32 starttime; 964 u32 doorbell; 965 u64 ppaddr; 966 u32 dw; 967 968 /* 969 * if the chip reset detected flag is set, we can bypass a bunch of 970 * stuff. 971 */ 972 if (test_bit(AF_CHPRST_DETECTED, &a->flags)) 973 goto skip_chip_reset; 974 975 /* 976 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver 977 * may have left them enabled or we may be recovering from a fault. 978 */ 979 esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); 980 esas2r_flush_register_dword(a, MU_INT_MASK_OUT); 981 982 /* 983 * wait for the firmware to become ready by forcing an interrupt and 984 * waiting for a response. 985 */ 986 starttime = jiffies_to_msecs(jiffies); 987 988 while (true) { 989 esas2r_force_interrupt(a); 990 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 991 if (doorbell == 0xFFFFFFFF) { 992 /* 993 * Give the firmware up to two seconds to enable 994 * register access after a reset. 995 */ 996 if ((jiffies_to_msecs(jiffies) - starttime) > 2000) 997 return esas2r_set_degraded_mode(a, 998 "unable to access registers"); 999 } else if (doorbell & DRBL_FORCE_INT) { 1000 u32 ver = (doorbell & DRBL_FW_VER_MSK); 1001 1002 /* 1003 * This driver supports version 0 and version 1 of 1004 * the API 1005 */ 1006 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1007 doorbell); 1008 1009 if (ver == DRBL_FW_VER_0) { 1010 set_bit(AF_LEGACY_SGE_MODE, &a->flags); 1011 1012 a->max_vdareq_size = 128; 1013 a->build_sgl = esas2r_build_sg_list_sge; 1014 } else if (ver == DRBL_FW_VER_1) { 1015 clear_bit(AF_LEGACY_SGE_MODE, &a->flags); 1016 1017 a->max_vdareq_size = 1024; 1018 a->build_sgl = esas2r_build_sg_list_prd; 1019 } else { 1020 return esas2r_set_degraded_mode(a, 1021 "unknown firmware version"); 1022 } 1023 break; 1024 } 1025 1026 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1027 1028 if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { 1029 esas2r_hdebug("FW ready TMO"); 1030 esas2r_bugon(); 1031 1032 return esas2r_set_degraded_mode(a, 1033 "firmware start has timed out"); 1034 } 1035 } 1036 1037 /* purge any asynchronous events since we will repost them later */ 1038 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); 1039 starttime = jiffies_to_msecs(jiffies); 1040 1041 while (true) { 1042 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1043 if (doorbell & DRBL_MSG_IFC_DOWN) { 1044 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1045 doorbell); 1046 break; 1047 } 1048 1049 schedule_timeout_interruptible(msecs_to_jiffies(50)); 1050 1051 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { 1052 esas2r_hdebug("timeout waiting for interface down"); 1053 break; 1054 } 1055 } 1056 skip_chip_reset: 1057 /* 1058 * first things first, before we go changing any of these registers 1059 * disable the communication lists. 1060 */ 1061 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); 1062 dw &= ~MU_ILC_ENABLE; 1063 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); 1064 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); 1065 dw &= ~MU_OLC_ENABLE; 1066 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); 1067 1068 /* configure the communication list addresses */ 1069 ppaddr = a->inbound_list_md.phys_addr; 1070 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, 1071 lower_32_bits(ppaddr)); 1072 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, 1073 upper_32_bits(ppaddr)); 1074 ppaddr = a->outbound_list_md.phys_addr; 1075 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, 1076 lower_32_bits(ppaddr)); 1077 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, 1078 upper_32_bits(ppaddr)); 1079 ppaddr = a->uncached_phys + 1080 ((u8 *)a->outbound_copy - a->uncached); 1081 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, 1082 lower_32_bits(ppaddr)); 1083 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, 1084 upper_32_bits(ppaddr)); 1085 1086 /* reset the read and write pointers */ 1087 *a->outbound_copy = 1088 a->last_write = 1089 a->last_read = a->list_size - 1; 1090 set_bit(AF_COMM_LIST_TOGGLE, &a->flags); 1091 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | 1092 a->last_write); 1093 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | 1094 a->last_write); 1095 esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | 1096 a->last_write); 1097 esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, 1098 MU_OLW_TOGGLE | a->last_write); 1099 1100 /* configure the interface select fields */ 1101 dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); 1102 dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); 1103 esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, 1104 (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); 1105 dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); 1106 dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); 1107 esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, 1108 (dw | MU_OLIC_LIST_F0 | 1109 MU_OLIC_SOURCE_DDR)); 1110 1111 /* finish configuring the communication lists */ 1112 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); 1113 dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK); 1114 dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC 1115 | (a->list_size << MU_ILC_NUMBER_SHIFT); 1116 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); 1117 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); 1118 dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); 1119 dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); 1120 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); 1121 1122 /* 1123 * notify the firmware that we're done setting up the communication 1124 * list registers. wait here until the firmware is done configuring 1125 * its lists. it will signal that it is done by enabling the lists. 1126 */ 1127 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); 1128 starttime = jiffies_to_msecs(jiffies); 1129 1130 while (true) { 1131 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1132 if (doorbell & DRBL_MSG_IFC_INIT) { 1133 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1134 doorbell); 1135 break; 1136 } 1137 1138 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1139 1140 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { 1141 esas2r_hdebug( 1142 "timeout waiting for communication list init"); 1143 esas2r_bugon(); 1144 return esas2r_set_degraded_mode(a, 1145 "timeout waiting for communication list init"); 1146 } 1147 } 1148 1149 /* 1150 * flag whether the firmware supports the power down doorbell. we 1151 * determine this by reading the inbound doorbell enable mask. 1152 */ 1153 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); 1154 if (doorbell & DRBL_POWER_DOWN) 1155 set_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1156 else 1157 clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); 1158 1159 /* 1160 * enable assertion of outbound queue and doorbell interrupts in the 1161 * main interrupt cause register. 1162 */ 1163 esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); 1164 esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); 1165 return true; 1166 } 1167 1168 /* Process the initialization message just completed and format the next one. */ 1169 static bool esas2r_format_init_msg(struct esas2r_adapter *a, 1170 struct esas2r_request *rq) 1171 { 1172 u32 msg = a->init_msg; 1173 struct atto_vda_cfg_init *ci; 1174 1175 a->init_msg = 0; 1176 1177 switch (msg) { 1178 case ESAS2R_INIT_MSG_START: 1179 case ESAS2R_INIT_MSG_REINIT: 1180 { 1181 esas2r_hdebug("CFG init"); 1182 esas2r_build_cfg_req(a, 1183 rq, 1184 VDA_CFG_INIT, 1185 0, 1186 NULL); 1187 ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; 1188 ci->sgl_page_size = cpu_to_le32(sgl_page_size); 1189 /* firmware interface overflows in y2106 */ 1190 ci->epoch_time = cpu_to_le32(ktime_get_real_seconds()); 1191 rq->flags |= RF_FAILURE_OK; 1192 a->init_msg = ESAS2R_INIT_MSG_INIT; 1193 break; 1194 } 1195 1196 case ESAS2R_INIT_MSG_INIT: 1197 if (rq->req_stat == RS_SUCCESS) { 1198 u32 major; 1199 u32 minor; 1200 u16 fw_release; 1201 1202 a->fw_version = le16_to_cpu( 1203 rq->func_rsp.cfg_rsp.vda_version); 1204 a->fw_build = rq->func_rsp.cfg_rsp.fw_build; 1205 fw_release = le16_to_cpu( 1206 rq->func_rsp.cfg_rsp.fw_release); 1207 major = LOBYTE(fw_release); 1208 minor = HIBYTE(fw_release); 1209 a->fw_version += (major << 16) + (minor << 24); 1210 } else { 1211 esas2r_hdebug("FAILED"); 1212 } 1213 1214 /* 1215 * the 2.71 and earlier releases of R6xx firmware did not error 1216 * unsupported config requests correctly. 1217 */ 1218 1219 if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) 1220 || (be32_to_cpu(a->fw_version) > 0x00524702)) { 1221 esas2r_hdebug("CFG get init"); 1222 esas2r_build_cfg_req(a, 1223 rq, 1224 VDA_CFG_GET_INIT2, 1225 sizeof(struct atto_vda_cfg_init), 1226 NULL); 1227 1228 rq->vrq->cfg.sg_list_offset = offsetof( 1229 struct atto_vda_cfg_req, 1230 data.sge); 1231 rq->vrq->cfg.data.prde.ctl_len = 1232 cpu_to_le32(sizeof(struct atto_vda_cfg_init)); 1233 rq->vrq->cfg.data.prde.address = cpu_to_le64( 1234 rq->vrq_md->phys_addr + 1235 sizeof(union atto_vda_req)); 1236 rq->flags |= RF_FAILURE_OK; 1237 a->init_msg = ESAS2R_INIT_MSG_GET_INIT; 1238 break; 1239 } 1240 fallthrough; 1241 1242 case ESAS2R_INIT_MSG_GET_INIT: 1243 if (msg == ESAS2R_INIT_MSG_GET_INIT) { 1244 ci = (struct atto_vda_cfg_init *)rq->data_buf; 1245 if (rq->req_stat == RS_SUCCESS) { 1246 a->num_targets_backend = 1247 le32_to_cpu(ci->num_targets_backend); 1248 a->ioctl_tunnel = 1249 le32_to_cpu(ci->ioctl_tunnel); 1250 } else { 1251 esas2r_hdebug("FAILED"); 1252 } 1253 } 1254 fallthrough; 1255 1256 default: 1257 rq->req_stat = RS_SUCCESS; 1258 return false; 1259 } 1260 return true; 1261 } 1262 1263 /* 1264 * Perform initialization messages via the request queue. Messages are 1265 * performed with interrupts disabled. 1266 */ 1267 bool esas2r_init_msgs(struct esas2r_adapter *a) 1268 { 1269 bool success = true; 1270 struct esas2r_request *rq = &a->general_req; 1271 1272 esas2r_rq_init_request(rq, a); 1273 rq->comp_cb = esas2r_dummy_complete; 1274 1275 if (a->init_msg == 0) 1276 a->init_msg = ESAS2R_INIT_MSG_REINIT; 1277 1278 while (a->init_msg) { 1279 if (esas2r_format_init_msg(a, rq)) { 1280 unsigned long flags; 1281 while (true) { 1282 spin_lock_irqsave(&a->queue_lock, flags); 1283 esas2r_start_vda_request(a, rq); 1284 spin_unlock_irqrestore(&a->queue_lock, flags); 1285 esas2r_wait_request(a, rq); 1286 if (rq->req_stat != RS_PENDING) 1287 break; 1288 } 1289 } 1290 1291 if (rq->req_stat == RS_SUCCESS 1292 || ((rq->flags & RF_FAILURE_OK) 1293 && rq->req_stat != RS_TIMEOUT)) 1294 continue; 1295 1296 esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", 1297 a->init_msg, rq->req_stat, rq->flags); 1298 a->init_msg = ESAS2R_INIT_MSG_START; 1299 success = false; 1300 break; 1301 } 1302 1303 esas2r_rq_destroy_request(rq, a); 1304 return success; 1305 } 1306 1307 /* Initialize the adapter chip */ 1308 bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) 1309 { 1310 bool rslt = false; 1311 struct esas2r_request *rq; 1312 u32 i; 1313 1314 if (test_bit(AF_DEGRADED_MODE, &a->flags)) 1315 goto exit; 1316 1317 if (!test_bit(AF_NVR_VALID, &a->flags)) { 1318 if (!esas2r_nvram_read_direct(a)) 1319 esas2r_log(ESAS2R_LOG_WARN, 1320 "invalid/missing NVRAM parameters"); 1321 } 1322 1323 if (!esas2r_init_msgs(a)) { 1324 esas2r_set_degraded_mode(a, "init messages failed"); 1325 goto exit; 1326 } 1327 1328 /* The firmware is ready. */ 1329 clear_bit(AF_DEGRADED_MODE, &a->flags); 1330 clear_bit(AF_CHPRST_PENDING, &a->flags); 1331 1332 /* Post all the async event requests */ 1333 for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) 1334 esas2r_start_ae_request(a, rq); 1335 1336 if (!a->flash_rev[0]) 1337 esas2r_read_flash_rev(a); 1338 1339 if (!a->image_type[0]) 1340 esas2r_read_image_type(a); 1341 1342 if (a->fw_version == 0) 1343 a->fw_rev[0] = 0; 1344 else 1345 sprintf(a->fw_rev, "%1d.%02d", 1346 (int)LOBYTE(HIWORD(a->fw_version)), 1347 (int)HIBYTE(HIWORD(a->fw_version))); 1348 1349 esas2r_hdebug("firmware revision: %s", a->fw_rev); 1350 1351 if (test_bit(AF_CHPRST_DETECTED, &a->flags) 1352 && (test_bit(AF_FIRST_INIT, &a->flags))) { 1353 esas2r_enable_chip_interrupts(a); 1354 return true; 1355 } 1356 1357 /* initialize discovery */ 1358 esas2r_disc_initialize(a); 1359 1360 /* 1361 * wait for the device wait time to expire here if requested. this is 1362 * usually requested during initial driver load and possibly when 1363 * resuming from a low power state. deferred device waiting will use 1364 * interrupts. chip reset recovery always defers device waiting to 1365 * avoid being in a TASKLET too long. 1366 */ 1367 if (init_poll) { 1368 u32 currtime = a->disc_start_time; 1369 u32 nexttick = 100; 1370 u32 deltatime; 1371 1372 /* 1373 * Block Tasklets from getting scheduled and indicate this is 1374 * polled discovery. 1375 */ 1376 set_bit(AF_TASKLET_SCHEDULED, &a->flags); 1377 set_bit(AF_DISC_POLLED, &a->flags); 1378 1379 /* 1380 * Temporarily bring the disable count to zero to enable 1381 * deferred processing. Note that the count is already zero 1382 * after the first initialization. 1383 */ 1384 if (test_bit(AF_FIRST_INIT, &a->flags)) 1385 atomic_dec(&a->disable_cnt); 1386 1387 while (test_bit(AF_DISC_PENDING, &a->flags)) { 1388 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1389 1390 /* 1391 * Determine the need for a timer tick based on the 1392 * delta time between this and the last iteration of 1393 * this loop. We don't use the absolute time because 1394 * then we would have to worry about when nexttick 1395 * wraps and currtime hasn't yet. 1396 */ 1397 deltatime = jiffies_to_msecs(jiffies) - currtime; 1398 currtime += deltatime; 1399 1400 /* 1401 * Process any waiting discovery as long as the chip is 1402 * up. If a chip reset happens during initial polling, 1403 * we have to make sure the timer tick processes the 1404 * doorbell indicating the firmware is ready. 1405 */ 1406 if (!test_bit(AF_CHPRST_PENDING, &a->flags)) 1407 esas2r_disc_check_for_work(a); 1408 1409 /* Simulate a timer tick. */ 1410 if (nexttick <= deltatime) { 1411 1412 /* Time for a timer tick */ 1413 nexttick += 100; 1414 esas2r_timer_tick(a); 1415 } 1416 1417 if (nexttick > deltatime) 1418 nexttick -= deltatime; 1419 1420 /* Do any deferred processing */ 1421 if (esas2r_is_tasklet_pending(a)) 1422 esas2r_do_tasklet_tasks(a); 1423 1424 } 1425 1426 if (test_bit(AF_FIRST_INIT, &a->flags)) 1427 atomic_inc(&a->disable_cnt); 1428 1429 clear_bit(AF_DISC_POLLED, &a->flags); 1430 clear_bit(AF_TASKLET_SCHEDULED, &a->flags); 1431 } 1432 1433 1434 esas2r_targ_db_report_changes(a); 1435 1436 /* 1437 * For cases where (a) the initialization messages processing may 1438 * handle an interrupt for a port event and a discovery is waiting, but 1439 * we are not waiting for devices, or (b) the device wait time has been 1440 * exhausted but there is still discovery pending, start any leftover 1441 * discovery in interrupt driven mode. 1442 */ 1443 esas2r_disc_start_waiting(a); 1444 1445 /* Enable chip interrupts */ 1446 a->int_mask = ESAS2R_INT_STS_MASK; 1447 esas2r_enable_chip_interrupts(a); 1448 esas2r_enable_heartbeat(a); 1449 rslt = true; 1450 1451 exit: 1452 /* 1453 * Regardless of whether initialization was successful, certain things 1454 * need to get done before we exit. 1455 */ 1456 1457 if (test_bit(AF_CHPRST_DETECTED, &a->flags) && 1458 test_bit(AF_FIRST_INIT, &a->flags)) { 1459 /* 1460 * Reinitialization was performed during the first 1461 * initialization. Only clear the chip reset flag so the 1462 * original device polling is not cancelled. 1463 */ 1464 if (!rslt) 1465 clear_bit(AF_CHPRST_PENDING, &a->flags); 1466 } else { 1467 /* First initialization or a subsequent re-init is complete. */ 1468 if (!rslt) { 1469 clear_bit(AF_CHPRST_PENDING, &a->flags); 1470 clear_bit(AF_DISC_PENDING, &a->flags); 1471 } 1472 1473 1474 /* Enable deferred processing after the first initialization. */ 1475 if (test_bit(AF_FIRST_INIT, &a->flags)) { 1476 clear_bit(AF_FIRST_INIT, &a->flags); 1477 1478 if (atomic_dec_return(&a->disable_cnt) == 0) 1479 esas2r_do_deferred_processes(a); 1480 } 1481 } 1482 1483 return rslt; 1484 } 1485 1486 void esas2r_reset_adapter(struct esas2r_adapter *a) 1487 { 1488 set_bit(AF_OS_RESET, &a->flags); 1489 esas2r_local_reset_adapter(a); 1490 esas2r_schedule_tasklet(a); 1491 } 1492 1493 void esas2r_reset_chip(struct esas2r_adapter *a) 1494 { 1495 if (!esas2r_is_adapter_present(a)) 1496 return; 1497 1498 /* 1499 * Before we reset the chip, save off the VDA core dump. The VDA core 1500 * dump is located in the upper 512KB of the onchip SRAM. Make sure 1501 * to not overwrite a previous crash that was saved. 1502 */ 1503 if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && 1504 !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { 1505 esas2r_read_mem_block(a, 1506 a->fw_coredump_buff, 1507 MW_DATA_ADDR_SRAM + 0x80000, 1508 ESAS2R_FWCOREDUMP_SZ); 1509 1510 set_bit(AF2_COREDUMP_SAVED, &a->flags2); 1511 } 1512 1513 clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); 1514 1515 /* Reset the chip */ 1516 if (a->pcid->revision == MVR_FREY_B2) 1517 esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, 1518 MU_CTL_IN_FULL_RST2); 1519 else 1520 esas2r_write_register_dword(a, MU_CTL_STATUS_IN, 1521 MU_CTL_IN_FULL_RST); 1522 1523 1524 /* Stall a little while to let the reset condition clear */ 1525 mdelay(10); 1526 } 1527 1528 static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) 1529 { 1530 u32 starttime; 1531 u32 doorbell; 1532 1533 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); 1534 starttime = jiffies_to_msecs(jiffies); 1535 1536 while (true) { 1537 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1538 if (doorbell & DRBL_POWER_DOWN) { 1539 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1540 doorbell); 1541 break; 1542 } 1543 1544 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1545 1546 if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { 1547 esas2r_hdebug("Timeout waiting for power down"); 1548 break; 1549 } 1550 } 1551 } 1552 1553 /* 1554 * Perform power management processing including managing device states, adapter 1555 * states, interrupts, and I/O. 1556 */ 1557 void esas2r_power_down(struct esas2r_adapter *a) 1558 { 1559 set_bit(AF_POWER_MGT, &a->flags); 1560 set_bit(AF_POWER_DOWN, &a->flags); 1561 1562 if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { 1563 u32 starttime; 1564 u32 doorbell; 1565 1566 /* 1567 * We are currently running OK and will be reinitializing later. 1568 * increment the disable count to coordinate with 1569 * esas2r_init_adapter. We don't have to do this in degraded 1570 * mode since we never enabled interrupts in the first place. 1571 */ 1572 esas2r_disable_chip_interrupts(a); 1573 esas2r_disable_heartbeat(a); 1574 1575 /* wait for any VDA activity to clear before continuing */ 1576 esas2r_write_register_dword(a, MU_DOORBELL_IN, 1577 DRBL_MSG_IFC_DOWN); 1578 starttime = jiffies_to_msecs(jiffies); 1579 1580 while (true) { 1581 doorbell = 1582 esas2r_read_register_dword(a, MU_DOORBELL_OUT); 1583 if (doorbell & DRBL_MSG_IFC_DOWN) { 1584 esas2r_write_register_dword(a, MU_DOORBELL_OUT, 1585 doorbell); 1586 break; 1587 } 1588 1589 schedule_timeout_interruptible(msecs_to_jiffies(100)); 1590 1591 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { 1592 esas2r_hdebug( 1593 "timeout waiting for interface down"); 1594 break; 1595 } 1596 } 1597 1598 /* 1599 * For versions of firmware that support it tell them the driver 1600 * is powering down. 1601 */ 1602 if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) 1603 esas2r_power_down_notify_firmware(a); 1604 } 1605 1606 /* Suspend I/O processing. */ 1607 set_bit(AF_OS_RESET, &a->flags); 1608 set_bit(AF_DISC_PENDING, &a->flags); 1609 set_bit(AF_CHPRST_PENDING, &a->flags); 1610 1611 esas2r_process_adapter_reset(a); 1612 1613 /* Remove devices now that I/O is cleaned up. */ 1614 a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); 1615 esas2r_targ_db_remove_all(a, false); 1616 } 1617 1618 /* 1619 * Perform power management processing including managing device states, adapter 1620 * states, interrupts, and I/O. 1621 */ 1622 bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) 1623 { 1624 bool ret; 1625 1626 clear_bit(AF_POWER_DOWN, &a->flags); 1627 esas2r_init_pci_cfg_space(a); 1628 set_bit(AF_FIRST_INIT, &a->flags); 1629 atomic_inc(&a->disable_cnt); 1630 1631 /* reinitialize the adapter */ 1632 ret = esas2r_check_adapter(a); 1633 if (!esas2r_init_adapter_hw(a, init_poll)) 1634 ret = false; 1635 1636 /* send the reset asynchronous event */ 1637 esas2r_send_reset_ae(a, true); 1638 1639 /* clear this flag after initialization. */ 1640 clear_bit(AF_POWER_MGT, &a->flags); 1641 return ret; 1642 } 1643 1644 bool esas2r_is_adapter_present(struct esas2r_adapter *a) 1645 { 1646 if (test_bit(AF_NOT_PRESENT, &a->flags)) 1647 return false; 1648 1649 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { 1650 set_bit(AF_NOT_PRESENT, &a->flags); 1651 1652 return false; 1653 } 1654 return true; 1655 } 1656 1657 const char *esas2r_get_model_name(struct esas2r_adapter *a) 1658 { 1659 switch (a->pcid->subsystem_device) { 1660 case ATTO_ESAS_R680: 1661 return "ATTO ExpressSAS R680"; 1662 1663 case ATTO_ESAS_R608: 1664 return "ATTO ExpressSAS R608"; 1665 1666 case ATTO_ESAS_R60F: 1667 return "ATTO ExpressSAS R60F"; 1668 1669 case ATTO_ESAS_R6F0: 1670 return "ATTO ExpressSAS R6F0"; 1671 1672 case ATTO_ESAS_R644: 1673 return "ATTO ExpressSAS R644"; 1674 1675 case ATTO_ESAS_R648: 1676 return "ATTO ExpressSAS R648"; 1677 1678 case ATTO_TSSC_3808: 1679 return "ATTO ThunderStream SC 3808D"; 1680 1681 case ATTO_TSSC_3808E: 1682 return "ATTO ThunderStream SC 3808E"; 1683 1684 case ATTO_TLSH_1068: 1685 return "ATTO ThunderLink SH 1068"; 1686 } 1687 1688 return "ATTO SAS Controller"; 1689 } 1690 1691 const char *esas2r_get_model_name_short(struct esas2r_adapter *a) 1692 { 1693 switch (a->pcid->subsystem_device) { 1694 case ATTO_ESAS_R680: 1695 return "R680"; 1696 1697 case ATTO_ESAS_R608: 1698 return "R608"; 1699 1700 case ATTO_ESAS_R60F: 1701 return "R60F"; 1702 1703 case ATTO_ESAS_R6F0: 1704 return "R6F0"; 1705 1706 case ATTO_ESAS_R644: 1707 return "R644"; 1708 1709 case ATTO_ESAS_R648: 1710 return "R648"; 1711 1712 case ATTO_TSSC_3808: 1713 return "SC 3808D"; 1714 1715 case ATTO_TSSC_3808E: 1716 return "SC 3808E"; 1717 1718 case ATTO_TLSH_1068: 1719 return "SH 1068"; 1720 } 1721 1722 return "unknown"; 1723 } 1724