
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 */
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
378 "9073: Invalid multi-adapter configuration"},
400 "Illegal request, command not allowed to a non-optimized resource"},
550 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
551 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
556 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
557 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
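/*
 * A minimal, self-contained sketch (not part of ipr.c) of the wrap-free
 * ring-buffer indexing used above: an atomic counter masked with a
 * power-of-two-minus-one mask lets concurrent writers each claim a slot
 * without locking. TRACE_SIZE and the trace_demo_* names are illustrative.
 */
#include <stdatomic.h>

#define TRACE_SIZE 4096				/* must be a power of two */
#define TRACE_INDEX_MASK (TRACE_SIZE - 1)

struct trace_demo_entry { unsigned long time; unsigned char op_code; };

static struct trace_demo_entry trace_demo[TRACE_SIZE];
static atomic_uint trace_demo_index;

static struct trace_demo_entry *trace_demo_claim(void)
{
	/* fetch_add returns the old value; +1 then mask mirrors
	   atomic_add_return(1, ...) & IPR_TRACE_INDEX_MASK above */
	unsigned int idx = (atomic_fetch_add(&trace_demo_index, 1) + 1)
			   & TRACE_INDEX_MASK;
	return &trace_demo[idx];
}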
/**
 * ipr_lock_and_done - Acquire lock and complete command
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 */
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @fast_done: fast done function call-back
 */
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 */
	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
				     struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 */
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);
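/*
 * Illustrative note (not from ipr.c): the trailing readl() forces the
 * posted MMIO writes above to reach the device before the function
 * returns. A generic kernel-context sketch of the pattern, with
 * hypothetical register pointers:
 */
static inline void demo_mask_device_irqs(void __iomem *mask_reg,
					 void __iomem *flush_reg)
{
	writel(~0, mask_reg);	/* posted write: may linger in a buffer */
	readl(flush_reg);	/* a read on the same device flushes it */
}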
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 *
 * Return value:
 * 	0 on success / -EIO on failure
 */
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 *
 * Return value:
 * 	0 on success / -EIO on failure
 */
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		return -EIO;
	}
/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 */
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 */
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 */
	spin_lock(&hrrq->_lock);
	list_for_each_entry_safe(ipr_cmd,
				 temp, &hrrq->hrrq_pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc =
			cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid =
			cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = __ipr_scsi_eh_done;

		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}
	spin_unlock(&hrrq->_lock);
/**
 * ipr_send_command - Send driver initiated requests.
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
/**
 * ipr_do_req - Send driver initiated requests.
 */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 */
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 */
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
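/*
 * Illustrative sketch (not from ipr.c): the 32-bit IOADL descriptor
 * packs flag bits and the transfer length into one word, which is why
 * the else-branch above can OR them together. The DEMO_* masks and the
 * flags-in-top-byte split are assumptions for the demo only.
 */
#include <stdint.h>

#define DEMO_FLAGS_MASK 0xff000000u	/* assumed: flags in the top byte */
#define DEMO_LEN_MASK   0x00ffffffu	/* assumed: length in low 24 bits */

static inline uint32_t demo_pack(uint32_t flags, uint32_t len)
{
	return (flags & DEMO_FLAGS_MASK) | (len & DEMO_LEN_MASK);
}

static inline uint32_t demo_len(uint32_t packed)
{
	return packed & DEMO_LEN_MASK;	/* recover the length field */
}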
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
	/* in ipr_get_hrrq_index(): */
	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
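/*
 * Illustrative sketch (not from ipr.c): HRRQ 0 is reserved for
 * internal/initialization commands, so the round-robin above maps the
 * running counter onto queues 1..hrrq_num-1. A standalone rendering:
 */
static unsigned int demo_pick_queue(unsigned int counter,
				    unsigned int num_queues)
{
	if (num_queues == 1)
		return 0;		/* only the reserved queue exists */
	/* e.g. num_queues = 4: counter 0,1,2,3,4,... -> queue 1,2,3,1,2,... */
	return (counter % (num_queues - 1)) + 1;
}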
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 */
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;

	if (ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}

			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 */
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
/**
 * __ipr_format_res_path - Format the resource path for printing.
 */
	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
/**
 * ipr_format_res_path - Format the resource path for printing.
 */
	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
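/*
 * Illustrative usage (not from ipr.c): a 0xff byte terminates the
 * res_path array, and the helpers render it as "host/AA-BB-CC...".
 * demo_show_path() and the buffer size constant are assumptions;
 * kernel/driver context is assumed for the types and printk().
 */
static void demo_show_path(struct ipr_ioa_cfg *ioa_cfg, u8 *res_path)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];	/* e.g. yields "2/00-0E-01" */

	printk(KERN_INFO "path: %s\n",
	       ipr_format_res_path(ioa_cfg, res_path, buffer, sizeof(buffer)));
}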
/**
 * ipr_update_res_entry - Update the resource entry.
 */
	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 */
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 */
	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			/* ... */
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}
/**
 * ipr_process_ccn - Op done function for a CCN.
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		/* ... */
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}
/**
 * strip_whitespace - Strip and pad trailing whitespace.
 */
	i--;
	/* ... */
	i--;
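/*
 * A plausible reconstruction (assumption, not the verbatim ipr.c body):
 * walk back from index i, dropping trailing blanks so fixed-width VPD
 * fields print cleanly. Function and parameter names are illustrative.
 */
static void demo_strip_whitespace(int i, char *buf)
{
	if (i < 1)
		return;
	i--;				/* step from length to last index */
	while (i && buf[i] == ' ')
		i--;			/* skip trailing spaces */
	buf[i + 1] = '\0';		/* terminate after last non-blank */
}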
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 */
	memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
	memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 */
	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 */
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 */
	ipr_log_vpd(&vpd->vpd);
	ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 */
	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
/**
 * ipr_log_cache_error - Log a cache error.
 */
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 */
	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
/**
 * ipr_log_sis64_config_error - Log a device error.
 */
	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
/**
 * ipr_log_config_error - Log a configuration error.
 */
	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
/**
 * ipr_log_array_error - Log an array configuration error.
 */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 */
	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 */
	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 */
	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
/**
 * ipr_log_fabric_path - Log a fabric path error
 */
	u8 path_state = fabric->path_state;

	if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
			     path_active_desc[i].desc, path_state_desc[j].desc,
			     fabric->ioa_port);
	} else if (fabric->cascaded_expander == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
			     path_active_desc[i].desc, path_state_desc[j].desc,
			     fabric->ioa_port, fabric->phy);
	} else if (fabric->phy == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
			     path_active_desc[i].desc, path_state_desc[j].desc,
			     fabric->ioa_port, fabric->cascaded_expander);
	} else {
		ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
			     path_active_desc[i].desc, path_state_desc[j].desc,
			     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/**
 * ipr_log64_fabric_path - Log a fabric path error
 */
	u8 path_state = fabric->path_state;

	ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
		     path_active_desc[i].desc, path_state_desc[j].desc,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 fabric->res_path,
					 buffer, sizeof(buffer)));

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
/**
 * ipr_log_path_elem - Log a fabric path element.
 */
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
		     path_status_desc[j].desc, path_type_desc[i].desc,
		     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));

	if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
			     path_status_desc[j].desc, path_type_desc[i].desc,
			     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	} else if (cfg->cascaded_expander == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
			     path_status_desc[j].desc, path_type_desc[i].desc, cfg->phy,
			     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	} else if (cfg->phy == 0xff) {
		ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, WWN=%08X%08X\n",
			     path_status_desc[j].desc, path_type_desc[i].desc, cfg->cascaded_expander,
			     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	} else {
		ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, WWN=%08X%08X\n",
			     path_status_desc[j].desc, path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
			     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/**
 * ipr_log64_path_elem - Log a fabric path element.
 */
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
		     path_status_desc[j].desc, path_type_desc[i].desc,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]),
		     be32_to_cpu(cfg->wwid[1]));

	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/**
 * ipr_log_fabric_error - Log a fabric error.
 */
	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
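/*
 * Illustrative sketch (not from ipr.c): the fabric descriptors are
 * variable-length records, so the walk above advances by each record's
 * own byte length rather than by a fixed struct size. demo_* names are
 * assumptions, and the length is taken in host order here for brevity.
 */
#include <stdint.h>

struct demo_desc {
	uint16_t length;	/* total bytes in this record */
	/* payload follows the header */
};

static const struct demo_desc *demo_next(const struct demo_desc *d)
{
	/* step over the whole record, payload included */
	return (const struct demo_desc *)((const char *)d + d->length);
}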
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
				    buffer, sizeof(buffer)));

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
					    buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
					    array_entry->expected_res_path,
					    buffer, sizeof(buffer)));
	}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 */
	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
/**
 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
 */
	error = &hostrcb->hcam.u.error64.u.type_41_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb64_error, u) +
			  offsetof(struct ipr_hostrcb_type_41_error, data)));
/**
 * ipr_log_generic_error - Log an adapter error.
 */
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
/**
 * ipr_log_sis64_device_error - Log a cache error.
 */
	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 */
/**
 * ipr_handle_log_data - Log an adapter error.
 */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	if (ioa_cfg->sis64 &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
			return;
	}

	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	/* ... */
	}
	/* in ipr_get_free_hostrcb(): */
	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					   struct ipr_hostrcb, queue);

	if (!hostrcb) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						   struct ipr_hostrcb, queue);
	}

	list_del_init(&hostrcb->queue);
/**
 * ipr_process_error - Op done function for an adapter error log.
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
	schedule_work(&ioa_cfg->work_q);
/**
 * ipr_timeout - An internally generated op has timed out.
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 */
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 */
	for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
		if (ste->compare_product_id_byte[j] == 'X') {
			vpids = &res->std_inq_data.vpids;
			if (vpids->product_id[j] == ste->product_id[j])
				matches++;
			else
				break;
		} else
			matches++;
	}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @bus_width: bus width
 *
 * For a 2-byte wide SCSI bus, the maximum transfer speed is
 * scaled by the number of bytes transferred per clock.
 */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}
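/*
 * Worked example (illustrative, not from ipr.c): with a SES-table limit
 * of 160 and a 16-bit (2-byte) bus, the expression above evaluates to
 * (160 * 10) / (16 / 8) = 1600 / 2 = 800; on an 8-bit bus the same
 * limit gives 1600. The units are driver-internal; note the integer
 * division on bus_width.
 */
static unsigned int demo_max_xfer_rate(unsigned int speed_limit,
				       unsigned int bus_width_bits)
{
	return (speed_limit * 10) / (bus_width_bits / 8);
}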
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @max_delay: max delay in micro-seconds to wait
 */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* ... */
	}

	return -EIO;
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 */
	for (i = 0; i < length_in_words; i++) {
		writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 *
 * Return value:
 * 	0 on success / -EIO on failure
 */
	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ipr_wait_iodbg_ack(ioa_cfg, IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		if (ipr_wait_iodbg_ack(ioa_cfg, IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);
	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	temp_pcii_reg =
	    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 */
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);
			/* ... */
			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
					pci_address + bytes_copied,
					&page[ioa_dump->page_offset / 4],
					(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else
			break;
	}
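/*
 * Illustrative sketch (not from ipr.c): copying into a chain of
 * page-sized buffers, starting a new page whenever the write offset
 * reaches the page size. demo_* names, the 4096-byte page, and the
 * 64-page cap are assumptions for the demo only.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096
#define DEMO_MAX_PAGES 64

struct demo_dump {
	char *pages[DEMO_MAX_PAGES];	/* page pointers, filled as needed */
	size_t next_page;		/* index of the next unused slot */
	size_t page_offset;		/* write offset within current page */
};

static size_t demo_copy(struct demo_dump *d, const char *src, size_t len)
{
	size_t copied = 0;

	while (copied < len) {
		if (d->page_offset == 0 || d->page_offset >= DEMO_PAGE_SIZE) {
			if (d->next_page == DEMO_MAX_PAGES)
				break;			/* out of page slots */
			d->pages[d->next_page] = malloc(DEMO_PAGE_SIZE);
			if (!d->pages[d->next_page])
				break;			/* allocation failed */
			d->next_page++;
			d->page_offset = 0;
		}

		size_t room = DEMO_PAGE_SIZE - d->page_offset;
		size_t cur = (len - copied < room) ? len - copied : room;

		memcpy(d->pages[d->next_page - 1] + d->page_offset,
		       src + copied, cur);
		d->page_offset += cur;
		copied += cur;
	}
	return copied;
}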
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 */
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 */
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 */
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 */
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 */
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 */
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	/* ... */

	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				end_off = be32_to_cpu(sdt->entry[i].end_token);
				bytes_to_copy = end_off - start_off;
			}

			/* ... */
			sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;

			/* ... */
			ioa_dump->hdr.len += bytes_copied;

			if (bytes_copied != bytes_to_copy) {
				driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
				break;
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;

	ioa_cfg->sdt_state = DUMP_OBTAINED;
/**
 * ipr_release_dump - Free adapter dump memory
 */
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
/* ipr_add_remove_thread() */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->del_from_ml && res->sdev) {
			sdev = res->sdev;

			if (!res->add_to_ml)
				list_move_tail(&res->queue, &ioa_cfg->free_res_q);

			res->del_from_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
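/*
 * Hot-plug handling sketch: both passes above walk used_res_q under
 * host_lock, but scsi_remove_device()/scsi_add_device() can sleep, so
 * the lock is dropped around each mid-layer call and the list walk is
 * restarted afterwards (the restart labels are elided from this
 * excerpt). Once a pass completes, scan_done is set and a KOBJ_CHANGE
 * uevent notifies user space that the device list has settled.
 */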
/**
 * ipr_worker_thread - Worker thread
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 **/

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	schedule_work(&ioa_cfg->scsi_add_work_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_read_trace - Dump the adapter trace
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				      IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_show_fw_version - Show the firmware version
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_show_log_level - Show the adapter's error logging level
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_store_log_level - Change the adapter's error logging level
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
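/*
 * log_level is exported as a host sysfs attribute. A minimal usage
 * sketch (attribute name taken from the kernel-doc above; the host
 * number is illustrative):
 *
 *   # cat /sys/class/scsi_host/host0/log_level
 *   2
 *   # echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * Higher values make ipr_dump_ioasa() and friends log progressively
 * more error detail; the value is parsed with simple_strtoul(), so a
 * non-numeric write silently becomes 0.
 */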
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
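/*
 * The diagnostics attribute is plumbing around a full adapter reset:
 * wait out any reset in progress, zero errors_logged, trigger a fresh
 * reset (the trigger itself is elided here), wait for it, and report
 * -EIO if the reset raced or logged new errors. From user space,
 * assuming the conventional attribute name:
 *
 *   # echo 1 > /sys/class/scsi_host/host0/diagnostics && echo PASS
 */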
/**
 * ipr_show_adapter_state - Show the adapter's state
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_store_adapter_state - Change adapter state
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&

		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/**
 * ipr_store_reset_adapter - Reset the adapter
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
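/*
 * The elided line inside the branch above is what actually kicks off
 * the reset (an ipr_initiate_ioa_reset() call in the full source); the
 * handler then blocks until reset/reload finishes. Assuming the usual
 * sysfs attribute name for this handler, a reset can be driven from
 * user space with:
 *
 *   # echo 1 > /sys/class/scsi_host/host0/reset_host
 */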
/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;

		return -EINVAL;

		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Specified irq_poll weight matches the current weight\n");

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
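/*
 * iopoll_weight selects between interrupt-driven completion and
 * irq_poll-based polling on SIS-64 adapters with more than one HRRQ
 * vector: a weight of 0 disables polling, and a non-zero weight (the
 * code above rejects values of 256 and up) becomes the irq_poll budget
 * per poll cycle. Assuming the usual sysfs attribute name:
 *
 *   # echo 64 > /sys/class/scsi_host/host0/iopoll_weight   # enable, budget 64
 *   # echo 0  > /sys/class/scsi_host/host0/iopoll_weight   # back to IRQ mode
 */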
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 **/

	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 **/

	sgl_free_order(sglist->scatterlist, sglist->order);

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 **/

	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	sg = sglist->scatterlist;

		sg->length = bsize_elem;

		sg->length = len % bsize_elem;

	sglist->buffer_len = len;
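/*
 * Buffer sizing sketch with illustrative numbers (assuming 4 KiB pages
 * and IPR_MAX_SGLIST == 64, its value in ipr.h): the image is split
 * across at most IPR_MAX_SGLIST - 1 scatterlist elements, rounded up
 * to a power-of-two page order for sgl_alloc_order(). For a 1 MiB
 * image:
 *
 *   sg_size = 1048576 / 63 = 16644 bytes
 *   order   = get_order(16644) = 3     (8 pages = 32 KiB per element)
 *
 * ipr_copy_ucode_buffer() then fills each element in bsize_elem-sized
 * chunks (32 KiB here), trimming the final element to len % bsize_elem.
 */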
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 **/

	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 **/

	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,

		return -EIO;

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,

		return -EIO;

	ioa_cfg->ucode_sglist = sglist;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);

		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");

		return -ENOMEM;

		dev_err(&ioa_cfg->pdev->dev,
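/*
 * End-to-end flow for a microcode update, pieced together from the
 * helpers above (the copy and update calls themselves are elided from
 * this excerpt): the image named by the sysfs write is fetched with
 * request_firmware() from the firmware search path (typically
 * /lib/firmware), the payload past the ipr_ucode_image_header is
 * copied into a scatter/gather buffer, and ipr_update_ioa_ucode() maps
 * it for DMA and lets the reset path download it. Assuming the usual
 * attribute name and a hypothetical image file:
 *
 *   # cp 534953.bin /lib/firmware/
 *   # echo 534953.bin > /sys/class/scsi_host/host0/update_fw
 */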
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* ipr_read_async_err_log() */
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				      sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/* ipr_next_async_err_log() */
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_read_dump - Dump the adapter
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;

		src = (u8 *)&dump->driver_dump + off;

		count -= len;

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)

		(be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *

		len = sdt_end - off;

		src = (u8 *)&dump->ioa_dump + off;

		count -= len;

	off -= sdt_end;

		len = PAGE_ALIGN(off) - off;

		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];

		count -= len;

	kref_put(&dump->kref, ipr_release_dump);
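/*
 * Layout implied by the offset arithmetic above: a read of the dump
 * binary attribute walks three consecutive regions. Offsets inside
 * sizeof(dump->driver_dump) are served from the driver dump headers;
 * the next window, ending at sdt_end (sized from the SDT header plus
 * the used entry count on SIS-64 adapters), is served from the
 * in-memory smart dump table; and the remainder is IOA memory that
 * ipr_get_ioa_dump() stored one page per ioa_data[] slot, so the
 * region-relative offset selects a page via (off & PAGE_MASK) >>
 * PAGE_SHIFT and each copy is capped at the next page boundary with
 * PAGE_ALIGN(off) - off.
 */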
/**
 * ipr_alloc_dump - Prepare for adapter dump
 **/

		return -ENOMEM;

	if (ioa_cfg->sis64)

		return -ENOMEM;

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_free_dump - Free adapter dump memory
 **/

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

/**
 * ipr_write_dump - Setup dump state of adapter
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

		return -EINVAL;
/**
 * ipr_change_queue_depth - Change the device's queue depth
 **/

	return sdev->queue_depth;

/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *	this device.
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)

		__ipr_format_res_path(res->res_path, buffer,

	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_show_device_id - Show the device_id for this device.
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_show_resource_type - Show the resource type for this device.
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_show_raw_mode - Show the adapter's raw mode
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_store_raw_mode - Change the adapter's raw mode
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	res->raw_mode = simple_strtoul(buf, NULL, 10);

	if (res->sdev)
		sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
			    res->raw_mode ? "enabled" : "disabled");

	len = -EINVAL;

	len = -ENXIO;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
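/*
 * The handlers above back per-device sysfs attributes (resource
 * handle, resource path/address, device ID, resource type, raw mode).
 * A usage sketch, assuming the conventional attribute names and an
 * illustrative 2:0:3:0 SCSI address:
 *
 *   # cd /sys/class/scsi_device/2:0:3:0/device
 *   # cat adapter_handle resource_path device_id resource_type
 *   # echo 1 > raw_mode      # pass commands through un-translated
 *
 * raw_mode only makes sense for AF DASD resources; the checks elided
 * from the store handler above reject other device types with -EINVAL.
 */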
/**
 * ipr_biosparam - Return the HSC mapping
 **/

/**
 * ipr_find_starget - Find target based on bus/target.
 **/

	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {

/**
 * ipr_target_destroy - Destroy a SCSI target
 **/

	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {

		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->array_ids);
		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->vset_ids);
		else if (starget->channel == 0)
			clear_bit(starget->id, ioa_cfg->target_ids);

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))

/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 **/

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		sdev->hostdata = NULL;
		res->sdev = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_slave_configure - Configure a SCSI device
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;

		sdev->type = TYPE_RAID;

		sdev->scsi_level = 4;
		sdev->no_uld_attach = 1;

		sdev->scsi_level = SCSI_SPC_3;
		sdev->no_report_opcodes = 1;
		blk_queue_rq_timeout(sdev->request_queue,
				     IPR_VSET_RW_TIMEOUT);
		blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sis64)

		res->res_path, buffer, sizeof(buffer)));

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;

		res->needs_sync_complete = 1;

		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -ENXIO;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/**
 * ipr_match_lun - Match function for specified LUN
 **/

	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 **/

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {

/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 **/

		spin_lock_irqsave(hrrq->lock, flags);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

				ipr_cmd->eh_comp = &comp;

		}
		spin_unlock_irqrestore(hrrq->lock, flags);

		spin_lock_irqsave(hrrq->lock, flags);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

				ipr_cmd->eh_comp = NULL;

		}
		spin_unlock_irqrestore(hrrq->lock, flags);

		dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
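/*
 * Synchronization pattern used above (the match/wait plumbing between
 * the two loops is elided from this excerpt): error-handling threads
 * never reap commands directly. Each in-flight ipr_cmnd matching the
 * caller's predicate gets a struct completion hung off eh_comp; the
 * normal completion paths (see ipr_scsi_done() and __ipr_erp_done()
 * below) call complete() on it, and this function waits with a
 * timeout, then re-walks the queues to detach any completions that
 * never fired before reporting the timeout.
 */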
/* ipr_eh_host_reset() */
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {

		dev_err(&ioa_cfg->pdev->dev,

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_device_reset - Reset the device
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/

	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;

	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;

/**
 * __ipr_eh_dev_reset - Reset the device
 **/

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	res->resetting_device = 1;

	res->resetting_device = 0;
	res->reset_occurred = 1;

/* ipr_eh_dev_reset() */
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	res = cmd->device->hostdata;

	spin_lock_irq(cmd->device->host->host_lock);

	spin_unlock_irq(cmd->device->host->host_lock);

	rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 **/

	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);

	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

/**
 * ipr_abort_timeout - An abort task has timed out
 **/

	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");

	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

/**
 * ipr_cancel_op - Cancel specified op
 **/

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete.
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	readl(ioa_cfg->regs.sense_interrupt_reg);

	spin_lock(&hrrq->_lock);
	for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
		if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
			if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {

	spin_unlock(&hrrq->_lock);

	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

		scsi_cmd->cmnd[0]);

	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	res->needs_sync_complete = 1;
/**
 * ipr_scan_finished - Report whether scan is done
 **/

	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)

	if ((elapsed_time / HZ) > (ioa_cfg->transop_timeout * 2))

	spin_unlock_irqrestore(shost->host_lock, lock_flags);

/**
 * ipr_eh_abort - Abort a single op
 **/

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);

	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 **/

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);

	if (ioa_cfg->sis64) {
		int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);

		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);

	if (ioa_cfg->clear_isr) {

		dev_err(&ioa_cfg->pdev->dev,

		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	ioa_cfg->ioa_unit_checked = 1;

	dev_err(&ioa_cfg->pdev->dev,

	dev_err(&ioa_cfg->pdev->dev,

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

/**
 * ipr_isr_eh - Interrupt service routine error handler
 **/

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

/* ipr_process_hrrq() */
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;

	if (!hrr_queue->allow_interrupts)

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
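/*
 * HRRQ completion protocol, as visible above: each host request
 * response queue entry is a __be32 written by the adapter that packs
 * a command index together with a toggle bit. The adapter flips the
 * sense of the toggle bit on every pass over the ring, so an entry
 * belongs to the host exactly while its toggle bit matches the
 * queue's expected toggle_bit; when the driver wraps hrrq_curr from
 * hrrq_end back to hrrq_start it XORs its expected bit, which is what
 * makes stale entries from the previous lap compare unequal. No
 * producer/consumer index registers need to be read in the fast path.
 */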
/* ipr_iopoll() */
	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_del(&ipr_cmd->queue);
	del_timer(&ipr_cmd->timer);
	ipr_cmd->fast_done(ipr_cmd);

/**
 * ipr_isr - Interrupt service routine
 **/

	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	if (ipr_process_hrrq(hrrq, -1, &doneq)) {

		if (!ioa_cfg->clear_isr)

		writel(IPR_PCII_HRRQ_UPDATED,
		       ioa_cfg->regs.clr_interrupt_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_del(&ipr_cmd->queue);
	del_timer(&ipr_cmd->timer);
	ipr_cmd->fast_done(ipr_cmd);

/**
 * ipr_isr_mhrrq - Interrupt service routine
 **/

	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			irq_poll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_del(&ipr_cmd->queue);
	del_timer(&ipr_cmd->timer);
	ipr_cmd->fast_done(ipr_cmd);
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/

	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);

/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/

	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {

		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +

		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
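/*
 * Two IOADL (IOA data list) formats coexist in the two builders above:
 * SIS-64 adapters take ipr_ioadl64_desc entries (64-bit DMA address
 * plus a separate flags word), while older adapters take 32-bit
 * ipr_ioadl_desc entries whose flags share a word with the data
 * length. In both cases the final element is tagged with
 * IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends, and
 * the legacy path can embed a short list directly in the IOARCB's
 * add_data area (the inline-ioadl branch above), saving the adapter a
 * descriptor fetch across the bus.
 */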
/**
 * __ipr_erp_done - Process completion of ERP for a device
 **/

	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		scsi_cmd->result |= (DID_ERROR << 16);

		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);

		res->needs_sync_complete = 1;
		res->in_erp = 0;

	scsi_dma_unmap(ipr_cmd->scsi_cmd);

	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

/**
 * ipr_erp_done - Process completion of ERP for a device
 **/

	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);

/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 **/

	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =

	ioarcb->write_ioadl_addr =

	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;

/**
 * __ipr_erp_request_sense - Send request sense to a device
 **/

	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

/**
 * ipr_erp_request_sense - Send request sense to a device
 **/

	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);

/**
 * ipr_erp_cancel_all - Send cancel all to a device
 *
 * ... Cancel all will return them to us.
 **/

	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;

	res->in_erp = 1;

	if (!scsi_cmd->device->simple_tags) {

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 **/

	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {

		if (ioasa->hdr.ilid != 0)

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)

	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)

/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 **/

	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	    ioasa->u.vset.failing_lba_hi != 0) {

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

	    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {

			be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;

			be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 **/

	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));

/**
 * ipr_erp_start - Process an error response for a SCSI op
 **/

	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		scsi_cmd->result |= (DID_ABORT << 16);

		scsi_cmd->result |= (DID_IMM_RETRY << 16);

		scsi_cmd->result |= (DID_NO_CONNECT << 16);

		scsi_cmd->result |= (DID_NO_CONNECT << 16);

		res->needs_sync_complete = 1;

		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);

		/*
		 * ... so the SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);

		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);

		res->needs_sync_complete = 1;

		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);

		res->needs_sync_complete = 1;

		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);

		scsi_cmd->result |= (DID_ERROR << 16);

		res->needs_sync_complete = 1;

	scsi_dma_unmap(ipr_cmd->scsi_cmd);

	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/**
 * ipr_scsi_done - mid-layer done function
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer.
 **/

	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);

		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
6231 * ipr_queuecommand - Queue a mid-layer request
6235 * This function queues a request generated by the mid-layer.
6254 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6256 scsi_cmd->result = (DID_OK << 16); in ipr_queuecommand()
6257 res = scsi_cmd->device->hostdata; in ipr_queuecommand()
6260 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6262 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6265 * We have told the host to stop giving us new requests, but in ipr_queuecommand()
6268 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6269 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6274 * FIXME - Create scsi_set_host_offline interface in ipr_queuecommand()
6277 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6278 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6284 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6287 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6290 ioarcb = &ipr_cmd->ioarcb; in ipr_queuecommand()
6292 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); in ipr_queuecommand()
6293 ipr_cmd->scsi_cmd = scsi_cmd; in ipr_queuecommand()
6294 ipr_cmd->done = ipr_scsi_eh_done; in ipr_queuecommand()
6297 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6298 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6300 if (res->reset_occurred) { in ipr_queuecommand()
6301 res->reset_occurred = 0; in ipr_queuecommand()
6302 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; in ipr_queuecommand()
6307 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_queuecommand()
6309 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; in ipr_queuecommand()
6310 if (scsi_cmd->flags & SCMD_TAGGED) in ipr_queuecommand()
6311 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; in ipr_queuecommand()
6313 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; in ipr_queuecommand()
6316 if (scsi_cmd->cmnd[0] >= 0xC0 && in ipr_queuecommand()
6317 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { in ipr_queuecommand()
6318 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_queuecommand()
6320 if (res->raw_mode && ipr_is_af_dasd_device(res)) { in ipr_queuecommand()
6321 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; in ipr_queuecommand()
6323 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6324 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6327 if (ioa_cfg->sis64) in ipr_queuecommand()
6332 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6333 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6334 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6335 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6341 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6342 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6343 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6348 ioarcb->res_handle = res->res_handle; in ipr_queuecommand()
6349 if (res->needs_sync_complete) { in ipr_queuecommand()
6350 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; in ipr_queuecommand()
6351 res->needs_sync_complete = 0; in ipr_queuecommand()
6353 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6356 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6360 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6361 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in ipr_queuecommand()
6362 scsi_cmd->result = (DID_NO_CONNECT << 16); in ipr_queuecommand()
6364 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
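/*
 * Editor's sketch: the scsi_cmd->result assignments above pack the host
 * byte into bits 16..23 of the 32-bit result word. Standalone C; the
 * DID_* values mirror the kernel's definitions:
 */
#include <stdint.h>
#include <stdio.h>

#define DID_OK		0x00
#define DID_NO_CONNECT	0x05

static uint32_t make_result(uint8_t host_byte, uint8_t status_byte)
{
	return ((uint32_t)host_byte << 16) | status_byte;
}

int main(void)
{
	printf("0x%08x\n", make_result(DID_NO_CONNECT, 0));	/* 0x00050000 */
	return 0;
}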
6369 * ipr_ioa_info - Get information about the card/driver
6381 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6383 spin_lock_irqsave(host->host_lock, lock_flags); in ipr_ioa_info()
6384 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6385 spin_unlock_irqrestore(host->host_lock, lock_flags); in ipr_ioa_info()
6406 .this_id = -1,
6428 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6442 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
6455 * ipr_ioa_bringdown_done - IOA bring down completion.
6466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done()
6470 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6472 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
6473 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
6476 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
6477 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
6478 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
6479 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6480 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6481 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6485 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
6486 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
6493 * ipr_ioa_reset_done - IOA reset completion.
6497 * This function processes the completion of an adapter reset. It schedules any necessary mid-layer add/removes and wakes any reset sleepers. in ipr_ioa_reset_done()
6505 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done()
6510 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
6511 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
6512 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
6513 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
6514 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
6517 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
6518 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
6520 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
6521 if (res->add_to_ml || res->del_from_ml) { in ipr_ioa_reset_done()
6526 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
6529 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
6533 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
6537 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
6540 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
6541 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
6543 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
6544 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
6545 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
6547 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
6548 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
6554 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6565 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); in ipr_set_sup_dev_dflt()
6566 supported_dev->num_records = 1; in ipr_set_sup_dev_dflt()
6567 supported_dev->data_length = in ipr_set_sup_dev_dflt()
6569 supported_dev->reserved = 0; in ipr_set_sup_dev_dflt()
6573 * ipr_set_supported_devs - Send Set Supported Devices for a device
6583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs()
6584 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
6585 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_set_supported_devs()
6586 struct ipr_resource_entry *res = ipr_cmd->u.res; in ipr_set_supported_devs()
6588 ipr_cmd->job_step = ipr_ioa_reset_done; in ipr_set_supported_devs()
6590 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
6594 ipr_cmd->u.res = res; in ipr_set_supported_devs()
6595 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); in ipr_set_supported_devs()
6597 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_set_supported_devs()
6598 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_set_supported_devs()
6599 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_set_supported_devs()
6601 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; in ipr_set_supported_devs()
6602 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; in ipr_set_supported_devs()
6603 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; in ipr_set_supported_devs()
6604 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; in ipr_set_supported_devs()
6607 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
6615 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
6616 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_set_supported_devs()
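/*
 * Editor's sketch: cdb[7]/cdb[8] above carry the parameter-list length
 * most-significant byte first, since SCSI CDB fields are big-endian
 * regardless of host byte order. Illustrative helper:
 */
#include <stdint.h>

static void cdb_put_len16(uint8_t *cdb, uint16_t len)
{
	cdb[7] = (len >> 8) & 0xff;	/* MSB first */
	cdb[8] = len & 0xff;
}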
6626 * ipr_get_mode_page - Locate specified mode page
6641 if (!mode_pages || (mode_pages->hdr.length == 0)) in ipr_get_mode_page()
6644 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; in ipr_get_mode_page()
6646 (mode_pages->data + mode_pages->hdr.block_desc_len); in ipr_get_mode_page()
6650 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) in ipr_get_mode_page()
6655 mode_hdr->page_length); in ipr_get_mode_page()
6656 length -= page_length; in ipr_get_mode_page()
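/*
 * Editor's sketch of the walk ipr_get_mode_page() performs: skip the
 * header and block descriptors, then advance record by record using
 * each page's advertised page_length. Standalone, with a simplified
 * two-byte page header:
 */
#include <stddef.h>
#include <stdint.h>

struct page_hdr {
	uint8_t code;		/* page code in the low 6 bits */
	uint8_t page_length;	/* bytes that follow this header */
};

static struct page_hdr *find_page(uint8_t *buf, size_t len, uint8_t code)
{
	size_t off = 0;

	while (off + sizeof(struct page_hdr) <= len) {
		struct page_hdr *h = (struct page_hdr *)(buf + off);

		if ((h->code & 0x3f) == code)
			return h;
		off += sizeof(*h) + h->page_length;
	}
	return NULL;	/* page not present */
}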
6665 * ipr_check_term_power - Check for term power errors
6685 entry_length = mode_page->entry_length; in ipr_check_term_power()
6687 bus = mode_page->bus; in ipr_check_term_power()
6689 for (i = 0; i < mode_page->num_entries; i++) { in ipr_check_term_power()
6690 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { in ipr_check_term_power()
6691 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
6693 bus->res_addr.bus); in ipr_check_term_power()
6701 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6718 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
6720 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
6721 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
6726 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6746 entry_length = mode_page->entry_length; in ipr_modify_ioafp_mode_page_28()
6749 for (i = 0, bus = mode_page->bus; in ipr_modify_ioafp_mode_page_28()
6750 i < mode_page->num_entries; in ipr_modify_ioafp_mode_page_28()
6752 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { in ipr_modify_ioafp_mode_page_28()
6753 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
6755 IPR_GET_PHYS_LOC(bus->res_addr)); in ipr_modify_ioafp_mode_page_28()
6759 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
6760 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; in ipr_modify_ioafp_mode_page_28()
6761 bus->bus_width = bus_attr->bus_width; in ipr_modify_ioafp_mode_page_28()
6762 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); in ipr_modify_ioafp_mode_page_28()
6763 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; in ipr_modify_ioafp_mode_page_28()
6764 if (bus_attr->qas_enabled) in ipr_modify_ioafp_mode_page_28()
6765 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; in ipr_modify_ioafp_mode_page_28()
6767 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; in ipr_modify_ioafp_mode_page_28()
6772 * ipr_build_mode_select - Build a mode select command
6786 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_select()
6788 ioarcb->res_handle = res_handle; in ipr_build_mode_select()
6789 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_select()
6790 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_mode_select()
6791 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; in ipr_build_mode_select()
6792 ioarcb->cmd_pkt.cdb[1] = parm; in ipr_build_mode_select()
6793 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_select()
6799 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6810 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28()
6811 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
6818 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page28()
6819 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page28()
6822 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
6825 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_ioafp_mode_select_page28()
6826 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
6835 * ipr_build_mode_sense - Builds a mode sense command
6849 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_sense()
6851 ioarcb->res_handle = res_handle; in ipr_build_mode_sense()
6852 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; in ipr_build_mode_sense()
6853 ioarcb->cmd_pkt.cdb[2] = parm; in ipr_build_mode_sense()
6854 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_sense()
6855 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_sense()
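/*
 * Editor's sketch: the two builders above fill 6-byte MODE SELECT/MODE
 * SENSE CDBs. A standalone equivalent for MODE SENSE(6); the 0x1a
 * opcode is the standard SCSI value:
 */
#include <stdint.h>
#include <string.h>

static void build_mode_sense6(uint8_t cdb[6], uint8_t page, uint8_t alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x1a;		/* MODE SENSE(6) */
	cdb[2] = page;		/* page code, as in ipr_build_mode_sense() */
	cdb[4] = alloc_len;	/* single-byte allocation length */
}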
6861 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6871 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed()
6872 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_cmd_failed()
6874 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
6876 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); in ipr_reset_cmd_failed()
6879 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
6884 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed()
6896 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_failed()
6899 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_reset_mode_sense_failed()
6900 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
6909 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6920 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28()
6924 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
6928 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; in ipr_ioafp_mode_sense_page28()
6929 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; in ipr_ioafp_mode_sense_page28()
6938 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6948 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24()
6949 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
6958 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; in ipr_ioafp_mode_select_page24()
6960 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page24()
6961 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page24()
6964 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
6967 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_ioafp_mode_select_page24()
6975 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6986 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_page24_failed()
6989 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_reset_mode_sense_page24_failed()
6997 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7008 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24()
7012 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7016 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; in ipr_ioafp_mode_sense_page24()
7017 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; in ipr_ioafp_mode_sense_page24()
7026 * ipr_init_res_table - Initialize the resource table
7031 * devices and schedule adding/removing them from the mid-layer
7039 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table()
7046 if (ioa_cfg->sis64) in ipr_init_res_table()
7047 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7049 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7052 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7054 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7055 list_move_tail(&res->queue, &old_res); in ipr_init_res_table()
7057 if (ioa_cfg->sis64) in ipr_init_res_table()
7058 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7060 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7063 if (ioa_cfg->sis64) in ipr_init_res_table()
7064 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7066 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7071 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7078 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7079 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7084 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7086 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7088 res->add_to_ml = 1; in ipr_init_res_table()
7089 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) in ipr_init_res_table()
7090 res->sdev->allow_restart = 1; in ipr_init_res_table()
7097 if (res->sdev) { in ipr_init_res_table()
7098 res->del_from_ml = 1; in ipr_init_res_table()
7099 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_init_res_table()
7100 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7106 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7109 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
7110 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; in ipr_init_res_table()
7112 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_init_res_table()
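/*
 * Editor's sketch of the reconcile pattern in ipr_init_res_table():
 * park every known resource on an "old" list, walk the freshly read
 * config table, and whatever was not re-discovered gets scheduled for
 * removal. A standalone model using arrays instead of list_heads:
 */
#include <stdbool.h>
#include <stdio.h>

#define SLOTS 4

int main(void)
{
	int old[SLOTS] = { 1, 2, 3, -1 };	/* -1 marks an empty slot */
	int found[] = { 2, 3, 5 };		/* handles in the new table */

	for (size_t i = 0; i < sizeof(found) / sizeof(found[0]); i++) {
		bool known = false;

		for (int j = 0; j < SLOTS; j++) {
			if (old[j] == found[i]) {
				old[j] = -1;	/* re-discovered: keep it */
				known = true;
			}
		}
		if (!known)
			printf("add_to_ml %d\n", found[i]);	/* new device */
	}
	for (int j = 0; j < SLOTS; j++)
		if (old[j] != -1)
			printf("del_from_ml %d\n", old[j]);	/* gone */
	return 0;
}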
7119 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg()
7131 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_query_ioa_cfg()
7132 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7133 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7136 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) in ipr_ioafp_query_ioa_cfg()
7137 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7138 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7139 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_ioafp_query_ioa_cfg()
7140 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); in ipr_ioafp_query_ioa_cfg()
7141 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_query_ioa_cfg()
7142 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_query_ioa_cfg()
7144 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; in ipr_ioafp_query_ioa_cfg()
7145 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7146 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7147 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7149 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
7152 ipr_cmd->job_step = ipr_init_res_table; in ipr_ioafp_query_ioa_cfg()
7162 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_ioa_service_action_failed()
7173 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioa_service_action()
7175 ioarcb->res_handle = res_handle; in ipr_build_ioa_service_action()
7176 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; in ipr_build_ioa_service_action()
7177 ioarcb->cmd_pkt.cdb[1] = sa_code; in ipr_build_ioa_service_action()
7178 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_build_ioa_service_action()
7182 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7191 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_set_caching_parameters()
7192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters()
7193 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7197 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; in ipr_ioafp_set_caching_parameters()
7199 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { in ipr_ioafp_set_caching_parameters()
7204 ioarcb->cmd_pkt.cdb[2] = 0x40; in ipr_ioafp_set_caching_parameters()
7206 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; in ipr_ioafp_set_caching_parameters()
7219 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7234 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_inquiry()
7237 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_ioafp_inquiry()
7238 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_inquiry()
7240 ioarcb->cmd_pkt.cdb[0] = INQUIRY; in ipr_ioafp_inquiry()
7241 ioarcb->cmd_pkt.cdb[1] = flags; in ipr_ioafp_inquiry()
7242 ioarcb->cmd_pkt.cdb[2] = page; in ipr_ioafp_inquiry()
7243 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_ioafp_inquiry()
7252 * ipr_inquiry_page_supported - Is the given inquiry page supported
7265 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) in ipr_inquiry_page_supported()
7266 if (page0->page[i] == page) in ipr_inquiry_page_supported()
7273 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7284 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry()
7285 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
7286 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
7289 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; in ipr_ioafp_pageC4_inquiry()
7294 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
7306 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry()
7318 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
7319 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
7322 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; in ipr_ioafp_cap_inquiry()
7327 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
7337 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7348 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry()
7352 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; in ipr_ioafp_page3_inquiry()
7355 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
7363 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7374 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry()
7380 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
7382 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
7385 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
7389 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
7391 list_add_tail(&ipr_cmd->queue, in ipr_ioafp_page0_inquiry()
7392 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
7397 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; in ipr_ioafp_page0_inquiry()
7400 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
7408 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7418 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry()
7421 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; in ipr_ioafp_std_inquiry()
7424 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
7432 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7443 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq()
7444 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_identify_hrrq()
7448 ipr_cmd->job_step = ipr_ioafp_std_inquiry; in ipr_ioafp_identify_hrrq()
7449 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
7450 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
7452 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
7453 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7455 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; in ipr_ioafp_identify_hrrq()
7456 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_identify_hrrq()
7458 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_identify_hrrq()
7459 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
7460 ioarcb->cmd_pkt.cdb[1] = 0x1; in ipr_ioafp_identify_hrrq()
7462 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
7463 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
7465 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
7467 ioarcb->cmd_pkt.cdb[2] = in ipr_ioafp_identify_hrrq()
7468 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
7469 ioarcb->cmd_pkt.cdb[3] = in ipr_ioafp_identify_hrrq()
7470 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
7471 ioarcb->cmd_pkt.cdb[4] = in ipr_ioafp_identify_hrrq()
7472 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7473 ioarcb->cmd_pkt.cdb[5] = in ipr_ioafp_identify_hrrq()
7474 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
7475 ioarcb->cmd_pkt.cdb[7] = in ipr_ioafp_identify_hrrq()
7476 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7477 ioarcb->cmd_pkt.cdb[8] = in ipr_ioafp_identify_hrrq()
7478 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
7480 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
7481 ioarcb->cmd_pkt.cdb[9] = in ipr_ioafp_identify_hrrq()
7482 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7484 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
7485 ioarcb->cmd_pkt.cdb[10] = in ipr_ioafp_identify_hrrq()
7486 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
7487 ioarcb->cmd_pkt.cdb[11] = in ipr_ioafp_identify_hrrq()
7488 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
7489 ioarcb->cmd_pkt.cdb[12] = in ipr_ioafp_identify_hrrq()
7490 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
7491 ioarcb->cmd_pkt.cdb[13] = in ipr_ioafp_identify_hrrq()
7492 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
7495 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
7496 ioarcb->cmd_pkt.cdb[14] = in ipr_ioafp_identify_hrrq()
7497 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7502 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
7503 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_ioafp_identify_hrrq()
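/*
 * Editor's sketch: the CDB above splits a 64-bit host RRQ DMA address
 * across two big-endian fields -- bits 31..0 in bytes 2..5, bits 63..32
 * in bytes 10..13. A standalone equivalent:
 */
#include <stdint.h>

static void cdb_put_addr64(uint8_t *cdb, uint64_t addr)
{
	cdb[2]  = (addr >> 24) & 0xff;	/* low word, MSB first */
	cdb[3]  = (addr >> 16) & 0xff;
	cdb[4]  = (addr >> 8) & 0xff;
	cdb[5]  = addr & 0xff;
	cdb[10] = (addr >> 56) & 0xff;	/* high word, sis64 only */
	cdb[11] = (addr >> 48) & 0xff;
	cdb[12] = (addr >> 40) & 0xff;
	cdb[13] = (addr >> 32) & 0xff;
}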
7514 * ipr_reset_timer_done - Adapter reset timer function
7529 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done()
7532 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7534 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
7535 list_del(&ipr_cmd->queue); in ipr_reset_timer_done()
7536 ipr_cmd->done(ipr_cmd); in ipr_reset_timer_done()
7539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7543 * ipr_reset_start_timer - Start a timer for adapter reset job
7561 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
7562 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_start_timer()
7564 ipr_cmd->timer.expires = jiffies + timeout; in ipr_reset_start_timer()
7565 ipr_cmd->timer.function = ipr_reset_timer_done; in ipr_reset_start_timer()
7566 add_timer(&ipr_cmd->timer); in ipr_reset_start_timer()
7570 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7581 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
7582 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
7585 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
7586 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
7587 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
7588 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
7589 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
7593 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
7594 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
7595 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
7597 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
7600 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
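/*
 * Editor's sketch of the host RRQ ring initialized above: the consumer
 * follows hrrq_curr from hrrq_start to hrrq_end and flips its expected
 * toggle bit on every wrap, so entries left over from the previous pass
 * are never mistaken for new completions. Standalone model (the toggle
 * is assumed to live in bit 0 here; the driver uses IPR_HRRQ_TOGGLE_BIT):
 */
#include <stdbool.h>
#include <stdint.h>

struct rrq {
	uint32_t *start, *end, *curr;
	uint32_t toggle;	/* starts at 1, as in ipr_init_ioa_mem() */
};

static bool rrq_pop(struct rrq *q, uint32_t *out)
{
	if ((*q->curr & 1) != q->toggle)
		return false;		/* adapter has not produced it yet */
	*out = *q->curr;
	if (q->curr == q->end) {	/* wrap: flip the expected toggle */
		q->curr = q->start;
		q->toggle ^= 1;
	} else {
		q->curr++;
	}
	return true;
}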
7604 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7615 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage()
7618 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
7622 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); in ipr_reset_next_stage()
7633 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
7634 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
7635 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
7636 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
7638 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
7640 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
7643 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
7644 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
7649 ipr_cmd->timer.expires = jiffies + stage_time * HZ; in ipr_reset_next_stage()
7650 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_next_stage()
7651 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_next_stage()
7652 add_timer(&ipr_cmd->timer); in ipr_reset_next_stage()
7654 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
7660 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa()
7677 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_enable_ioa()
7680 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
7681 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
7682 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
7683 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
7685 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7687 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
7688 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
7691 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
7695 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
7696 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
7701 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
7703 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7706 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
7708 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
7710 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
7712 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
7714 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7715 ipr_cmd->job_step = ipr_reset_next_stage; in ipr_reset_enable_ioa()
7719 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
7720 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_enable_ioa()
7721 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_enable_ioa()
7722 add_timer(&ipr_cmd->timer); in ipr_reset_enable_ioa()
7723 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
7730 * ipr_reset_wait_for_dump - Wait for a dump to time out.
7741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump()
7743 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
7744 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
7745 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
7746 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
7748 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
7749 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_wait_for_dump()
7755 * ipr_unit_check_no_data - Log a unit check/no data error log
7766 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
7767 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
7771 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7788 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
7790 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
7810 length = (be32_to_cpu(sdt.entry[0].end_token) - in ipr_get_unit_check_buffer()
7814 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
7816 list_del_init(&hostrcb->queue); in ipr_get_unit_check_buffer()
7817 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); in ipr_get_unit_check_buffer()
7821 (__be32 *)&hostrcb->hcam, in ipr_get_unit_check_buffer()
7822 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); in ipr_get_unit_check_buffer()
7826 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_get_unit_check_buffer()
7828 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
7829 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
7833 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
7837 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7847 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job()
7850 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
7852 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_get_unit_check_job()
7861 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait()
7865 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
7868 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
7869 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
7872 if (!ipr_cmd->u.time_left) in ipr_dump_mailbox_wait()
7873 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
7876 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
7877 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
7878 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
7882 ipr_cmd->job_step = ipr_reset_wait_for_dump; in ipr_dump_mailbox_wait()
7883 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
7886 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_dump_mailbox_wait()
7896 * ipr_reset_restore_cfg_space - Restore PCI config space.
7908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space()
7911 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
7912 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
7915 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_restore_cfg_space()
7921 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
7923 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
7924 readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
7927 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
7928 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
7929 ipr_cmd->job_step = ipr_reset_get_unit_check_job; in ipr_reset_restore_cfg_space()
7933 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
7935 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_restore_cfg_space()
7941 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
7942 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_restore_cfg_space()
7943 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
7944 ipr_cmd->job_step = ipr_dump_mailbox_wait; in ipr_reset_restore_cfg_space()
7945 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; in ipr_reset_restore_cfg_space()
7947 ipr_cmd->job_step = ipr_reset_enable_ioa; in ipr_reset_restore_cfg_space()
7955 * ipr_reset_bist_done - BIST has completed on the adapter.
7965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done()
7968 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
7969 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
7970 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
7971 ipr_cmd->job_step = ipr_reset_restore_cfg_space; in ipr_reset_bist_done()
7977 * ipr_reset_start_bist - Run BIST on the adapter.
7987 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist()
7991 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
7993 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
7995 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
7998 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_start_bist()
8002 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8003 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8004 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8005 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_start_bist()
8014 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8025 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_slot_reset_done()
8032 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8041 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work()
8042 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8050 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8051 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8058 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8068 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset()
8071 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); in ipr_reset_slot_reset()
8072 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8073 ipr_cmd->job_step = ipr_reset_slot_reset_done; in ipr_reset_slot_reset()
8079 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8089 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait()
8092 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8093 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8094 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8096 if (ipr_cmd->u.time_left) { in ipr_reset_block_config_access_wait()
8098 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access_wait()
8102 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8103 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8112 * ipr_reset_block_config_access - Block config access to the IOA
8122 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8123 ipr_cmd->job_step = ipr_reset_block_config_access_wait; in ipr_reset_block_config_access()
8124 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access()
8129 * ipr_reset_allowed - Query whether or not IOA can be reset
8133 * 0 if reset not allowed / non-zero if reset is allowed
8139 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8144 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8160 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist()
8163 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8164 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_wait_to_start_bist()
8167 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_wait_to_start_bist()
8175 * ipr_reset_alert - Alert the adapter of a pending reset
8188 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert()
8193 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8197 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8198 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; in ipr_reset_alert()
8200 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_alert()
8203 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_alert()
8211 * ipr_reset_quiesce_done - Complete IOA disconnect
8221 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done()
8224 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_quiesce_done()
8231 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done()
8249 ipr_cmd->job_step = ipr_reset_quiesce_done; in ipr_reset_cancel_hcam_done()
8252 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8253 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
8256 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
8260 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8271 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam()
8285 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
8288 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; in ipr_reset_cancel_hcam()
8290 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
8291 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
8292 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
8293 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) in ipr_reset_cancel_hcam()
8296 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_cancel_hcam()
8297 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
8298 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_reset_cancel_hcam()
8299 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
8300 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; in ipr_reset_cancel_hcam()
8301 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; in ipr_reset_cancel_hcam()
8302 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; in ipr_reset_cancel_hcam()
8303 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; in ipr_reset_cancel_hcam()
8304 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; in ipr_reset_cancel_hcam()
8305 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; in ipr_reset_cancel_hcam()
8306 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; in ipr_reset_cancel_hcam()
8307 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; in ipr_reset_cancel_hcam()
8308 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; in ipr_reset_cancel_hcam()
8309 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; in ipr_reset_cancel_hcam()
8315 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_cancel_hcam()
8320 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_cancel_hcam()
8327 * ipr_reset_ucode_download_done - Microcode download completion
8337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done()
8338 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
8340 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
8341 sglist->num_sg, DMA_TO_DEVICE); in ipr_reset_ucode_download_done()
8343 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download_done()
8348 * ipr_reset_ucode_download - Download microcode to the adapter
8359 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download()
8360 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
8363 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download()
8368 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_ucode_download()
8369 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_reset_ucode_download()
8370 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; in ipr_reset_ucode_download()
8371 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; in ipr_reset_ucode_download()
8372 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; in ipr_reset_ucode_download()
8373 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; in ipr_reset_ucode_download()
8374 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; in ipr_reset_ucode_download()
8376 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
8380 ipr_cmd->job_step = ipr_reset_ucode_download_done; in ipr_reset_ucode_download()
8390 * ipr_reset_shutdown_ioa - Shutdown the adapter
8402 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa()
8403 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; in ipr_reset_shutdown_ioa()
8409 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_shutdown_ioa()
8411 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8412 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_shutdown_ioa()
8413 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_shutdown_ioa()
8414 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_reset_shutdown_ioa()
8415 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; in ipr_reset_shutdown_ioa()
8421 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
8429 ipr_cmd->job_step = ipr_reset_ucode_download; in ipr_reset_shutdown_ioa()
8431 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_shutdown_ioa()
8438 * ipr_reset_ioa_job - Adapter reset job
8449 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job()
8452 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_ioa_job()
8454 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
8459 list_add_tail(&ipr_cmd->queue, in ipr_reset_ioa_job()
8460 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
8465 rc = ipr_cmd->job_step_failed(ipr_cmd); in ipr_reset_ioa_job()
8471 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; in ipr_reset_ioa_job()
8472 rc = ipr_cmd->job_step(ipr_cmd); in ipr_reset_ioa_job()
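/*
 * Editor's sketch of the job_step engine above: every reset stage
 * stores the next stage's function pointer in the command block and
 * returns whether the engine should keep stepping or wait for an
 * interrupt/timer to re-enter it. Standalone model:
 */
#include <stdio.h>

enum { JOB_CONTINUE, JOB_RETURN };

struct job {
	int (*step)(struct job *j);
};

static int stage_two(struct job *j)
{
	puts("stage two: wait for completion");
	return JOB_RETURN;	/* engine exits; a callback re-enters later */
}

static int stage_one(struct job *j)
{
	puts("stage one");
	j->step = stage_two;	/* chain to the next stage */
	return JOB_CONTINUE;
}

int main(void)
{
	struct job j = { .step = stage_one };

	while (j.step(&j) == JOB_CONTINUE)	/* cf. ipr_reset_ioa_job() */
		;
	return 0;
}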
8477 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8497 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
8498 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
8499 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8500 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
8501 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8504 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
8505 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
8506 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
8507 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
8511 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
8512 ipr_cmd->job_step = job_step; in _ipr_initiate_ioa_reset()
8513 ipr_cmd->u.shutdown_type = shutdown_type; in _ipr_initiate_ioa_reset()
8519 * ipr_initiate_ioa_reset - Initiate an adapter reset
8535 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
8538 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
8539 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
8540 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
8541 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
8542 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
8545 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
8546 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
8547 "IOA taken offline - error recovery failed\n"); in ipr_initiate_ioa_reset()
8549 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
8550 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
8551 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8552 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
8553 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8557 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
8558 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
8559 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
8561 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
8563 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
8564 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
8565 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
8569 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
8579 * ipr_reset_freeze - Hold off all I/O activity
8588 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze()
8592 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
8593 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8594 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
8595 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
8599 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_freeze()
8604 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8607 * Description: This routine is called to tell us that MMIO access to the IOA has been restored after a PCI bus error. in ipr_pci_mmio_enabled()
8615 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
8616 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
8618 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
8623 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8626 * Description: This routine is called to tell us that the PCI bus is down; the driver can only quiesce and wait for the slot reset. in ipr_pci_frozen()
8635 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
8636 if (ioa_cfg->probe_done) in ipr_pci_frozen()
8638 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
8642 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8654 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
8655 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
8656 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
8662 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
8663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
8668 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8680 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
8681 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
8682 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
8683 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
8684 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
8685 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
8686 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
8687 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
8688 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
8689 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
8694 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
8695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
8699 * ipr_pci_error_detected - Called when a PCI error is detected.
8725 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8739 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
8740 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
8741 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
8742 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
8743 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
8748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
8754 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8764 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
8766 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
8767 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
8768 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
8769 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
8771 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
8775 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
8777 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
8778 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
8779 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
8780 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
8781 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
8785 * ipr_free_mem - Frees memory allocated for an adapter
8795 kfree(ioa_cfg->res_entries); in ipr_free_mem()
8796 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
8797 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
8800 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
8801 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
8802 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
8803 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
8804 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
8806 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
8807 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
8810 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
8812 ioa_cfg->hostrcb[i], in ipr_free_mem()
8813 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
8817 kfree(ioa_cfg->trace); in ipr_free_mem()
8821 * ipr_free_irqs - Free all allocated IRQs for the adapter.
8832 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
8835 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
8836 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
8841 * ipr_free_all_resources - Free all allocated resources for an adapter.
8852 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
8856 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
8857 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
8858 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
8861 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
8867 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8871 * 0 on success / -ENOMEM on allocation failure
8880 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
8883 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
8884 return -ENOMEM; in ipr_alloc_cmd_blks()
8886 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
8887 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
8889 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
8891 return -ENOMEM; in ipr_alloc_cmd_blks()
8894 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
8895 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
8898 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
8899 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
8900 (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8904 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
8905 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
8907 (i - 1) * entries_each_hrrq; in ipr_alloc_cmd_blks()
8908 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
8910 i * entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8914 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
8915 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8917 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
8920 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
8922 i = IPR_NUM_CMD_BLKS - in ipr_alloc_cmd_blks()
8923 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
8925 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
8926 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
8930 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
8935 return -ENOMEM; in ipr_alloc_cmd_blks()
8938 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
8939 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
8941 ioarcb = &ipr_cmd->ioarcb; in ipr_alloc_cmd_blks()
8942 ipr_cmd->dma_addr = dma_addr; in ipr_alloc_cmd_blks()
8943 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
8944 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); in ipr_alloc_cmd_blks()
8946 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); in ipr_alloc_cmd_blks()
8948 ioarcb->host_response_handle = cpu_to_be32(i << 2); in ipr_alloc_cmd_blks()
8949 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
8950 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_alloc_cmd_blks()
8952 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
8955 ioarcb->write_ioadl_addr = in ipr_alloc_cmd_blks()
8957 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_alloc_cmd_blks()
8958 ioarcb->ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
8961 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); in ipr_alloc_cmd_blks()
8962 ipr_cmd->cmd_index = i; in ipr_alloc_cmd_blks()
8963 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
8964 ipr_cmd->sense_buffer_dma = dma_addr + in ipr_alloc_cmd_blks()
8967 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; in ipr_alloc_cmd_blks()
8968 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
8969 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
8970 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
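/*
 * Editor's sketch of the command-id partitioning above: queue 0 keeps a
 * small block of ids reserved for internal commands, the remaining ids
 * are split evenly across the other queues, and the last queue absorbs
 * any rounding slack. Standalone, with illustrative sizes (not the
 * driver's actual constants):
 */
#include <stdio.h>

#define NUM_CMD_BLKS	100	/* illustrative */
#define INTERNAL_BLKS	4	/* illustrative */

int main(void)
{
	int hrrq_num = 3, min, max = -1;

	for (int i = 0; i < hrrq_num; i++) {
		int n = (i == 0) ? INTERNAL_BLKS :
			(NUM_CMD_BLKS - INTERNAL_BLKS) / (hrrq_num - 1);

		min = max + 1;
		max = min + n - 1;
		if (i == hrrq_num - 1)
			max = NUM_CMD_BLKS - 1;	/* absorb the remainder */
		printf("hrrq %d: cmd ids %d..%d\n", i, min, max);
	}
	return 0;
}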
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
				       sizeof(struct ipr_resource_entry),
				       GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i >= 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
				 sizeof(struct ipr_trace_entry),
				 GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
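/*
 * The error unwind above follows the usual kernel goto-ladder shape:
 * each failure jumps to the label that releases only what was already
 * allocated, in reverse allocation order, and "while (i-- > 0)" walks
 * back just the hostrcbs that were successfully obtained before the
 * failing iteration.
 */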
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
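/*
 * This table of computed __iomem pointers is what lets one driver binary
 * serve several chip generations: each chip configuration carries its own
 * register offsets, so hot-path code can do a plain
 * readl(ioa_cfg->regs.sense_interrupt_reg32) with no per-chip branching.
 */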
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_channel = IPR_MAX_SIS64_BUSES;
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_channel = IPR_VSET_BUS;
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
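/*
 * hrrq[0] aliases the midlayer's host_lock rather than getting a private
 * lock, presumably because the initial queue is already manipulated under
 * host_lock everywhere; the remaining queues take their own _lock so the
 * per-vector completion paths need not serialize on one global lock.
 */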
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
				      struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
				 ipr_isr_mhrrq, 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i > 0)
				free_irq(pci_irq_vector(pdev, i),
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
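/*
 * On a request_irq() failure above, "while (--i > 0)" releases only
 * vectors i-1 down to 1. Vector 0 is requested (and freed) separately in
 * the probe path, which is why the unwind deliberately stops short of
 * index 0.
 */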
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	PCI device struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return IRQ_HANDLED;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to
 * determine if the interrupt is received via the ipr_test_intr() service
 * routine. If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;
	return rc;
}
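/*
 * The MSI test is self-contained: unmask the IO debug acknowledge
 * interrupt, write it back to fire one test interrupt, then wait up to
 * HZ jiffies for ipr_test_intr() to set msi_received. Anything else is
 * treated as "MSI broken on this platform" and the driver falls back to
 * legacy line interrupts.
 */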
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
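/*
 * ipr_probe_ioa() is deliberately only stage one of bringup: it claims
 * and wires resources but leaves the adapter quiescent. Stage two runs
 * from ipr_probe() below, which brings the IOA operational and attaches
 * it to the SCSI midlayer, so a late failure can still unwind through
 * __ipr_remove().
 */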
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
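/*
 * Ordering note: every hrrq is flagged removing_ioa under its own _lock
 * before the bringdown starts, so per-vector completion paths see the
 * flag and stop touching the adapter before its resources are torn down.
 */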
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
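/*
 * Removal is probe run backwards: the sysfs attributes created in
 * ipr_probe() come off first, then the host detaches from the SCSI
 * midlayer, and only then does __ipr_remove() quiesce and free the
 * adapter itself.
 */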
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device ID
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
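/*
 * irq_poll is armed only for the secondary HRRQs on SIS-64 adapters with
 * multiple vectors: hrrq[0] stays purely interrupt driven for internal
 * commands, while the I/O queues may switch to polled completion under
 * load, amortizing interrupt overhead much like NAPI does for network
 * drivers.
 */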
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
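/*
 * With the ipr_fast_reboot module parameter set, a restart on a SIS-64
 * adapter uses a lighter quiesce instead of a full cache-flush shutdown,
 * then releases the IRQs and disables the PCI device so the adapter sits
 * quiet across the reboot.
 */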
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_SHUTDOWN_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
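/*
 * The reboot notifier fires once per restart/halt/poweroff and queues a
 * single "shutdown prepare" IOA command per registered adapter; adapters
 * not accepting commands, or about to take the fast-reboot quiesce path,
 * are skipped.
 */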
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);