/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/*
 * qla2x00_debounce_register
 *	Debounce register.
 *
 * Input:
 *	addr = register address.
 *
 * Returns:
 *	register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	/* Re-read until two consecutive reads return the same value. */
	do {
		first = RD_REG_WORD(addr);
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}

/* Service a response queue synchronously by invoking the interrupt handler. */
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	unsigned long flags;
	struct qla_hw_data *ha = rsp->hw;

	local_irq_save(flags);
	if (IS_QLA82XX(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
	local_irq_restore(flags);
}

/* Reverse the byte order of each 32-bit word in @fcp, in place. */
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

/* Mark loop IDs that must never be assigned to fcports (non-FWI2 ISPs only). */
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
	int i;

	if (IS_FWI2_CAPABLE(ha))
		return;

	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
		set_bit(i, ha->loop_id_map);
	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
	set_bit(BROADCAST, ha->loop_id_map);
}

/* Return non-zero if @loop_id is reserved and may not be used for an fcport. */
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return (loop_id > NPH_LAST_HANDLE);

	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

/* Release an fcport's loop ID back to the allocation map. */
static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

/* Free every DSD entry hanging off the command's CRC context. */
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

	/* Clean up the previously allocated pool. */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

/* Set the fcport state and log the transition. */
static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport. */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort state transitioned from %s to %s - "
		    "portid=%02x%02x%02x.\n",
		    port_state_str[old_state], port_state_str[state],
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}
}

/*
 * Return 1 if HBA (firmware) protection-information checking should be
 * enabled for this command's DIF operation, based on ql2xenablehba_err_chk.
 */
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

/* Return non-zero if an ISP abort/reset is pending or in progress. */
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

/* Allocate a zeroed srb from the mempool, holding a vha busy reference. */
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(ha->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

/* Return an srb to the mempool and drop the vha busy reference. */
static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
	mempool_free(sp, vha->hw->srb_mempool);
	QLA_VHA_MARK_NOT_BUSY(vha);
}

/* Arm the srb's IOCB timer to expire after @tmo seconds. */
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
}
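/*
 * Illustrative sketch only (not driver code): how callers typically pair
 * the srb helpers above. my_sp_done() is a hypothetical completion callback;
 * real users also fill in the relevant sp->u.iocb_cmd fields before issuing
 * the request with qla2x00_start_sp().
 *
 *	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	if (!sp)
 *		return QLA_FUNCTION_FAILED;
 *	sp->type = SRB_LOGIN_CMD;
 *	sp->name = "login";
 *	sp->done = my_sp_done;
 *	qla2x00_init_timer(sp, 10);
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		sp->free(vha, sp);
 *
 * qla2x00_init_timer() points sp->free at qla2x00_sp_free, so the error path
 * above is expected to release the srb and the vha busy reference taken by
 * qla2x00_get_sp().
 */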
/* Size in bytes of the firmware Get ID (GID) list for this adapter. */
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

/* Schedule a queue-depth ramp up once both hold-off intervals have elapsed. */
static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
	if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
		return;

	/* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up. */
	if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
	    HOST_QUEUE_RAMPDOWN_INTERVAL)))
		return;

	/* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up. */
	if (time_before(jiffies, (vha->hw->host_last_rampup_time +
	    HOST_QUEUE_RAMPUP_INTERVAL)))
		return;

	set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}