/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HBA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds
 * (e.g. 12 DSDs need 1 command IOCB + 3 continuation IOCBs = 4).
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = RD_REG_WORD(addr);
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}

/* Poll a response queue: run the interrupt handler by hand with local
 * interrupts disabled. */
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	unsigned long flags;
	struct qla_hw_data *ha = rsp->hw;

	local_irq_save(flags);
	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
	local_irq_restore(flags);
}

/* Byte-swap @bsize bytes of @fcp in place, one 32-bit word at a time. */
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

/* Copy @bsize bytes from @src to @dst, converting each 32-bit word to
 * little-endian adapter order. */
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

/* Pre-mark loop IDs that must never be assigned to fcports (not needed on
 * FWI2-capable HBAs). */
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
	int i;

	if (IS_FWI2_CAPABLE(ha))
		return;

	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
		set_bit(i, ha->loop_id_map);
	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
	set_bit(BROADCAST, ha->loop_id_map);
}

static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return (loop_id > NPH_LAST_HANDLE);

	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

static inline void
qla2x00_clear_loop_id(fc_port_t *fcport) {
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

/* Free every DSD chained off the command's CRC context and reset the list. */
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
	struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	if (sp)
		ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
	else if (tc)
		ctx = (struct crc_context *)tc->ctx;
	else {
		BUG();
		return;
	}

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - "
		    "portid=%02x%02x%02x.\n", fcport->port_name,
		    port_state_str[old_state], port_state_str[state],
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}
}

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

/* Allocate and zero an SRB from the queue pair's mempool, marking the queue
 * pair busy for the lifetime of the SRB. */
static inline srb_t *
qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = qpair->vha;
done:
	if (!sp)
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(vha->hw->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	mempool_free(sp, sp->vha->hw->srb_mempool);
}

/* Arm the per-SRB timeout timer (@tmo is in seconds) and initialize the
 * completions used by FX00 and ELS driver-generated commands. */
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
	if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
	if (sp->type == SRB_ELS_DCMD)
		init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

/* Complete a pending mailbox command wait when the interrupt status reports a
 * mailbox interrupt. */
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

/* @retry_delay is specified in tenths of a second. */
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
{
	if (retry_delay)
		fcport->retry_delay_timestamp = jiffies +
		    (retry_delay * HZ / 10);
}