/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}
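
/*
 * Note on qla2x00_set_fcport_disc_state() above: with shiftbits = 4,
 * shadow_disc_state keeps a rolling history of the most recent
 * disc_state values, one nibble each, newest in bits 3:0.  For example,
 * starting from zero, the transition sequence 0x2 -> 0x5 -> 0x7 leaves
 * shadow_disc_state == 0x257, and the debug message reports the
 * previous state taken from (old_val & mask).
 */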

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	INIT_LIST_HEAD(&sp->elem);
}

static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
		     fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;
	struct qla_qpair *qpair;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
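
/*
 * Worked example for qla2x00_set_retry_delay_timestamp() below (the
 * numbers are an illustrative assumption, not taken from the driver): a
 * SCSI status qualifier of 0x4014 decodes to scope = 1 (I-T nexus) and
 * qual = 0x14 = 20, so the port holds off new I/O for 20 * 100 ms =
 * 2000 ms, i.e. retry_delay_timestamp = jiffies + 2 * HZ.
 */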

static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	    "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	    fcport->port_name, sts_qual, qual * 100);
}

static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data = ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

enum {
	RESOURCE_NONE,
	RESOURCE_INI,
};
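
/*
 * qla_get_iocbs() / qla_put_iocbs() below implement a best-effort IOCB
 * budget when ql2xenforce_iocb_limit is set: a request is first charged
 * against the per-queue-pair budget (fwres.iocbs_qp_limit) and, if that
 * is exhausted, against a rough sum of all queue pairs' usage compared
 * with the firmware-wide limit (fwres.iocbs_limit).  The counters are
 * read without the qpair lock, so the global check is approximate by
 * design.
 */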

static inline int
qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
		qp->fwres.iocbs_used += iores->iocb_cnt;
		return 0;
	} else {
		/*
		 * No need to acquire the qpair lock; this is just a
		 * rough calculation.
		 */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
			qp->fwres.iocbs_used += iores->iocb_cnt;
			return 0;
		} else {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
}

static inline void
qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	switch (iores->res_type) {
	case RESOURCE_NONE:
		break;
	default:
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* Should not happen. */
			qp->fwres.iocbs_used = 0;
		}
		break;
	}
	iores->res_type = RESOURCE_NONE;
}
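
/*
 * Usage sketch (illustrative only, not part of this header): a typical
 * submission path reserves IOCB slots before building a command and
 * releases them on completion or on a queuing error.  The field names
 * below mirror the srb/iocb_resource usage in the callers and should be
 * treated as an assumption of this sketch:
 *
 *	sp->iores.res_type = RESOURCE_INI;
 *	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
 *	if (qla_get_iocbs(sp->qpair, &sp->iores))
 *		goto queuing_error;
 *	...
 *	qla_put_iocbs(sp->qpair, &sp->iores);
 */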