1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 
8 #include "qla_target.h"
9 /**
10  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
11  * Continuation Type 1 IOCBs to allocate.
12  *
13  * @vha: HA context
 * @dsds: number of data segment descriptors needed
15  *
16  * Returns the number of IOCB entries needed to store @dsds.
17  */
18 static inline uint16_t
19 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
20 {
21 	uint16_t iocbs;
22 
23 	iocbs = 1;
24 	if (dsds > 1) {
25 		iocbs += (dsds - 1) / 5;
26 		if ((dsds - 1) % 5)
27 			iocbs++;
28 	}
29 	return iocbs;
30 }
31 
32 /*
33  * qla2x00_debounce_register
34  *      Debounce register.
35  *
36  * Input:
37  *      port = register address.
38  *
39  * Returns:
40  *      register value.
41  */
42 static __inline__ uint16_t
43 qla2x00_debounce_register(volatile uint16_t __iomem *addr)
44 {
45 	volatile uint16_t first;
46 	volatile uint16_t second;
47 
48 	do {
49 		first = RD_REG_WORD(addr);
50 		barrier();
51 		cpu_relax();
52 		second = RD_REG_WORD(addr);
53 	} while (first != second);
54 
55 	return (first);
56 }
57 
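/**
 * qla2x00_poll() - Service pending interrupts for a response queue.
 *
 * @rsp: response queue to poll
 *
 * Invokes the ISP-specific interrupt handler directly with local
 * interrupts disabled.
 */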
58 static inline void
59 qla2x00_poll(struct rsp_que *rsp)
60 {
61 	unsigned long flags;
	struct qla_hw_data *ha = rsp->hw;

	local_irq_save(flags);
64 	if (IS_P3P_TYPE(ha))
65 		qla82xx_poll(0, rsp);
66 	else
67 		ha->isp_ops->intr_handler(0, rsp);
68 	local_irq_restore(flags);
69 }
70 
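/**
 * host_to_fcp_swap() - Byte-swap a buffer, 32 bits at a time, in place.
 *
 * @fcp: buffer to swap
 * @bsize: buffer size in bytes (assumed to be a multiple of 4)
 *
 * Returns the swapped buffer.
 */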
71 static inline uint8_t *
72 host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
73 {
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
82 }
83 
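/**
 * host_to_adap() - Copy a buffer to the adapter in little-endian format.
 *
 * @src: source buffer in host byte order
 * @dst: destination buffer in adapter (little-endian) byte order
 * @bsize: buffer size in bytes (assumed to be a multiple of 4)
 */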
84 static inline void
85 host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
86 {
87 	uint32_t *isrc = (uint32_t *) src;
88 	__le32 *odest = (__le32 *) dst;
89 	uint32_t iter = bsize >> 2;
90 
91 	for ( ; iter--; isrc++)
92 		*odest++ = cpu_to_le32(*isrc);
93 }
94 
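/**
 * qla2x00_set_reserved_loop_ids() - Mark reserved loop IDs as in use.
 *
 * @ha: HW context
 *
 * On non-FWI2 adapters, pre-sets the loop_id_map bits for IDs that must
 * never be assigned to ports.
 */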
95 static inline void
96 qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
97 {
98 	int i;
99 
100 	if (IS_FWI2_CAPABLE(ha))
101 		return;
102 
103 	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
104 		set_bit(i, ha->loop_id_map);
105 	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
106 	set_bit(BROADCAST, ha->loop_id_map);
107 }
108 
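/**
 * qla2x00_is_reserved_id() - Check whether a loop ID is reserved.
 *
 * @vha: HA context
 * @loop_id: loop ID to check
 *
 * Returns nonzero if @loop_id must not be assigned to a port.
 */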
109 static inline int
110 qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
111 {
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
114 		return (loop_id > NPH_LAST_HANDLE);
115 
116 	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
117 	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
118 }
119 
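/**
 * qla2x00_clear_loop_id() - Release an fcport's loop ID.
 *
 * @fcport: port whose loop ID is released
 *
 * Clears the ID from the loop ID map and resets it to FC_NO_LOOP_ID,
 * unless the ID is already unset or reserved.
 */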
120 static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
122 	struct qla_hw_data *ha = fcport->vha->hw;
123 
124 	if (fcport->loop_id == FC_NO_LOOP_ID ||
125 	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
126 		return;
127 
128 	clear_bit(fcport->loop_id, ha->loop_id_map);
129 	fcport->loop_id = FC_NO_LOOP_ID;
130 }
131 
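/**
 * qla2x00_clean_dsd_pool() - Free all DSD entries of a CRC context.
 *
 * @ha: HW context
 * @ctx: CRC context whose dsd_list is torn down
 */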
132 static inline void
133 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
134 {
135 	struct dsd_dma *dsd, *tdsd;
136 
137 	/* clean up allocated prev pool */
138 	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
139 		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
140 		    dsd->dsd_list_dma);
141 		list_del(&dsd->list);
142 		kfree(dsd);
143 	}
144 	INIT_LIST_HEAD(&ctx->dsd_list);
145 }
146 
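/**
 * qla2x00_set_fcport_state() - Update an fcport's state with logging.
 *
 * @fcport: port to update
 * @state: new port state
 *
 * Logs the transition unless the port is still being initialized
 * (old state of zero) or the state is unchanged.
 */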
147 static inline void
148 qla2x00_set_fcport_state(fc_port_t *fcport, int state)
149 {
150 	int old_state;
151 
152 	old_state = atomic_read(&fcport->state);
153 	atomic_set(&fcport->state, state);
154 
155 	/* Don't print state transitions during initial allocation of fcport */
156 	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
163 	}
164 }
165 
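/**
 * qla2x00_hba_err_chk_enabled() - Decide if HBA protection checking applies.
 *
 * @sp: SCSI request block (srb)
 *
 * Returns 1 if the HBA should verify DIF protection data for this
 * command, based on the protection operation and the
 * ql2xenablehba_err_chk module parameter.
 */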
166 static inline int
167 qla2x00_hba_err_chk_enabled(srb_t *sp)
168 {
169 	/*
170 	 * Uncomment when corresponding SCSI changes are done.
171 	 *
172 	if (!sp->cmd->prot_chk)
173 		return 0;
174 	 *
175 	 */
176 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
177 	case SCSI_PROT_READ_STRIP:
178 	case SCSI_PROT_WRITE_INSERT:
179 		if (ql2xenablehba_err_chk >= 1)
180 			return 1;
181 		break;
182 	case SCSI_PROT_READ_PASS:
183 	case SCSI_PROT_WRITE_PASS:
184 		if (ql2xenablehba_err_chk >= 2)
185 			return 1;
186 		break;
187 	case SCSI_PROT_READ_INSERT:
188 	case SCSI_PROT_WRITE_STRIP:
189 		return 1;
190 	}
191 	return 0;
192 }
193 
194 static inline int
195 qla2x00_reset_active(scsi_qla_host_t *vha)
196 {
197 	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
198 
199 	/* Test appropriate base-vha and vha flags. */
200 	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
201 	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
202 	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
203 	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
204 	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
205 }
206 
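/**
 * qla2xxx_get_qpair_sp() - Allocate an srb from a queue pair's mempool.
 *
 * @qpair: queue pair that will own the srb
 * @fcport: port the srb is issued on behalf of
 * @flag: allocation flags (GFP_*)
 *
 * Marks the queue pair busy for the lifetime of the srb; released by
 * qla2xxx_rel_qpair_sp(). Returns NULL on failure.
 */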
207 static inline srb_t *
208 qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
209 {
210 	srb_t *sp = NULL;
211 	uint8_t bail;
212 
213 	QLA_QPAIR_MARK_BUSY(qpair, bail);
214 	if (unlikely(bail))
215 		return NULL;
216 
217 	sp = mempool_alloc(qpair->srb_mempool, flag);
218 	if (!sp)
219 		goto done;
220 
221 	memset(sp, 0, sizeof(*sp));
222 	sp->fcport = fcport;
223 	sp->iocbs = 1;
224 	sp->vha = qpair->vha;
225 done:
226 	if (!sp)
227 		QLA_QPAIR_MARK_NOT_BUSY(qpair);
228 	return sp;
229 }
230 
231 static inline void
232 qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
233 {
234 	mempool_free(sp, qpair->srb_mempool);
235 	QLA_QPAIR_MARK_NOT_BUSY(qpair);
236 }
237 
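/**
 * qla2x00_get_sp() - Allocate an srb from the host's mempool.
 *
 * @vha: HA context
 * @fcport: port the srb is issued on behalf of
 * @flag: allocation flags (GFP_*)
 *
 * Marks the vha busy for the lifetime of the srb; released by
 * qla2x00_rel_sp(). Returns NULL on failure.
 */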
238 static inline srb_t *
239 qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
240 {
241 	srb_t *sp = NULL;
242 	uint8_t bail;
243 
244 	QLA_VHA_MARK_BUSY(vha, bail);
245 	if (unlikely(bail))
246 		return NULL;
247 
248 	sp = mempool_alloc(vha->hw->srb_mempool, flag);
249 	if (!sp)
250 		goto done;
251 
252 	memset(sp, 0, sizeof(*sp));
253 	sp->fcport = fcport;
254 	sp->cmd_type = TYPE_SRB;
255 	sp->iocbs = 1;
256 	sp->vha = vha;
257 done:
258 	if (!sp)
259 		QLA_VHA_MARK_NOT_BUSY(vha);
260 	return sp;
261 }
262 
263 static inline void
264 qla2x00_rel_sp(srb_t *sp)
265 {
266 	QLA_VHA_MARK_NOT_BUSY(sp->vha);
267 	mempool_free(sp, sp->vha->hw->srb_mempool);
268 }
269 
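/**
 * qla2x00_init_timer() - Arm the timeout timer for an IOCB srb.
 *
 * @sp: srb to arm
 * @tmo: timeout in seconds
 */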
270 static inline void
271 qla2x00_init_timer(srb_t *sp, unsigned long tmo)
272 {
273 	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
274 	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
275 	sp->free = qla2x00_sp_free;
276 	init_completion(&sp->comp);
277 	if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
278 		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
279 	if (sp->type == SRB_ELS_DCMD)
280 		init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
281 	add_timer(&sp->u.iocb_cmd.timer);
282 }
283 
284 static inline int
285 qla2x00_gid_list_size(struct qla_hw_data *ha)
286 {
287 	if (IS_QLAFX00(ha))
288 		return sizeof(uint32_t) * 32;
289 	else
290 		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
291 }
292 
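/**
 * qla2x00_handle_mbx_completion() - Complete a waiting mailbox command.
 *
 * @ha: HW context
 * @status: interrupt status returned by the interrupt handler
 *
 * Wakes up the mailbox waiter if one is pending and a mailbox
 * interrupt was received.
 */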
293 static inline void
294 qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
295 {
296 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
297 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
298 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
299 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
300 		complete(&ha->mbx_intr_comp);
301 	}
302 }
303 
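/**
 * qla2x00_set_retry_delay_timestamp() - Record an fcport's retry delay.
 *
 * @fcport: port the delay applies to
 * @retry_delay: requested delay, in 100 ms units
 *
 * Converts the delay to a jiffies timestamp after which commands may
 * be retried.
 */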
304 static inline void
305 qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
306 {
307 	if (retry_delay)
308 		fcport->retry_delay_timestamp = jiffies +
309 		    (retry_delay * HZ / 10);
310 }
311 
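/**
 * qla_is_exch_offld_enabled() - Check if exchange offload is in effect.
 *
 * @vha: HA context
 *
 * Returns true when the configured exchange count for the active mode
 * (initiator, target or dual) exceeds the firmware default.
 */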
312 static inline bool
313 qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
314 {
315 	if (qla_ini_mode_enabled(vha) &&
316 	    (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
317 		return true;
318 	else if (qla_tgt_mode_enabled(vha) &&
319 	    (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
320 		return true;
321 	else if (qla_dual_mode_enabled(vha) &&
322 	    ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
323 		return true;
324 	else
325 		return false;
326 }
327 
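/**
 * qla_cpu_update() - Update the CPU affinity of a queue pair.
 *
 * @qpair: queue pair to update
 * @cpuid: new CPU id
 *
 * Propagates the new CPU id to all hints attached to the queue pair.
 */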
328 static inline void
329 qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
330 {
331 	qpair->cpuid = cpuid;
332 
333 	if (!list_empty(&qpair->hints_list)) {
334 		struct qla_qpair_hint *h;
335 
336 		list_for_each_entry(h, &qpair->hints_list, hint_elem)
337 			h->cpuid = qpair->cpuid;
338 	}
339 }
340 
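/**
 * qla_qpair_to_hint() - Look up the target-mode hint for a queue pair.
 *
 * @tgt: target context holding the hint array
 * @qpair: queue pair to search for
 *
 * Returns the matching hint, or NULL if none is found.
 */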
341 static inline struct qla_qpair_hint *
342 qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
343 {
344 	struct qla_qpair_hint *h;
345 	u16 i;
346 
347 	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
348 		h = &tgt->qphints[i];
349 		if (h->qpair == qpair)
350 			return h;
351 	}
352 
353 	return NULL;
354 }
355 
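/**
 * qla_83xx_start_iocbs() - Notify the ISP of newly queued IOCBs.
 *
 * @qpair: queue pair whose request queue was updated
 *
 * Advances the request-queue ring index, wrapping at the end of the
 * ring, and writes it to the queue's in-pointer register.
 */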
356 static inline void
357 qla_83xx_start_iocbs(struct qla_qpair *qpair)
358 {
359 	struct req_que *req = qpair->req;
360 
361 	req->ring_index++;
362 	if (req->ring_index == req->length) {
363 		req->ring_index = 0;
364 		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
367 
368 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
369 }
370