xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_83xx.c (revision ed26297d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic iSCSI HBA Driver
4  * Copyright (c)   2003-2013 QLogic Corporation
5  */
6 
7 #include <linux/ratelimit.h>
8 
9 #include "ql4_def.h"
10 #include "ql4_version.h"
11 #include "ql4_glbl.h"
12 #include "ql4_dbg.h"
13 #include "ql4_inline.h"
14 
qla4_83xx_rd_reg(struct scsi_qla_host * ha,ulong addr)15 uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
16 {
17 	return readl((void __iomem *)(ha->nx_pcibase + addr));
18 }
19 
/* Write the 32-bit value @val to the 83xx CRB BAR at byte offset @addr. */
void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
{
	void __iomem *reg = (void __iomem *)(ha->nx_pcibase + addr);

	writel(val, reg);
}
24 
/*
 * Program this function's CRB window register to @addr and read it
 * back to verify the window actually moved.
 *
 * Returns QLA_SUCCESS on a verified write, QLA_ERROR if the readback
 * does not match.
 */
static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
{
	uint32_t readback;

	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
	readback = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));

	if (readback == addr)
		return QLA_SUCCESS;

	ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
		   __func__, addr, readback);
	return QLA_ERROR;
}
40 
/*
 * Indirect CRB read: move the register window to @addr and read the
 * value through the wildcard register into *@data.
 *
 * On window-set failure *@data is poisoned with 0xffffffff and
 * QLA_ERROR is returned.
 */
int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t *data)
{
	int rval = qla4_83xx_set_win_base(ha, addr);

	if (rval != QLA_SUCCESS) {
		*data = 0xffffffff;
		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
			   __func__, addr);
		return rval;
	}

	*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
	return rval;
}
58 
/*
 * Indirect CRB write: move the register window to @addr and write
 * @data through the wildcard register.
 *
 * Returns QLA_SUCCESS, or QLA_ERROR if the window could not be set
 * (nothing is written in that case).
 */
int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t data)
{
	int rval = qla4_83xx_set_win_base(ha, addr);

	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
			   __func__, addr, data);
		return rval;
	}

	qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
	return rval;
}
74 
qla4_83xx_flash_lock(struct scsi_qla_host * ha)75 static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
76 {
77 	int lock_owner;
78 	int timeout = 0;
79 	uint32_t lock_status = 0;
80 	int ret_val = QLA_SUCCESS;
81 
82 	while (lock_status == 0) {
83 		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
84 		if (lock_status)
85 			break;
86 
87 		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
88 			lock_owner = qla4_83xx_rd_reg(ha,
89 						      QLA83XX_FLASH_LOCK_ID);
90 			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
91 				   __func__, ha->func_num, lock_owner);
92 			ret_val = QLA_ERROR;
93 			break;
94 		}
95 		msleep(20);
96 	}
97 
98 	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
99 	return ret_val;
100 }
101 
/*
 * Release the hardware flash semaphore taken by qla4_83xx_flash_lock().
 * Clears the holder ID first, then performs the unlocking read.
 */
static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Invalidate the recorded lock holder (0xFF = no function). */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}
108 
/**
 * qla4_83xx_flash_read_u32 - Read dwords from flash under the flash lock
 * @ha: pointer to adapter structure
 * @flash_addr: flash byte address to read from; must be dword aligned
 * @p_data: destination buffer, at least u32_word_count * 4 bytes
 * @u32_word_count: number of 32-bit words to read
 *
 * Takes the hardware flash semaphore, then for each dword programs the
 * flash direct window to the 64KB region containing the current
 * address and reads the dword through the direct-data aperture.
 * The semaphore is released on all exit paths except a failed lock.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
			     uint8_t *p_data, int u32_word_count)
{
	int i;
	uint32_t u32_word;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_flash_lock(ha);
	if (ret_val == QLA_ERROR)
		goto exit_lock_error;

	/* Flash accesses must be 32-bit aligned. */
	if (addr & 0x03) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		/* Select the 64KB direct window containing addr. */
		ret_val = qla4_83xx_wr_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_WINDOW,
						    (addr & 0xFFFF0000));
		if (ret_val == QLA_ERROR) {
			/* Fixed misplaced newline in the original message
			 * ("...WINDOW\n!"); now matches the lockless
			 * variant's wording. */
			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		/* NOTE(review): stores the register value through a __le32
		 * view with le32_to_cpu() - preserved as-is; confirm
		 * endian handling against the firmware layout. */
		*(__le32 *)p_data = le32_to_cpu(u32_word);
		p_data = p_data + 4;
		addr = addr + 4;
	}

exit_flash_read:
	qla4_83xx_flash_unlock(ha);

exit_lock_error:
	return ret_val;
}
158 
/*
 * Read @u32_word_count dwords from flash starting at @flash_addr into
 * @p_data WITHOUT taking the flash semaphore (caller is responsible
 * for exclusivity).  The flash direct window is re-programmed each
 * time the read crosses a sector boundary.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t w;
	uint32_t word;
	uint32_t sector_off;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	sector_off = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	/* Only dword-aligned flash addresses are legal. */
	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Does the request spill past the end of the current sector? */
	if ((sector_off + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (w = 0; w < u32_word_count; w++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(word);
			p_data += 4;
			addr += 4;
			sector_off += 4;

			if (sector_off > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* Crossed into the next sector: the direct
				 * window must be re-programmed once per
				 * sector. */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						   QLA83XX_FLASH_DIRECT_WINDOW,
						   addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				sector_off = 0;
			}
		}
	} else {
		/* Single sector read */
		for (w = 0; w < u32_word_count; w++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(word);
			p_data += 4;
			addr += 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
240 
qla4_83xx_rom_lock_recovery(struct scsi_qla_host * ha)241 void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
242 {
243 	if (qla4_83xx_flash_lock(ha))
244 		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
245 
246 	/*
247 	 * We got the lock, or someone else is holding the lock
248 	 * since we are restting, forcefully unlock
249 	 */
250 	qla4_83xx_flash_unlock(ha);
251 }
252 
253 #define INTENT_TO_RECOVER	0x01
254 #define PROCEED_TO_RECOVER	0x02
255 
/*
 * qla4_83xx_lock_recovery - Recover a stuck IDC driver lock.
 *
 * Two-phase handshake through the DRV_LOCKRECOVERY register:
 * advertise intent to recover, wait 200ms, verify no other function
 * raced us, then proceed to force-unlock and re-acquire the driver
 * lock for this function.
 *
 * Returns QLA_SUCCESS if the lock was recovered and is now held by
 * this function, QLA_ERROR otherwise.
 */
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{

	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover: bits 2-5 carry our function number,
	 * bits 0-1 the recovery phase. */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised - another function may
	 * have overwritten it while we slept. */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock: clear holder ID, then the unlocking read. */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		/* Bump acquisition counter (bits 8-31) and record our
		 * function number (bits 0-7) in DRV_LOCK_ID. */
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
305 
306 #define	QLA83XX_DRV_LOCK_MSLEEP		200
307 
/**
 * qla4_83xx_drv_lock - Acquire the IDC driver lock for this function
 * @ha: pointer to adapter structure
 *
 * Polls QLA83XX_DRV_LOCK every QLA83XX_DRV_LOCK_MSLEEP ms.  On a
 * successful grab, DRV_LOCK_ID is updated: an acquisition counter in
 * bits 8-31 is incremented and our function number is stored in bits
 * 0-7.  If the same owner has held the lock for the whole timeout,
 * the IDC lock recovery handshake is attempted; if the owner changed,
 * the wait window restarts.
 */
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7) on
			 * getting a successful lock  */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			/* NOTE(review): if an earlier lock-recovery attempt
			 * failed, ret_val may still be QLA_ERROR here even
			 * though the lock was just obtained - confirm
			 * whether callers depend on this. */
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							  QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			/* Timed out: decode current owner for diagnostics. */
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed), when
				 * we were waiting for lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
375 
qla4_83xx_drv_unlock(struct scsi_qla_host * ha)376 void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
377 {
378 	int id;
379 
380 	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
381 
382 	if ((id & 0xFF) != ha->func_num) {
383 		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
384 			   __func__, ha->func_num, (id & 0xFF));
385 		return;
386 	}
387 
388 	/* Keep lock counter value, update the ha->func_num to 0xFF */
389 	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
390 	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
391 }
392 
qla4_83xx_set_idc_dontreset(struct scsi_qla_host * ha)393 void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
394 {
395 	uint32_t idc_ctrl;
396 
397 	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
398 	idc_ctrl |= DONTRESET_BIT0;
399 	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
400 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
401 			  idc_ctrl));
402 }
403 
qla4_83xx_clear_idc_dontreset(struct scsi_qla_host * ha)404 void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
405 {
406 	uint32_t idc_ctrl;
407 
408 	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
409 	idc_ctrl &= ~DONTRESET_BIT0;
410 	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
411 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
412 			  idc_ctrl));
413 }
414 
qla4_83xx_idc_dontreset(struct scsi_qla_host * ha)415 int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
416 {
417 	uint32_t idc_ctrl;
418 
419 	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
420 	return idc_ctrl & DONTRESET_BIT0;
421 }
422 
423 /*-------------------------IDC State Machine ---------------------*/
424 
/* Device classes encoded in the lower 2 bits of each function nibble
 * of the dev_partition info registers. */
enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};

/* Decoded per-PCI-function entry from the dev_partition registers. */
struct device_info {
	int func_num;		/* PCI function number (0-15) */
	int device_type;	/* one of the *_CLASS values above */
	int port_num;		/* raw port bits from the nibble (unshifted) */
};
437 
/*
 * qla4_83xx_can_perform_reset - Decide whether this function may own
 * the reset sequence.
 *
 * Walks all 16 PCI functions described by the two dev_partition info
 * registers, cross-checking drv_active to see which drivers are
 * loaded.  Returns 1 when no active NIC-class driver exists and this
 * function is the lowest-numbered active iSCSI function; 0 otherwise.
 */
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	uint32_t dev_part, dev_part1, dev_part2;
	int i;
	struct device_info device_map[16];
	int func_nibble;
	int nibble;
	int nic_present = 0;
	int iscsi_present = 0;
	int iscsi_func_low = 0;

	/* Use the dev_partition register to determine the PCI function number
	 * and then check drv_active register to see which driver is loaded */
	dev_part1 = qla4_83xx_rd_reg(ha,
				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);

	/* Each function has 4 bits in dev_partition Info register,
	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
	dev_part = dev_part1;
	for (i = nibble = 0; i <= 15; i++, nibble++) {
		func_nibble = dev_part & (0xF << (nibble * 4));
		func_nibble >>= (nibble * 4);
		device_map[i].func_num = i;
		device_map[i].device_type = func_nibble & 0x3;
		/* NOTE(review): stored unshifted (raw bits 2-3); port_num is
		 * never consumed below, so this is harmless as written. */
		device_map[i].port_num = func_nibble & 0xC;

		if (device_map[i].device_type == NIC_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* An active NIC driver always outranks us
				 * for reset ownership - stop scanning. */
				nic_present++;
				break;
			}
		} else if (device_map[i].device_type == ISCSI_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* Track the lowest active iSCSI function. */
				if (!iscsi_present ||
				iscsi_func_low > device_map[i].func_num)
					iscsi_func_low = device_map[i].func_num;

				iscsi_present++;
			}
		}

		/* For function_num[8..15] get info from dev_part2 register */
		if (nibble == 7) {
			nibble = 0;
			dev_part = dev_part2;
		}
	}

	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
	 * present. */
	if (!nic_present && (ha->func_num == iscsi_func_low)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
				  __func__, ha->func_num));
		return 1;
	}

	return 0;
}
501 
502 /**
503  * qla4_83xx_need_reset_handler - Code to start reset sequence
504  * @ha: pointer to adapter structure
505  *
506  * Note: IDC lock must be held upon entry
507  **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		/* We are NOT the reset owner: just acknowledge the reset
		 * and wait for the owner to move the device out of
		 * NEED_RESET state. */
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			/* Drop the IDC lock while sleeping so the reset
			 * owner can make progress (lock is held on entry,
			 * see function header comment). */
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		/* We ARE the reset owner: wait for every active function
		 * to ACK (drv_state catching up to drv_active), then
		 * bootstrap the device. */
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_ACTIVE);
		}

		if (drv_state != drv_active) {
			/* Timed out: evict the non-acking functions from
			 * drv_active so the reset can proceed without them. */
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}
579 
qla4_83xx_get_idc_param(struct scsi_qla_host * ha)580 void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
581 {
582 	uint32_t idc_params, ret_val;
583 
584 	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
585 					   (uint8_t *)&idc_params, 1);
586 	if (ret_val == QLA_SUCCESS) {
587 		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
588 		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
589 	} else {
590 		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
591 		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
592 	}
593 
594 	DEBUG2(ql4_printk(KERN_DEBUG, ha,
595 			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
596 			  __func__, ha->nx_dev_init_timeout,
597 			  ha->nx_reset_timeout));
598 }
599 
600 /*-------------------------Reset Sequence Functions-----------------------*/
601 
qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host * ha)602 static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
603 {
604 	uint8_t *phdr;
605 
606 	if (!ha->reset_tmplt.buff) {
607 		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
608 			   __func__);
609 		return;
610 	}
611 
612 	phdr = ha->reset_tmplt.buff;
613 
614 	DEBUG2(ql4_printk(KERN_INFO, ha,
615 			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
616 			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
617 			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
618 			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
619 			  *(phdr+13), *(phdr+14), *(phdr+15)));
620 }
621 
qla4_83xx_copy_bootloader(struct scsi_qla_host * ha)622 static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
623 {
624 	uint8_t *p_cache;
625 	uint32_t src, count, size;
626 	uint64_t dest;
627 	int ret_val = QLA_SUCCESS;
628 
629 	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
630 	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
631 	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
632 
633 	/* 128 bit alignment check */
634 	if (size & 0xF)
635 		size = (size + 16) & ~0xF;
636 
637 	/* 16 byte count */
638 	count = size/16;
639 
640 	p_cache = vmalloc(size);
641 	if (p_cache == NULL) {
642 		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
643 			   __func__);
644 		ret_val = QLA_ERROR;
645 		goto exit_copy_bootloader;
646 	}
647 
648 	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
649 						    size / sizeof(uint32_t));
650 	if (ret_val == QLA_ERROR) {
651 		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
652 			   __func__);
653 		goto exit_copy_error;
654 	}
655 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
656 			  __func__));
657 
658 	/* 128 bit/16 byte write to MS memory */
659 	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
660 					      count);
661 	if (ret_val == QLA_ERROR) {
662 		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
663 			   __func__);
664 		goto exit_copy_error;
665 	}
666 
667 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
668 			  __func__, size));
669 
670 exit_copy_error:
671 	vfree(p_cache);
672 
673 exit_copy_bootloader:
674 	return ret_val;
675 }
676 
qla4_83xx_check_cmd_peg_status(struct scsi_qla_host * ha)677 static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
678 {
679 	uint32_t val, ret_val = QLA_ERROR;
680 	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
681 
682 	do {
683 		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
684 		if (val == PHAN_INITIALIZE_COMPLETE) {
685 			DEBUG2(ql4_printk(KERN_INFO, ha,
686 					  "%s: Command Peg initialization complete. State=0x%x\n",
687 					  __func__, val));
688 			ret_val = QLA_SUCCESS;
689 			break;
690 		}
691 		msleep(CRB_CMDPEG_CHECK_DELAY);
692 	} while (--retries);
693 
694 	return ret_val;
695 }
696 
697 /**
698  * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
699  * value read ANDed with test_mask is equal to test_result.
700  *
701  * @ha : Pointer to adapter structure
702  * @addr : CRB register address
703  * @duration : Poll for total of "duration" msecs
704  * @test_mask : Mask value read with "test_mask"
705  * @test_result : Compare (value&test_mask) with test_result.
706  **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val;

	/* Initial sample. */
	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	/* Split the total duration into ten equal polling slices. */
	retries = duration / 10;
	do {
		if ((value & test_mask) == test_result) {
			ret_val = QLA_SUCCESS;
			break;
		}

		/* Not there yet: wait one slice and sample again. */
		msleep(duration / 10);
		ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
		if (ret_val == QLA_ERROR)
			goto exit_poll_reg;

		ret_val = QLA_ERROR;
	} while (retries--);

exit_poll_reg:
	if (ret_val == QLA_ERROR) {
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}
743 
qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host * ha)744 static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
745 {
746 	uint32_t sum =  0;
747 	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
748 	int u16_count =  ha->reset_tmplt.hdr->size / sizeof(uint16_t);
749 	int ret_val;
750 
751 	while (u16_count-- > 0)
752 		sum += *buff++;
753 
754 	while (sum >> 16)
755 		sum = (sum & 0xFFFF) +  (sum >> 16);
756 
757 	/* checksum of 0 indicates a valid template */
758 	if (~sum) {
759 		ret_val = QLA_SUCCESS;
760 	} else {
761 		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
762 			   __func__);
763 		ret_val = QLA_ERROR;
764 	}
765 
766 	return ret_val;
767 }
768 
769 /**
770  * qla4_83xx_read_reset_template - Read Reset Template from Flash
771  * @ha: Pointer to adapter structure
772  **/
qla4_83xx_read_reset_template(struct scsi_qla_host * ha)773 void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
774 {
775 	uint8_t *p_buff;
776 	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
777 	uint32_t ret_val;
778 
779 	ha->reset_tmplt.seq_error = 0;
780 	ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
781 	if (ha->reset_tmplt.buff == NULL) {
782 		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
783 			   __func__);
784 		goto exit_read_reset_template;
785 	}
786 
787 	p_buff = ha->reset_tmplt.buff;
788 	addr = QLA83XX_RESET_TEMPLATE_ADDR;
789 
790 	tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
791 				    sizeof(uint32_t);
792 
793 	DEBUG2(ql4_printk(KERN_INFO, ha,
794 			  "%s: Read template hdr size %d from Flash\n",
795 			  __func__, tmplt_hdr_def_size));
796 
797 	/* Copy template header from flash */
798 	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
799 					   tmplt_hdr_def_size);
800 	if (ret_val != QLA_SUCCESS) {
801 		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
802 			   __func__);
803 		goto exit_read_template_error;
804 	}
805 
806 	ha->reset_tmplt.hdr =
807 		(struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
808 
809 	/* Validate the template header size and signature */
810 	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
811 	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
812 	    (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
813 		ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
814 			   __func__, tmplt_hdr_size, tmplt_hdr_def_size);
815 		goto exit_read_template_error;
816 	}
817 
818 	addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
819 	p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
820 	tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
821 			      ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
822 
823 	DEBUG2(ql4_printk(KERN_INFO, ha,
824 			  "%s: Read rest of the template size %d\n",
825 			  __func__, ha->reset_tmplt.hdr->size));
826 
827 	/* Copy rest of the template */
828 	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
829 					   tmplt_hdr_def_size);
830 	if (ret_val != QLA_SUCCESS) {
831 		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
832 			   __func__);
833 		goto exit_read_template_error;
834 	}
835 
836 	/* Integrity check */
837 	if (qla4_83xx_reset_seq_checksum_test(ha)) {
838 		ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
839 			   __func__);
840 		goto exit_read_template_error;
841 	}
842 	DEBUG2(ql4_printk(KERN_INFO, ha,
843 			  "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
844 			  __func__));
845 
846 	/* Get STOP, START, INIT sequence offsets */
847 	ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
848 				      ha->reset_tmplt.hdr->init_seq_offset;
849 	ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
850 				       ha->reset_tmplt.hdr->start_seq_offset;
851 	ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
852 				      ha->reset_tmplt.hdr->hdr_size;
853 	qla4_83xx_dump_reset_seq_hdr(ha);
854 
855 	goto exit_read_reset_template;
856 
857 exit_read_template_error:
858 	vfree(ha->reset_tmplt.buff);
859 
860 exit_read_reset_template:
861 	return;
862 }
863 
864 /**
865  * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
866  *
867  * @ha : Pointer to adapter structure
868  * @raddr : CRB address to read from
869  * @waddr : CRB address to write to
870  **/
/* Copy the value of CRB register @raddr into CRB register @waddr. */
static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
					 uint32_t raddr, uint32_t waddr)
{
	uint32_t val = 0;

	qla4_83xx_rd_reg_indirect(ha, raddr, &val);
	qla4_83xx_wr_reg_indirect(ha, waddr, val);
}
879 
880 /**
881  * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
882  *
883  * This function read value from raddr, AND with test_mask,
884  * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
885  *
886  * @ha : Pointer to adapter structure
887  * @raddr : CRB address to read from
888  * @waddr : CRB address to write to
889  * @p_rmw_hdr : header with shift/or/xor values.
890  **/
/*
 * Read-modify-write of a CRB register, driven by a reset-template RMW
 * header: the source is either a saved template array slot (index_a)
 * or the register @raddr; the value is masked, shifted, OR'd and
 * XOR'd per the header, then written to @waddr.
 */
static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
				  uint32_t waddr,
				  struct qla4_83xx_rmw *p_rmw_hdr)
{
	uint32_t val;

	if (p_rmw_hdr->index_a)
		val = ha->reset_tmplt.array[p_rmw_hdr->index_a];
	else
		qla4_83xx_rd_reg_indirect(ha, raddr, &val);

	/* mask -> shl -> shr -> OR -> XOR, in template order */
	val &= p_rmw_hdr->test_mask;
	val <<= p_rmw_hdr->shl;
	val >>= p_rmw_hdr->shr;
	val |= p_rmw_hdr->or_value;
	val ^= p_rmw_hdr->xor_value;

	qla4_83xx_wr_reg_indirect(ha, waddr, val);
}
912 
qla4_83xx_write_list(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)913 static void qla4_83xx_write_list(struct scsi_qla_host *ha,
914 				 struct qla4_83xx_reset_entry_hdr *p_hdr)
915 {
916 	struct qla4_83xx_entry *p_entry;
917 	uint32_t i;
918 
919 	p_entry = (struct qla4_83xx_entry *)
920 		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
921 
922 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
923 		qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
924 		if (p_hdr->delay)
925 			udelay((uint32_t)(p_hdr->delay));
926 	}
927 }
928 
qla4_83xx_read_write_list(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)929 static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
930 				      struct qla4_83xx_reset_entry_hdr *p_hdr)
931 {
932 	struct qla4_83xx_entry *p_entry;
933 	uint32_t i;
934 
935 	p_entry = (struct qla4_83xx_entry *)
936 		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
937 
938 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
939 		qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
940 		if (p_hdr->delay)
941 			udelay((uint32_t)(p_hdr->delay));
942 	}
943 }
944 
qla4_83xx_poll_list(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)945 static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
946 				struct qla4_83xx_reset_entry_hdr *p_hdr)
947 {
948 	long delay;
949 	struct qla4_83xx_entry *p_entry;
950 	struct qla4_83xx_poll *p_poll;
951 	uint32_t i;
952 	uint32_t value;
953 
954 	p_poll = (struct qla4_83xx_poll *)
955 		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
956 
957 	/* Entries start after 8 byte qla4_83xx_poll, poll header contains
958 	 * the test_mask, test_value. */
959 	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
960 					     sizeof(struct qla4_83xx_poll));
961 
962 	delay = (long)p_hdr->delay;
963 	if (!delay) {
964 		for (i = 0; i < p_hdr->count; i++, p_entry++) {
965 			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
966 					   p_poll->test_mask,
967 					   p_poll->test_value);
968 		}
969 	} else {
970 		for (i = 0; i < p_hdr->count; i++, p_entry++) {
971 			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
972 					       p_poll->test_mask,
973 					       p_poll->test_value)) {
974 				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
975 							  &value);
976 				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
977 							  &value);
978 			}
979 		}
980 	}
981 }
982 
qla4_83xx_poll_write_list(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)983 static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
984 				      struct qla4_83xx_reset_entry_hdr *p_hdr)
985 {
986 	long delay;
987 	struct qla4_83xx_quad_entry *p_entry;
988 	struct qla4_83xx_poll *p_poll;
989 	uint32_t i;
990 
991 	p_poll = (struct qla4_83xx_poll *)
992 		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
993 	p_entry = (struct qla4_83xx_quad_entry *)
994 		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
995 	delay = (long)p_hdr->delay;
996 
997 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
998 		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
999 					  p_entry->dr_value);
1000 		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1001 					  p_entry->ar_value);
1002 		if (delay) {
1003 			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1004 					       p_poll->test_mask,
1005 					       p_poll->test_value)) {
1006 				DEBUG2(ql4_printk(KERN_INFO, ha,
1007 						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
1008 						  __func__, i,
1009 						  ha->reset_tmplt.seq_index));
1010 			}
1011 		}
1012 	}
1013 }
1014 
qla4_83xx_read_modify_write(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)1015 static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1016 					struct qla4_83xx_reset_entry_hdr *p_hdr)
1017 {
1018 	struct qla4_83xx_entry *p_entry;
1019 	struct qla4_83xx_rmw *p_rmw_hdr;
1020 	uint32_t i;
1021 
1022 	p_rmw_hdr = (struct qla4_83xx_rmw *)
1023 		    ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1024 	p_entry = (struct qla4_83xx_entry *)
1025 		  ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1026 
1027 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
1028 		qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1029 				      p_rmw_hdr);
1030 		if (p_hdr->delay)
1031 			udelay((uint32_t)(p_hdr->delay));
1032 	}
1033 }
1034 
/* SEQ_PAUSE opcode handler: stall the reset sequence for the delay
 * given in the entry header (treated as milliseconds, hence mdelay). */
static void qla4_83xx_pause(struct scsi_qla_host *ha,
			    struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((uint32_t)((long)p_hdr->delay));
}
1041 
/* POLL_READ_LIST opcode handler: for each quad entry, write the address
 * register, poll it for the expected value, and on success read the data
 * register and capture the value into the reset-template scratch array
 * (later READ_MODIFY_WRITE entries can reference it via index_a). */
static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
				     struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	/* Poll header (test_mask/test_value) follows the entry header;
	 * the quad entries follow the poll header. */
	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		/* A zero delay means nothing is polled or captured. */
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			} else {
				index = ha->reset_tmplt.array_index;
				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
							  &value);
				ha->reset_tmplt.array[index++] = value;

				/* NOTE(review): array_index is only written
				 * back when the array wraps, so successive
				 * captures within one call reuse the same
				 * slot. Looks suspicious, but the sibling
				 * qla2xxx reset-sequence code has the same
				 * pattern - confirm before changing. */
				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
					ha->reset_tmplt.array_index = 1;
			}
		}
	}
}
1081 
/* SEQ_END opcode handler: mark the current sub-sequence finished so
 * qla4_83xx_process_reset_template() stops iterating. */
static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
			      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.seq_end = 1;
}
1087 
qla4_83xx_template_end(struct scsi_qla_host * ha,struct qla4_83xx_reset_entry_hdr * p_hdr)1088 static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1089 				   struct qla4_83xx_reset_entry_hdr *p_hdr)
1090 {
1091 	ha->reset_tmplt.template_end = 1;
1092 
1093 	if (ha->reset_tmplt.seq_error == 0) {
1094 		DEBUG2(ql4_printk(KERN_INFO, ha,
1095 				  "%s: Reset sequence completed SUCCESSFULLY.\n",
1096 				  __func__));
1097 	} else {
1098 		ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1099 			   __func__);
1100 	}
1101 }
1102 
1103 /**
1104  * qla4_83xx_process_reset_template - Process reset template.
1105  *
1106  * Process all entries in reset template till entry with SEQ_END opcode,
1107  * which indicates end of the reset template processing. Each entry has a
1108  * Reset Entry header, entry opcode/command, with size of the entry, number
1109  * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
1110  *
1111  * @ha : Pointer to adapter structure
1112  * @p_buff : Common reset entry header.
1113  **/
qla4_83xx_process_reset_template(struct scsi_qla_host * ha,char * p_buff)1114 static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1115 					     char *p_buff)
1116 {
1117 	int index, entries;
1118 	struct qla4_83xx_reset_entry_hdr *p_hdr;
1119 	char *p_entry = p_buff;
1120 
1121 	ha->reset_tmplt.seq_end = 0;
1122 	ha->reset_tmplt.template_end = 0;
1123 	entries = ha->reset_tmplt.hdr->entries;
1124 	index = ha->reset_tmplt.seq_index;
1125 
1126 	for (; (!ha->reset_tmplt.seq_end) && (index  < entries); index++) {
1127 
1128 		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1129 		switch (p_hdr->cmd) {
1130 		case OPCODE_NOP:
1131 			break;
1132 		case OPCODE_WRITE_LIST:
1133 			qla4_83xx_write_list(ha, p_hdr);
1134 			break;
1135 		case OPCODE_READ_WRITE_LIST:
1136 			qla4_83xx_read_write_list(ha, p_hdr);
1137 			break;
1138 		case OPCODE_POLL_LIST:
1139 			qla4_83xx_poll_list(ha, p_hdr);
1140 			break;
1141 		case OPCODE_POLL_WRITE_LIST:
1142 			qla4_83xx_poll_write_list(ha, p_hdr);
1143 			break;
1144 		case OPCODE_READ_MODIFY_WRITE:
1145 			qla4_83xx_read_modify_write(ha, p_hdr);
1146 			break;
1147 		case OPCODE_SEQ_PAUSE:
1148 			qla4_83xx_pause(ha, p_hdr);
1149 			break;
1150 		case OPCODE_SEQ_END:
1151 			qla4_83xx_seq_end(ha, p_hdr);
1152 			break;
1153 		case OPCODE_TMPL_END:
1154 			qla4_83xx_template_end(ha, p_hdr);
1155 			break;
1156 		case OPCODE_POLL_READ_LIST:
1157 			qla4_83xx_poll_read_list(ha, p_hdr);
1158 			break;
1159 		default:
1160 			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1161 				   __func__, p_hdr->cmd, index);
1162 			break;
1163 		}
1164 
1165 		/* Set pointer to next entry in the sequence. */
1166 		p_entry += p_hdr->size;
1167 	}
1168 
1169 	ha->reset_tmplt.seq_index = index;
1170 }
1171 
/* Run the STOP sub-sequence of the reset template from the start of the
 * template; it is expected to terminate with a SEQ_END entry. */
static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
{
	ha->reset_tmplt.seq_index = 0;
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
			   __func__);
}
1181 
/* Run the START sub-sequence; as the final sub-sequence it is expected
 * to terminate with a TMPL_END entry (hence the template_end check). */
static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);

	if (ha->reset_tmplt.template_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
			   __func__);
}
1190 
/* Run the INIT sub-sequence, continuing from the current seq_index; it
 * is expected to terminate with a SEQ_END entry. */
static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
			   __func__);
}
1199 
/* Restart the firmware: run the STOP sequence, optionally capture a
 * minidump, run the INIT sequence, copy the bootloader, then run the
 * START sequence. Returns QLA_SUCCESS, or QLA_ERROR if the bootloader
 * copy fails (in which case the START sequence is skipped). */
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;
	uint32_t idc_ctrl;

	qla4_83xx_process_stop_seq(ha);

	/*
	 * Collect minidump.
	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
	 * don't collect minidump
	 */
	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
			   __func__);
	} else {
		qla4_8xxx_get_minidump(ha);
	}

	qla4_83xx_process_init_seq(ha);

	if (qla4_83xx_copy_bootloader(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_restart;
	}

	/* Tell the firmware to boot from flash, then kick off START. */
	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
	qla4_83xx_process_start_seq(ha);

exit_restart:
	return ret_val;
}
1237 
qla4_83xx_start_firmware(struct scsi_qla_host * ha)1238 int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1239 {
1240 	int ret_val = QLA_SUCCESS;
1241 
1242 	ret_val = qla4_83xx_restart(ha);
1243 	if (ret_val == QLA_ERROR) {
1244 		ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1245 		goto exit_start_fw;
1246 	} else {
1247 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1248 				  __func__));
1249 	}
1250 
1251 	ret_val = qla4_83xx_check_cmd_peg_status(ha);
1252 	if (ret_val == QLA_ERROR)
1253 		ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1254 			   __func__);
1255 
1256 exit_start_fw:
1257 	return ret_val;
1258 }
1259 
1260 /*----------------------Interrupt Related functions ---------------------*/
1261 
/* Disable IOCB interrupts if currently enabled; test_and_clear_bit()
 * makes this safe to call repeatedly. */
static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);
}
1267 
qla4_83xx_disable_mbox_intrs(struct scsi_qla_host * ha)1268 static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
1269 {
1270 	uint32_t mb_int, ret;
1271 
1272 	if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1273 		ret = readl(&ha->qla4_83xx_reg->mbox_int);
1274 		mb_int = ret & ~INT_ENABLE_FW_MB;
1275 		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1276 		writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1277 	}
1278 }
1279 
/* Disable all 83xx interrupt sources: mailbox first, then IOCB. */
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_disable_mbox_intrs(ha);
	qla4_83xx_disable_iocb_intrs(ha);
}
1285 
/* Enable IOCB interrupts if not already on, then record the state in
 * the adapter flags. */
static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
		qla4_8xxx_intr_enable(ha);
		set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
	}
}
1293 
qla4_83xx_enable_mbox_intrs(struct scsi_qla_host * ha)1294 void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
1295 {
1296 	uint32_t mb_int;
1297 
1298 	if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1299 		mb_int = INT_ENABLE_FW_MB;
1300 		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1301 		writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1302 		set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
1303 	}
1304 }
1305 
1306 
/* Enable all 83xx interrupt sources: mailbox first, then IOCB. */
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_enable_mbox_intrs(ha);
	qla4_83xx_enable_iocb_intrs(ha);
}
1312 
1313 
/**
 * qla4_83xx_queue_mbox_cmd - Load a mailbox command into the HW mailboxes.
 * @ha: Pointer to host adapter structure.
 * @mbx_cmd: Mailbox command values; mbx_cmd[0] holds the command itself.
 * @incount: Number of mailbox registers to load from @mbx_cmd.
 **/
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int incount)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < incount; i++)
		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);

	/* Mailbox 0 (the command) is written last, after its arguments. */
	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);

	/* Set Host Interrupt register to 1, to tell the firmware that
	 * a mailbox command is pending. Firmware after reading the
	 * mailbox command, clears the host interrupt register */
	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}
1330 
/**
 * qla4_83xx_process_mbox_intr - Service a pending mailbox interrupt.
 * @ha: Pointer to host adapter structure.
 * @outcount: Number of status mailboxes expected in the completion.
 **/
void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
{
	int intr_status;

	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
	if (intr_status) {
		/* Record how many status mailboxes the ISR should copy
		 * out before dispatching it. */
		ha->mbox_status_count = outcount;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}
1341 
/**
 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
 * @ha: pointer to host adapter structure.
 *
 * Transitions the IDC device state to NEED_RESET (unless reset recovery
 * is disabled via IDC_CTRL DONTRESETHBA_BIT0), claims reset ownership
 * when eligible, and drives recovery through the 8xxx device state
 * handler. Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	ha->isp_ops->idc_lock(ha);
	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);

	/* Honor the module parameter by marking "don't reset" in IDC_CTRL. */
	if (ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
		 * recovery */
		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_isp_reset;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
				  __func__));
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_NEED_RESET);

	} else {
		/* If device_state is NEED_RESET, go ahead with
		 * Reset,irrespective of ql4xdontresethba. This is to allow a
		 * non-reset-owner to force a reset. Non-reset-owner sets
		 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
		 * and then forces a Reset by setting device_state to
		 * NEED_RESET. */
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: HW state already set to NEED_RESET\n",
				  __func__));
	}

	/* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
	 * priority and which drivers are present. Unlike ISP8022, the function
	 * setting NEED_RESET, may not be the Reset owner. */
	if (qla4_83xx_can_perform_reset(ha))
		set_bit(AF_8XXX_RST_OWNER, &ha->flags);

	/* Drop the IDC lock while the state handler runs recovery. */
	ha->isp_ops->idc_unlock(ha);
	rval = qla4_8xxx_device_state_handler(ha);

	ha->isp_ops->idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
exit_isp_reset:
	ha->isp_ops->idc_unlock(ha);

	if (rval == QLA_SUCCESS)
		clear_bit(AF_FW_RECOVERY, &ha->flags);

	return rval;
}
1403 
/* Dump (via DEBUG2 logging) the SRE-Shim control register, the per-port
 * Rx buffer pause thresholds, traffic-class max-cell registers,
 * per-traffic-class Rx statistics, and the IFB pause thresholds.
 * Diagnostic only; no hardware state is changed except the TC-select
 * bits [31:29] of the RXB_TC_STATS registers used to index the stats. */
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
	u32 val = 0, val1 = 0;
	int i;

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));

	/* Port 0 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Rx Traffic Class Stats: select each TC in bits
	 * [31:29], then read back the per-class counter. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val);
	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
			  val, val1));
}
1491 
/* Program the SRE-Shim control, per-port Rx buffer pause thresholds,
 * per-port traffic-class max-cell registers and per-port IFB pause
 * thresholds with the "pause disabled" values. Caller must hold the
 * IDC lock and ensure the EPort is out of reset first (see
 * qla4_83xx_disable_pause()). */
static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	int i;

	/* set SRE-Shim Control Register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
				  QLA83XX_SET_PAUSE_VAL);

	for (i = 0; i < 8; i++) {
		/* Port 0 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				      QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
				      QLA83XX_SET_PAUSE_VAL);
		/* Port 1 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				      QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
				      QLA83XX_SET_PAUSE_VAL);
	}

	for (i = 0; i < 4; i++) {
		/* Port 0 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				     QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
				     QLA83XX_SET_TC_MAX_CELL_VAL);
		/* Port 1 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				     QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
				     QLA83XX_SET_TC_MAX_CELL_VAL);
	}

	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);

	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}
1529 
/**
 * qla4_83xx_eport_init - Initialize EPort.
 * @ha: Pointer to host adapter structure.
 *
 * If EPort hardware is in reset state before disabling pause, there would
 * be serious hardware wedging issues. To prevent this, perform eport init
 * every time before disabling pause frames.
 **/
static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
{
	/* Clear the 8 registers */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);

	/* Write any value to Reset Control register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);

	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
}
1555 
/* Disable pause frames under the IDC lock: bring the EPort out of
 * reset, dump the current pause-control registers for diagnostics,
 * then program the pause-disabled values. */
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	/* Before disabling pause frames, ensure that eport is not in reset */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);
	ha->isp_ops->idc_unlock(ha);
}
1565 
1566 /**
1567  * qla4_83xx_is_detached - Check if we are marked invisible.
1568  * @ha: Pointer to host adapter structure.
1569  **/
qla4_83xx_is_detached(struct scsi_qla_host * ha)1570 int qla4_83xx_is_detached(struct scsi_qla_host *ha)
1571 {
1572 	uint32_t drv_active;
1573 
1574 	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1575 
1576 	if (test_bit(AF_INIT_DONE, &ha->flags) &&
1577 	    !(drv_active & (1 << ha->func_num))) {
1578 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
1579 				  __func__, drv_active));
1580 		return QLA_SUCCESS;
1581 	}
1582 
1583 	return QLA_ERROR;
1584 }
1585