xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision a55b2d21)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 
22 /*
23  * Driver version
24  */
25 static char qla4xxx_version_str[40];
26 
27 /*
28  * SRB allocation cache
29  */
30 static struct kmem_cache *srb_cachep;
31 
32 /*
33  * Module parameter information and variables
34  */
35 static int ql4xdisablesysfsboot = 1;
36 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 		 " Set to disable exporting boot targets to sysfs.\n"
39 		 "\t\t  0 - Export boot targets\n"
40 		 "\t\t  1 - Do not export boot targets (Default)");
41 
42 int ql4xdontresethba;
43 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44 MODULE_PARM_DESC(ql4xdontresethba,
45 		 " Don't reset the HBA for driver recovery.\n"
46 		 "\t\t  0 - It will reset HBA (Default)\n"
47 		 "\t\t  1 - It will NOT reset HBA");
48 
49 int ql4xextended_error_logging;
50 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51 MODULE_PARM_DESC(ql4xextended_error_logging,
52 		 " Option to enable extended error logging.\n"
53 		 "\t\t  0 - no logging (Default)\n"
54 		 "\t\t  2 - debug logging");
55 
56 int ql4xenablemsix = 1;
57 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
58 MODULE_PARM_DESC(ql4xenablemsix,
59 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
60 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
61 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
62 		 "\t\t  2 = enable MSI interrupt mechanism.");
63 
64 #define QL4_DEF_QDEPTH 32
65 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
66 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
67 MODULE_PARM_DESC(ql4xmaxqdepth,
68 		 " Maximum queue depth to report for target devices.\n"
69 		 "\t\t  Default: 32.");
70 
71 static int ql4xqfulltracking = 1;
72 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73 MODULE_PARM_DESC(ql4xqfulltracking,
74 		 " Enable or disable dynamic tracking and adjustment of\n"
75 		 "\t\t scsi device queue depth.\n"
76 		 "\t\t  0 - Disable.\n"
77 		 "\t\t  1 - Enable. (Default)");
78 
79 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
80 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
81 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
82 		" Target Session Recovery Timeout.\n"
83 		"\t\t  Default: 120 sec.");
84 
85 int ql4xmdcapmask = 0x1F;
86 module_param(ql4xmdcapmask, int, S_IRUGO);
87 MODULE_PARM_DESC(ql4xmdcapmask,
88 		 " Set the Minidump driver capture mask level.\n"
89 		 "\t\t  Default is 0x1F.\n"
90 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
91 
92 int ql4xenablemd = 1;
93 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(ql4xenablemd,
95 		 " Set to enable minidump.\n"
96 		 "\t\t  0 - disable minidump\n"
97 		 "\t\t  1 - enable minidump (Default)");
98 
99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
100 /*
101  * SCSI host template entry points
102  */
103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
104 
105 /*
106  * iSCSI template entry points
107  */
108 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
109 				     enum iscsi_param param, char *buf);
110 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
111 				  enum iscsi_param param, char *buf);
112 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
113 				  enum iscsi_host_param param, char *buf);
114 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
115 				   uint32_t len);
116 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
117 				   enum iscsi_param_type param_type,
118 				   int param, char *buf);
119 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
120 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
121 						 struct sockaddr *dst_addr,
122 						 int non_blocking);
123 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
124 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
125 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
126 				enum iscsi_param param, char *buf);
127 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
128 static struct iscsi_cls_conn *
129 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
130 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
131 			     struct iscsi_cls_conn *cls_conn,
132 			     uint64_t transport_fd, int is_leading);
133 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
134 static struct iscsi_cls_session *
135 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
136 			uint16_t qdepth, uint32_t initial_cmdsn);
137 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
138 static void qla4xxx_task_work(struct work_struct *wdata);
139 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
140 static int qla4xxx_task_xmit(struct iscsi_task *);
141 static void qla4xxx_task_cleanup(struct iscsi_task *);
142 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
143 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
144 				   struct iscsi_stats *stats);
145 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
146 			     uint32_t iface_type, uint32_t payload_size,
147 			     uint32_t pid, struct sockaddr *dst_addr);
148 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
149 				 uint32_t *num_entries, char *buf);
150 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
151 
152 /*
153  * SCSI host template entry points
154  */
155 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
156 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_slave_alloc(struct scsi_device *device);
161 static int qla4xxx_slave_configure(struct scsi_device *device);
162 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
163 static umode_t ql4_attr_is_visible(int param_type, int param);
164 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 				      int reason);
167 
168 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
169     QLA82XX_LEGACY_INTR_CONFIG;
170 
171 static struct scsi_host_template qla4xxx_driver_template = {
172 	.module			= THIS_MODULE,
173 	.name			= DRIVER_NAME,
174 	.proc_name		= DRIVER_NAME,
175 	.queuecommand		= qla4xxx_queuecommand,
176 
177 	.eh_abort_handler	= qla4xxx_eh_abort,
178 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
179 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
180 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
181 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
182 
183 	.slave_configure	= qla4xxx_slave_configure,
184 	.slave_alloc		= qla4xxx_slave_alloc,
185 	.slave_destroy		= qla4xxx_slave_destroy,
186 	.change_queue_depth	= qla4xxx_change_queue_depth,
187 
188 	.this_id		= -1,
189 	.cmd_per_lun		= 3,
190 	.use_clustering		= ENABLE_CLUSTERING,
191 	.sg_tablesize		= SG_ALL,
192 
193 	.max_sectors		= 0xFFFF,
194 	.shost_attrs		= qla4xxx_host_attrs,
195 	.host_reset		= qla4xxx_host_reset,
196 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
197 };
198 
199 static struct iscsi_transport qla4xxx_iscsi_transport = {
200 	.owner			= THIS_MODULE,
201 	.name			= DRIVER_NAME,
202 	.caps			= CAP_TEXT_NEGO |
203 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
204 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
205 				  CAP_MULTI_R2T,
206 	.attr_is_visible	= ql4_attr_is_visible,
207 	.create_session         = qla4xxx_session_create,
208 	.destroy_session        = qla4xxx_session_destroy,
209 	.start_conn             = qla4xxx_conn_start,
210 	.create_conn            = qla4xxx_conn_create,
211 	.bind_conn              = qla4xxx_conn_bind,
212 	.stop_conn              = iscsi_conn_stop,
213 	.destroy_conn           = qla4xxx_conn_destroy,
214 	.set_param              = iscsi_set_param,
215 	.get_conn_param		= qla4xxx_conn_get_param,
216 	.get_session_param	= qla4xxx_session_get_param,
217 	.get_ep_param           = qla4xxx_get_ep_param,
218 	.ep_connect		= qla4xxx_ep_connect,
219 	.ep_poll		= qla4xxx_ep_poll,
220 	.ep_disconnect		= qla4xxx_ep_disconnect,
221 	.get_stats		= qla4xxx_conn_get_stats,
222 	.send_pdu		= iscsi_conn_send_pdu,
223 	.xmit_task		= qla4xxx_task_xmit,
224 	.cleanup_task		= qla4xxx_task_cleanup,
225 	.alloc_pdu		= qla4xxx_alloc_pdu,
226 
227 	.get_host_param		= qla4xxx_host_get_param,
228 	.set_iface_param	= qla4xxx_iface_set_param,
229 	.get_iface_param	= qla4xxx_get_iface_param,
230 	.bsg_request		= qla4xxx_bsg_request,
231 	.send_ping		= qla4xxx_send_ping,
232 	.get_chap		= qla4xxx_get_chap_list,
233 	.delete_chap		= qla4xxx_delete_chap,
234 };
235 
236 static struct scsi_transport_template *qla4xxx_scsi_transport;
237 
238 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
239 			     uint32_t iface_type, uint32_t payload_size,
240 			     uint32_t pid, struct sockaddr *dst_addr)
241 {
242 	struct scsi_qla_host *ha = to_qla_host(shost);
243 	struct sockaddr_in *addr;
244 	struct sockaddr_in6 *addr6;
245 	uint32_t options = 0;
246 	uint8_t ipaddr[IPv6_ADDR_LEN];
247 	int rval;
248 
249 	memset(ipaddr, 0, IPv6_ADDR_LEN);
250 	/* IPv4 to IPv4 */
251 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
252 	    (dst_addr->sa_family == AF_INET)) {
253 		addr = (struct sockaddr_in *)dst_addr;
254 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
255 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
256 				  "dest: %pI4\n", __func__,
257 				  &ha->ip_config.ip_address, ipaddr));
258 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
259 					 ipaddr);
260 		if (rval)
261 			rval = -EINVAL;
262 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
263 		   (dst_addr->sa_family == AF_INET6)) {
264 		/* IPv6 to IPv6 */
265 		addr6 = (struct sockaddr_in6 *)dst_addr;
266 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
267 
268 		options |= PING_IPV6_PROTOCOL_ENABLE;
269 
270 		/* Ping using LinkLocal address */
271 		if ((iface_num == 0) || (iface_num == 1)) {
272 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
273 					  "src: %pI6 dest: %pI6\n", __func__,
274 					  &ha->ip_config.ipv6_link_local_addr,
275 					  ipaddr));
276 			options |= PING_IPV6_LINKLOCAL_ADDR;
277 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
278 						 pid, ipaddr);
279 		} else {
280 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
281 				   "not supported\n", __func__, iface_num);
282 			rval = -ENOSYS;
283 			goto exit_send_ping;
284 		}
285 
286 		/*
287 		 * If ping using LinkLocal address fails, try ping using
288 		 * IPv6 address
289 		 */
290 		if (rval != QLA_SUCCESS) {
291 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
292 			if (iface_num == 0) {
293 				options |= PING_IPV6_ADDR0;
294 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
295 						  "Ping src: %pI6 "
296 						  "dest: %pI6\n", __func__,
297 						  &ha->ip_config.ipv6_addr0,
298 						  ipaddr));
299 			} else if (iface_num == 1) {
300 				options |= PING_IPV6_ADDR1;
301 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
302 						  "Ping src: %pI6 "
303 						  "dest: %pI6\n", __func__,
304 						  &ha->ip_config.ipv6_addr1,
305 						  ipaddr));
306 			}
307 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
308 						 pid, ipaddr);
309 			if (rval)
310 				rval = -EINVAL;
311 		}
312 	} else
313 		rval = -ENOSYS;
314 exit_send_ping:
315 	return rval;
316 }
317 
318 static umode_t ql4_attr_is_visible(int param_type, int param)
319 {
320 	switch (param_type) {
321 	case ISCSI_HOST_PARAM:
322 		switch (param) {
323 		case ISCSI_HOST_PARAM_HWADDRESS:
324 		case ISCSI_HOST_PARAM_IPADDRESS:
325 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
326 		case ISCSI_HOST_PARAM_PORT_STATE:
327 		case ISCSI_HOST_PARAM_PORT_SPEED:
328 			return S_IRUGO;
329 		default:
330 			return 0;
331 		}
332 	case ISCSI_PARAM:
333 		switch (param) {
334 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
335 		case ISCSI_PARAM_PERSISTENT_PORT:
336 		case ISCSI_PARAM_CONN_ADDRESS:
337 		case ISCSI_PARAM_CONN_PORT:
338 		case ISCSI_PARAM_TARGET_NAME:
339 		case ISCSI_PARAM_TPGT:
340 		case ISCSI_PARAM_TARGET_ALIAS:
341 		case ISCSI_PARAM_MAX_BURST:
342 		case ISCSI_PARAM_MAX_R2T:
343 		case ISCSI_PARAM_FIRST_BURST:
344 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
345 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
346 		case ISCSI_PARAM_IFACE_NAME:
347 		case ISCSI_PARAM_CHAP_OUT_IDX:
348 		case ISCSI_PARAM_CHAP_IN_IDX:
349 		case ISCSI_PARAM_USERNAME:
350 		case ISCSI_PARAM_PASSWORD:
351 		case ISCSI_PARAM_USERNAME_IN:
352 		case ISCSI_PARAM_PASSWORD_IN:
353 			return S_IRUGO;
354 		default:
355 			return 0;
356 		}
357 	case ISCSI_NET_PARAM:
358 		switch (param) {
359 		case ISCSI_NET_PARAM_IPV4_ADDR:
360 		case ISCSI_NET_PARAM_IPV4_SUBNET:
361 		case ISCSI_NET_PARAM_IPV4_GW:
362 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
363 		case ISCSI_NET_PARAM_IFACE_ENABLE:
364 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
365 		case ISCSI_NET_PARAM_IPV6_ADDR:
366 		case ISCSI_NET_PARAM_IPV6_ROUTER:
367 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
368 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
369 		case ISCSI_NET_PARAM_VLAN_ID:
370 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
371 		case ISCSI_NET_PARAM_VLAN_ENABLED:
372 		case ISCSI_NET_PARAM_MTU:
373 		case ISCSI_NET_PARAM_PORT:
374 			return S_IRUGO;
375 		default:
376 			return 0;
377 		}
378 	}
379 
380 	return 0;
381 }
382 
383 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
384 				  uint32_t *num_entries, char *buf)
385 {
386 	struct scsi_qla_host *ha = to_qla_host(shost);
387 	struct ql4_chap_table *chap_table;
388 	struct iscsi_chap_rec *chap_rec;
389 	int max_chap_entries = 0;
390 	int valid_chap_entries = 0;
391 	int ret = 0, i;
392 
393 	if (is_qla8022(ha))
394 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
395 					sizeof(struct ql4_chap_table);
396 	else
397 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
398 
399 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
400 			__func__, *num_entries, chap_tbl_idx);
401 
402 	if (!buf) {
403 		ret = -ENOMEM;
404 		goto exit_get_chap_list;
405 	}
406 
407 	chap_rec = (struct iscsi_chap_rec *) buf;
408 	mutex_lock(&ha->chap_sem);
409 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
410 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
411 		if (chap_table->cookie !=
412 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
413 			continue;
414 
415 		chap_rec->chap_tbl_idx = i;
416 		strncpy(chap_rec->username, chap_table->name,
417 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
418 		strncpy(chap_rec->password, chap_table->secret,
419 			QL4_CHAP_MAX_SECRET_LEN);
420 		chap_rec->password_length = chap_table->secret_len;
421 
422 		if (chap_table->flags & BIT_7) /* local */
423 			chap_rec->chap_type = CHAP_TYPE_OUT;
424 
425 		if (chap_table->flags & BIT_6) /* peer */
426 			chap_rec->chap_type = CHAP_TYPE_IN;
427 
428 		chap_rec++;
429 
430 		valid_chap_entries++;
431 		if (valid_chap_entries == *num_entries)
432 			break;
433 		else
434 			continue;
435 	}
436 	mutex_unlock(&ha->chap_sem);
437 
438 exit_get_chap_list:
439 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
440 			__func__,  valid_chap_entries);
441 	*num_entries = valid_chap_entries;
442 	return ret;
443 }
444 
445 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
446 {
447 	int ret = 0;
448 	uint16_t *chap_tbl_idx = (uint16_t *) data;
449 	struct iscsi_cls_session *cls_session;
450 	struct iscsi_session *sess;
451 	struct ddb_entry *ddb_entry;
452 
453 	if (!iscsi_is_session_dev(dev))
454 		goto exit_is_chap_active;
455 
456 	cls_session = iscsi_dev_to_session(dev);
457 	sess = cls_session->dd_data;
458 	ddb_entry = sess->dd_data;
459 
460 	if (iscsi_session_chkready(cls_session))
461 		goto exit_is_chap_active;
462 
463 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
464 		ret = 1;
465 
466 exit_is_chap_active:
467 	return ret;
468 }
469 
470 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
471 				  uint16_t chap_tbl_idx)
472 {
473 	int ret = 0;
474 
475 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
476 				    __qla4xxx_is_chap_active);
477 
478 	return ret;
479 }
480 
481 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
482 {
483 	struct scsi_qla_host *ha = to_qla_host(shost);
484 	struct ql4_chap_table *chap_table;
485 	dma_addr_t chap_dma;
486 	int max_chap_entries = 0;
487 	uint32_t offset = 0;
488 	uint32_t chap_size;
489 	int ret = 0;
490 
491 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
492 	if (chap_table == NULL)
493 		return -ENOMEM;
494 
495 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
496 
497 	if (is_qla8022(ha))
498 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
499 				   sizeof(struct ql4_chap_table);
500 	else
501 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
502 
503 	if (chap_tbl_idx > max_chap_entries) {
504 		ret = -EINVAL;
505 		goto exit_delete_chap;
506 	}
507 
508 	/* Check if chap index is in use.
509 	 * If chap is in use don't delet chap entry */
510 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
511 	if (ret) {
512 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
513 			   "delete from flash\n", chap_tbl_idx);
514 		ret = -EBUSY;
515 		goto exit_delete_chap;
516 	}
517 
518 	chap_size = sizeof(struct ql4_chap_table);
519 	if (is_qla40XX(ha))
520 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
521 	else {
522 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
523 		/* flt_chap_size is CHAP table size for both ports
524 		 * so divide it by 2 to calculate the offset for second port
525 		 */
526 		if (ha->port_num == 1)
527 			offset += (ha->hw.flt_chap_size / 2);
528 		offset += (chap_tbl_idx * chap_size);
529 	}
530 
531 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
532 	if (ret != QLA_SUCCESS) {
533 		ret = -EINVAL;
534 		goto exit_delete_chap;
535 	}
536 
537 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
538 			  __le16_to_cpu(chap_table->cookie)));
539 
540 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
541 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
542 		goto exit_delete_chap;
543 	}
544 
545 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
546 
547 	offset = FLASH_CHAP_OFFSET |
548 			(chap_tbl_idx * sizeof(struct ql4_chap_table));
549 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
550 				FLASH_OPT_RMW_COMMIT);
551 	if (ret == QLA_SUCCESS && ha->chap_list) {
552 		mutex_lock(&ha->chap_sem);
553 		/* Update ha chap_list cache */
554 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
555 			chap_table, sizeof(struct ql4_chap_table));
556 		mutex_unlock(&ha->chap_sem);
557 	}
558 	if (ret != QLA_SUCCESS)
559 		ret =  -EINVAL;
560 
561 exit_delete_chap:
562 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
563 	return ret;
564 }
565 
566 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
567 				   enum iscsi_param_type param_type,
568 				   int param, char *buf)
569 {
570 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
571 	struct scsi_qla_host *ha = to_qla_host(shost);
572 	int len = -ENOSYS;
573 
574 	if (param_type != ISCSI_NET_PARAM)
575 		return -ENOSYS;
576 
577 	switch (param) {
578 	case ISCSI_NET_PARAM_IPV4_ADDR:
579 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
580 		break;
581 	case ISCSI_NET_PARAM_IPV4_SUBNET:
582 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
583 		break;
584 	case ISCSI_NET_PARAM_IPV4_GW:
585 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
586 		break;
587 	case ISCSI_NET_PARAM_IFACE_ENABLE:
588 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
589 			len = sprintf(buf, "%s\n",
590 				      (ha->ip_config.ipv4_options &
591 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
592 				      "enabled" : "disabled");
593 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
594 			len = sprintf(buf, "%s\n",
595 				      (ha->ip_config.ipv6_options &
596 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
597 				       "enabled" : "disabled");
598 		break;
599 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
600 		len = sprintf(buf, "%s\n",
601 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
602 			      "dhcp" : "static");
603 		break;
604 	case ISCSI_NET_PARAM_IPV6_ADDR:
605 		if (iface->iface_num == 0)
606 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
607 		if (iface->iface_num == 1)
608 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
609 		break;
610 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
611 		len = sprintf(buf, "%pI6\n",
612 			      &ha->ip_config.ipv6_link_local_addr);
613 		break;
614 	case ISCSI_NET_PARAM_IPV6_ROUTER:
615 		len = sprintf(buf, "%pI6\n",
616 			      &ha->ip_config.ipv6_default_router_addr);
617 		break;
618 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
619 		len = sprintf(buf, "%s\n",
620 			      (ha->ip_config.ipv6_addl_options &
621 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
622 			       "nd" : "static");
623 		break;
624 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
625 		len = sprintf(buf, "%s\n",
626 			      (ha->ip_config.ipv6_addl_options &
627 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
628 			       "auto" : "static");
629 		break;
630 	case ISCSI_NET_PARAM_VLAN_ID:
631 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
632 			len = sprintf(buf, "%d\n",
633 				      (ha->ip_config.ipv4_vlan_tag &
634 				       ISCSI_MAX_VLAN_ID));
635 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
636 			len = sprintf(buf, "%d\n",
637 				      (ha->ip_config.ipv6_vlan_tag &
638 				       ISCSI_MAX_VLAN_ID));
639 		break;
640 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
641 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
642 			len = sprintf(buf, "%d\n",
643 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
644 					ISCSI_MAX_VLAN_PRIORITY));
645 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
646 			len = sprintf(buf, "%d\n",
647 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
648 					ISCSI_MAX_VLAN_PRIORITY));
649 		break;
650 	case ISCSI_NET_PARAM_VLAN_ENABLED:
651 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
652 			len = sprintf(buf, "%s\n",
653 				      (ha->ip_config.ipv4_options &
654 				       IPOPT_VLAN_TAGGING_ENABLE) ?
655 				       "enabled" : "disabled");
656 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
657 			len = sprintf(buf, "%s\n",
658 				      (ha->ip_config.ipv6_options &
659 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
660 				       "enabled" : "disabled");
661 		break;
662 	case ISCSI_NET_PARAM_MTU:
663 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
664 		break;
665 	case ISCSI_NET_PARAM_PORT:
666 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
667 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
668 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
669 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
670 		break;
671 	default:
672 		len = -ENOSYS;
673 	}
674 
675 	return len;
676 }
677 
678 static struct iscsi_endpoint *
679 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
680 		   int non_blocking)
681 {
682 	int ret;
683 	struct iscsi_endpoint *ep;
684 	struct qla_endpoint *qla_ep;
685 	struct scsi_qla_host *ha;
686 	struct sockaddr_in *addr;
687 	struct sockaddr_in6 *addr6;
688 
689 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
690 	if (!shost) {
691 		ret = -ENXIO;
692 		printk(KERN_ERR "%s: shost is NULL\n",
693 		       __func__);
694 		return ERR_PTR(ret);
695 	}
696 
697 	ha = iscsi_host_priv(shost);
698 
699 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
700 	if (!ep) {
701 		ret = -ENOMEM;
702 		return ERR_PTR(ret);
703 	}
704 
705 	qla_ep = ep->dd_data;
706 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
707 	if (dst_addr->sa_family == AF_INET) {
708 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
709 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
710 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
711 				  (char *)&addr->sin_addr));
712 	} else if (dst_addr->sa_family == AF_INET6) {
713 		memcpy(&qla_ep->dst_addr, dst_addr,
714 		       sizeof(struct sockaddr_in6));
715 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
716 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
717 				  (char *)&addr6->sin6_addr));
718 	}
719 
720 	qla_ep->host = shost;
721 
722 	return ep;
723 }
724 
725 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
726 {
727 	struct qla_endpoint *qla_ep;
728 	struct scsi_qla_host *ha;
729 	int ret = 0;
730 
731 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
732 	qla_ep = ep->dd_data;
733 	ha = to_qla_host(qla_ep->host);
734 
735 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
736 		ret = 1;
737 
738 	return ret;
739 }
740 
741 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
742 {
743 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
744 	iscsi_destroy_endpoint(ep);
745 }
746 
747 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
748 				enum iscsi_param param,
749 				char *buf)
750 {
751 	struct qla_endpoint *qla_ep = ep->dd_data;
752 	struct sockaddr *dst_addr;
753 
754 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
755 
756 	switch (param) {
757 	case ISCSI_PARAM_CONN_PORT:
758 	case ISCSI_PARAM_CONN_ADDRESS:
759 		if (!qla_ep)
760 			return -ENOTCONN;
761 
762 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
763 		if (!dst_addr)
764 			return -ENOTCONN;
765 
766 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
767 						 &qla_ep->dst_addr, param, buf);
768 	default:
769 		return -ENOSYS;
770 	}
771 }
772 
773 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
774 				   struct iscsi_stats *stats)
775 {
776 	struct iscsi_session *sess;
777 	struct iscsi_cls_session *cls_sess;
778 	struct ddb_entry *ddb_entry;
779 	struct scsi_qla_host *ha;
780 	struct ql_iscsi_stats *ql_iscsi_stats;
781 	int stats_size;
782 	int ret;
783 	dma_addr_t iscsi_stats_dma;
784 
785 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
786 
787 	cls_sess = iscsi_conn_to_session(cls_conn);
788 	sess = cls_sess->dd_data;
789 	ddb_entry = sess->dd_data;
790 	ha = ddb_entry->ha;
791 
792 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
793 	/* Allocate memory */
794 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
795 					    &iscsi_stats_dma, GFP_KERNEL);
796 	if (!ql_iscsi_stats) {
797 		ql4_printk(KERN_ERR, ha,
798 			   "Unable to allocate memory for iscsi stats\n");
799 		goto exit_get_stats;
800 	}
801 
802 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
803 				     iscsi_stats_dma);
804 	if (ret != QLA_SUCCESS) {
805 		ql4_printk(KERN_ERR, ha,
806 			   "Unable to retreive iscsi stats\n");
807 		goto free_stats;
808 	}
809 
810 	/* octets */
811 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
812 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
813 	/* xmit pdus */
814 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
815 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
816 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
817 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
818 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
819 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
820 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
821 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
822 	/* recv pdus */
823 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
824 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
825 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
826 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
827 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
828 	stats->logoutrsp_pdus =
829 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
830 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
831 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
832 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
833 
834 free_stats:
835 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
836 			  iscsi_stats_dma);
837 exit_get_stats:
838 	return;
839 }
840 
841 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
842 {
843 	struct iscsi_cls_session *session;
844 	struct iscsi_session *sess;
845 	unsigned long flags;
846 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
847 
848 	session = starget_to_session(scsi_target(sc->device));
849 	sess = session->dd_data;
850 
851 	spin_lock_irqsave(&session->lock, flags);
852 	if (session->state == ISCSI_SESSION_FAILED)
853 		ret = BLK_EH_RESET_TIMER;
854 	spin_unlock_irqrestore(&session->lock, flags);
855 
856 	return ret;
857 }
858 
859 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
860 {
861 	struct scsi_qla_host *ha = to_qla_host(shost);
862 	struct iscsi_cls_host *ihost = shost->shost_data;
863 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
864 
865 	qla4xxx_get_firmware_state(ha);
866 
867 	switch (ha->addl_fw_state & 0x0F00) {
868 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
869 		speed = ISCSI_PORT_SPEED_10MBPS;
870 		break;
871 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
872 		speed = ISCSI_PORT_SPEED_100MBPS;
873 		break;
874 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
875 		speed = ISCSI_PORT_SPEED_1GBPS;
876 		break;
877 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
878 		speed = ISCSI_PORT_SPEED_10GBPS;
879 		break;
880 	}
881 	ihost->port_speed = speed;
882 }
883 
884 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
885 {
886 	struct scsi_qla_host *ha = to_qla_host(shost);
887 	struct iscsi_cls_host *ihost = shost->shost_data;
888 	uint32_t state = ISCSI_PORT_STATE_DOWN;
889 
890 	if (test_bit(AF_LINK_UP, &ha->flags))
891 		state = ISCSI_PORT_STATE_UP;
892 
893 	ihost->port_state = state;
894 }
895 
896 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
897 				  enum iscsi_host_param param, char *buf)
898 {
899 	struct scsi_qla_host *ha = to_qla_host(shost);
900 	int len;
901 
902 	switch (param) {
903 	case ISCSI_HOST_PARAM_HWADDRESS:
904 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
905 		break;
906 	case ISCSI_HOST_PARAM_IPADDRESS:
907 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
908 		break;
909 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
910 		len = sprintf(buf, "%s\n", ha->name_string);
911 		break;
912 	case ISCSI_HOST_PARAM_PORT_STATE:
913 		qla4xxx_set_port_state(shost);
914 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
915 		break;
916 	case ISCSI_HOST_PARAM_PORT_SPEED:
917 		qla4xxx_set_port_speed(shost);
918 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
919 		break;
920 	default:
921 		return -ENOSYS;
922 	}
923 
924 	return len;
925 }
926 
927 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
928 {
929 	if (ha->iface_ipv4)
930 		return;
931 
932 	/* IPv4 */
933 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
934 					    &qla4xxx_iscsi_transport,
935 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
936 	if (!ha->iface_ipv4)
937 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
938 			   "iface0.\n");
939 }
940 
941 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
942 {
943 	if (!ha->iface_ipv6_0)
944 		/* IPv6 iface-0 */
945 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
946 						      &qla4xxx_iscsi_transport,
947 						      ISCSI_IFACE_TYPE_IPV6, 0,
948 						      0);
949 	if (!ha->iface_ipv6_0)
950 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
951 			   "iface0.\n");
952 
953 	if (!ha->iface_ipv6_1)
954 		/* IPv6 iface-1 */
955 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
956 						      &qla4xxx_iscsi_transport,
957 						      ISCSI_IFACE_TYPE_IPV6, 1,
958 						      0);
959 	if (!ha->iface_ipv6_1)
960 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
961 			   "iface1.\n");
962 }
963 
964 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
965 {
966 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
967 		qla4xxx_create_ipv4_iface(ha);
968 
969 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
970 		qla4xxx_create_ipv6_iface(ha);
971 }
972 
973 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
974 {
975 	if (ha->iface_ipv4) {
976 		iscsi_destroy_iface(ha->iface_ipv4);
977 		ha->iface_ipv4 = NULL;
978 	}
979 }
980 
981 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
982 {
983 	if (ha->iface_ipv6_0) {
984 		iscsi_destroy_iface(ha->iface_ipv6_0);
985 		ha->iface_ipv6_0 = NULL;
986 	}
987 	if (ha->iface_ipv6_1) {
988 		iscsi_destroy_iface(ha->iface_ipv6_1);
989 		ha->iface_ipv6_1 = NULL;
990 	}
991 }
992 
993 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
994 {
995 	qla4xxx_destroy_ipv4_iface(ha);
996 	qla4xxx_destroy_ipv6_iface(ha);
997 }
998 
999 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1000 			     struct iscsi_iface_param_info *iface_param,
1001 			     struct addr_ctrl_blk *init_fw_cb)
1002 {
1003 	/*
1004 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1005 	 * iface_num 1 is valid only for IPv6 Addr.
1006 	 */
1007 	switch (iface_param->param) {
1008 	case ISCSI_NET_PARAM_IPV6_ADDR:
1009 		if (iface_param->iface_num & 0x1)
1010 			/* IPv6 Addr 1 */
1011 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1012 			       sizeof(init_fw_cb->ipv6_addr1));
1013 		else
1014 			/* IPv6 Addr 0 */
1015 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1016 			       sizeof(init_fw_cb->ipv6_addr0));
1017 		break;
1018 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1019 		if (iface_param->iface_num & 0x1)
1020 			break;
1021 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1022 		       sizeof(init_fw_cb->ipv6_if_id));
1023 		break;
1024 	case ISCSI_NET_PARAM_IPV6_ROUTER:
1025 		if (iface_param->iface_num & 0x1)
1026 			break;
1027 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1028 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1029 		break;
1030 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1031 		/* Autocfg applies to even interface */
1032 		if (iface_param->iface_num & 0x1)
1033 			break;
1034 
1035 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1036 			init_fw_cb->ipv6_addtl_opts &=
1037 				cpu_to_le16(
1038 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1039 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1040 			init_fw_cb->ipv6_addtl_opts |=
1041 				cpu_to_le16(
1042 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1043 		else
1044 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1045 				   "IPv6 addr\n");
1046 		break;
1047 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1048 		/* Autocfg applies to even interface */
1049 		if (iface_param->iface_num & 0x1)
1050 			break;
1051 
1052 		if (iface_param->value[0] ==
1053 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1054 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1055 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1056 		else if (iface_param->value[0] ==
1057 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1058 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1059 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1060 		else
1061 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1062 				   "IPv6 linklocal addr\n");
1063 		break;
1064 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1065 		/* Autocfg applies to even interface */
1066 		if (iface_param->iface_num & 0x1)
1067 			break;
1068 
1069 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1070 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1071 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1072 		break;
1073 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1074 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1075 			init_fw_cb->ipv6_opts |=
1076 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1077 			qla4xxx_create_ipv6_iface(ha);
1078 		} else {
1079 			init_fw_cb->ipv6_opts &=
1080 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1081 					    0xFFFF);
1082 			qla4xxx_destroy_ipv6_iface(ha);
1083 		}
1084 		break;
1085 	case ISCSI_NET_PARAM_VLAN_TAG:
1086 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1087 			break;
1088 		init_fw_cb->ipv6_vlan_tag =
1089 				cpu_to_be16(*(uint16_t *)iface_param->value);
1090 		break;
1091 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1092 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1093 			init_fw_cb->ipv6_opts |=
1094 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1095 		else
1096 			init_fw_cb->ipv6_opts &=
1097 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1098 		break;
1099 	case ISCSI_NET_PARAM_MTU:
1100 		init_fw_cb->eth_mtu_size =
1101 				cpu_to_le16(*(uint16_t *)iface_param->value);
1102 		break;
1103 	case ISCSI_NET_PARAM_PORT:
1104 		/* Autocfg applies to even interface */
1105 		if (iface_param->iface_num & 0x1)
1106 			break;
1107 
1108 		init_fw_cb->ipv6_port =
1109 				cpu_to_le16(*(uint16_t *)iface_param->value);
1110 		break;
1111 	default:
1112 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1113 			   iface_param->param);
1114 		break;
1115 	}
1116 }
1117 
1118 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1119 			     struct iscsi_iface_param_info *iface_param,
1120 			     struct addr_ctrl_blk *init_fw_cb)
1121 {
1122 	switch (iface_param->param) {
1123 	case ISCSI_NET_PARAM_IPV4_ADDR:
1124 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1125 		       sizeof(init_fw_cb->ipv4_addr));
1126 		break;
1127 	case ISCSI_NET_PARAM_IPV4_SUBNET:
1128 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
1129 		       sizeof(init_fw_cb->ipv4_subnet));
1130 		break;
1131 	case ISCSI_NET_PARAM_IPV4_GW:
1132 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1133 		       sizeof(init_fw_cb->ipv4_gw_addr));
1134 		break;
1135 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1136 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1137 			init_fw_cb->ipv4_tcp_opts |=
1138 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
1139 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1140 			init_fw_cb->ipv4_tcp_opts &=
1141 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1142 		else
1143 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1144 		break;
1145 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1146 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1147 			init_fw_cb->ipv4_ip_opts |=
1148 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1149 			qla4xxx_create_ipv4_iface(ha);
1150 		} else {
1151 			init_fw_cb->ipv4_ip_opts &=
1152 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1153 					    0xFFFF);
1154 			qla4xxx_destroy_ipv4_iface(ha);
1155 		}
1156 		break;
1157 	case ISCSI_NET_PARAM_VLAN_TAG:
1158 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1159 			break;
1160 		init_fw_cb->ipv4_vlan_tag =
1161 				cpu_to_be16(*(uint16_t *)iface_param->value);
1162 		break;
1163 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1164 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1165 			init_fw_cb->ipv4_ip_opts |=
1166 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1167 		else
1168 			init_fw_cb->ipv4_ip_opts &=
1169 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1170 		break;
1171 	case ISCSI_NET_PARAM_MTU:
1172 		init_fw_cb->eth_mtu_size =
1173 				cpu_to_le16(*(uint16_t *)iface_param->value);
1174 		break;
1175 	case ISCSI_NET_PARAM_PORT:
1176 		init_fw_cb->ipv4_port =
1177 				cpu_to_le16(*(uint16_t *)iface_param->value);
1178 		break;
1179 	default:
1180 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1181 			   iface_param->param);
1182 		break;
1183 	}
1184 }
1185 
1186 static void
1187 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1188 {
1189 	struct addr_ctrl_blk_def *acb;
1190 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1191 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
1192 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
1193 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
1194 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
1195 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
1196 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
1197 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
1198 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
1199 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
1200 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
1201 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
1202 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
1203 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
1204 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
1205 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
1206 }
1207 
1208 static int
1209 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1210 {
1211 	struct scsi_qla_host *ha = to_qla_host(shost);
1212 	int rval = 0;
1213 	struct iscsi_iface_param_info *iface_param = NULL;
1214 	struct addr_ctrl_blk *init_fw_cb = NULL;
1215 	dma_addr_t init_fw_cb_dma;
1216 	uint32_t mbox_cmd[MBOX_REG_COUNT];
1217 	uint32_t mbox_sts[MBOX_REG_COUNT];
1218 	uint32_t rem = len;
1219 	struct nlattr *attr;
1220 
1221 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1222 					sizeof(struct addr_ctrl_blk),
1223 					&init_fw_cb_dma, GFP_KERNEL);
1224 	if (!init_fw_cb) {
1225 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1226 			   __func__);
1227 		return -ENOMEM;
1228 	}
1229 
1230 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1231 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1232 	memset(&mbox_sts, 0, sizeof(mbox_sts));
1233 
1234 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1235 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1236 		rval = -EIO;
1237 		goto exit_init_fw_cb;
1238 	}
1239 
1240 	nla_for_each_attr(attr, data, len, rem) {
1241 		iface_param = nla_data(attr);
1242 
1243 		if (iface_param->param_type != ISCSI_NET_PARAM)
1244 			continue;
1245 
1246 		switch (iface_param->iface_type) {
1247 		case ISCSI_IFACE_TYPE_IPV4:
1248 			switch (iface_param->iface_num) {
1249 			case 0:
1250 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1251 				break;
1252 			default:
1253 				/* Cannot have more than one IPv4 interface */
1254 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1255 					   "number = %d\n",
1256 					   iface_param->iface_num);
1257 				break;
1258 			}
1259 			break;
1260 		case ISCSI_IFACE_TYPE_IPV6:
1261 			switch (iface_param->iface_num) {
1262 			case 0:
1263 			case 1:
1264 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1265 				break;
1266 			default:
1267 				/* Cannot have more than two IPv6 interface */
1268 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1269 					   "number = %d\n",
1270 					   iface_param->iface_num);
1271 				break;
1272 			}
1273 			break;
1274 		default:
1275 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1276 			break;
1277 		}
1278 	}
1279 
1280 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1281 
1282 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1283 				 sizeof(struct addr_ctrl_blk),
1284 				 FLASH_OPT_RMW_COMMIT);
1285 	if (rval != QLA_SUCCESS) {
1286 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1287 			   __func__);
1288 		rval = -EIO;
1289 		goto exit_init_fw_cb;
1290 	}
1291 
1292 	rval = qla4xxx_disable_acb(ha);
1293 	if (rval != QLA_SUCCESS) {
1294 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1295 			   __func__);
1296 		rval = -EIO;
1297 		goto exit_init_fw_cb;
1298 	}
1299 
1300 	wait_for_completion_timeout(&ha->disable_acb_comp,
1301 				    DISABLE_ACB_TOV * HZ);
1302 
1303 	qla4xxx_initcb_to_acb(init_fw_cb);
1304 
1305 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1306 	if (rval != QLA_SUCCESS) {
1307 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1308 			   __func__);
1309 		rval = -EIO;
1310 		goto exit_init_fw_cb;
1311 	}
1312 
1313 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1314 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1315 				  init_fw_cb_dma);
1316 
1317 exit_init_fw_cb:
1318 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1319 			  init_fw_cb, init_fw_cb_dma);
1320 
1321 	return rval;
1322 }
1323 
1324 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1325 				     enum iscsi_param param, char *buf)
1326 {
1327 	struct iscsi_session *sess = cls_sess->dd_data;
1328 	struct ddb_entry *ddb_entry = sess->dd_data;
1329 	struct scsi_qla_host *ha = ddb_entry->ha;
1330 	int rval, len;
1331 	uint16_t idx;
1332 
1333 	switch (param) {
1334 	case ISCSI_PARAM_CHAP_IN_IDX:
1335 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
1336 					      sess->password_in, BIDI_CHAP,
1337 					      &idx);
1338 		if (rval)
1339 			return -EINVAL;
1340 
1341 		len = sprintf(buf, "%hu\n", idx);
1342 		break;
1343 	case ISCSI_PARAM_CHAP_OUT_IDX:
1344 		rval = qla4xxx_get_chap_index(ha, sess->username,
1345 					      sess->password, LOCAL_CHAP,
1346 					      &idx);
1347 		if (rval)
1348 			return -EINVAL;
1349 
1350 		len = sprintf(buf, "%hu\n", idx);
1351 		break;
1352 	default:
1353 		return iscsi_session_get_param(cls_sess, param, buf);
1354 	}
1355 
1356 	return len;
1357 }
1358 
1359 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1360 				  enum iscsi_param param, char *buf)
1361 {
1362 	struct iscsi_conn *conn;
1363 	struct qla_conn *qla_conn;
1364 	struct sockaddr *dst_addr;
1365 	int len = 0;
1366 
1367 	conn = cls_conn->dd_data;
1368 	qla_conn = conn->dd_data;
1369 	dst_addr = &qla_conn->qla_ep->dst_addr;
1370 
1371 	switch (param) {
1372 	case ISCSI_PARAM_CONN_PORT:
1373 	case ISCSI_PARAM_CONN_ADDRESS:
1374 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1375 						 dst_addr, param, buf);
1376 	default:
1377 		return iscsi_conn_get_param(cls_conn, param, buf);
1378 	}
1379 
1380 	return len;
1381 
1382 }
1383 
1384 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1385 {
1386 	uint32_t mbx_sts = 0;
1387 	uint16_t tmp_ddb_index;
1388 	int ret;
1389 
1390 get_ddb_index:
1391 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1392 
1393 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1394 		DEBUG2(ql4_printk(KERN_INFO, ha,
1395 				  "Free DDB index not available\n"));
1396 		ret = QLA_ERROR;
1397 		goto exit_get_ddb_index;
1398 	}
1399 
1400 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1401 		goto get_ddb_index;
1402 
1403 	DEBUG2(ql4_printk(KERN_INFO, ha,
1404 			  "Found a free DDB index at %d\n", tmp_ddb_index));
1405 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1406 	if (ret == QLA_ERROR) {
1407 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1408 			ql4_printk(KERN_INFO, ha,
1409 				   "DDB index = %d not available trying next\n",
1410 				   tmp_ddb_index);
1411 			goto get_ddb_index;
1412 		}
1413 		DEBUG2(ql4_printk(KERN_INFO, ha,
1414 				  "Free FW DDB not available\n"));
1415 	}
1416 
1417 	*ddb_index = tmp_ddb_index;
1418 
1419 exit_get_ddb_index:
1420 	return ret;
1421 }
1422 
1423 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1424 				   struct ddb_entry *ddb_entry,
1425 				   char *existing_ipaddr,
1426 				   char *user_ipaddr)
1427 {
1428 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1429 	char formatted_ipaddr[DDB_IPADDR_LEN];
1430 	int status = QLA_SUCCESS, ret = 0;
1431 
1432 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1433 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1434 			       '\0', NULL);
1435 		if (ret == 0) {
1436 			status = QLA_ERROR;
1437 			goto out_match;
1438 		}
1439 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1440 	} else {
1441 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1442 			       '\0', NULL);
1443 		if (ret == 0) {
1444 			status = QLA_ERROR;
1445 			goto out_match;
1446 		}
1447 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1448 	}
1449 
1450 	if (strcmp(existing_ipaddr, formatted_ipaddr))
1451 		status = QLA_ERROR;
1452 
1453 out_match:
1454 	return status;
1455 }
1456 
1457 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1458 				      struct iscsi_cls_conn *cls_conn)
1459 {
1460 	int idx = 0, max_ddbs, rval;
1461 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1462 	struct iscsi_session *sess, *existing_sess;
1463 	struct iscsi_conn *conn, *existing_conn;
1464 	struct ddb_entry *ddb_entry;
1465 
1466 	sess = cls_sess->dd_data;
1467 	conn = cls_conn->dd_data;
1468 
1469 	if (sess->targetname == NULL ||
1470 	    conn->persistent_address == NULL ||
1471 	    conn->persistent_port == 0)
1472 		return QLA_ERROR;
1473 
1474 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1475 				     MAX_DEV_DB_ENTRIES;
1476 
1477 	for (idx = 0; idx < max_ddbs; idx++) {
1478 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1479 		if (ddb_entry == NULL)
1480 			continue;
1481 
1482 		if (ddb_entry->ddb_type != FLASH_DDB)
1483 			continue;
1484 
1485 		existing_sess = ddb_entry->sess->dd_data;
1486 		existing_conn = ddb_entry->conn->dd_data;
1487 
1488 		if (existing_sess->targetname == NULL ||
1489 		    existing_conn->persistent_address == NULL ||
1490 		    existing_conn->persistent_port == 0)
1491 			continue;
1492 
1493 		DEBUG2(ql4_printk(KERN_INFO, ha,
1494 				  "IQN = %s User IQN = %s\n",
1495 				  existing_sess->targetname,
1496 				  sess->targetname));
1497 
1498 		DEBUG2(ql4_printk(KERN_INFO, ha,
1499 				  "IP = %s User IP = %s\n",
1500 				  existing_conn->persistent_address,
1501 				  conn->persistent_address));
1502 
1503 		DEBUG2(ql4_printk(KERN_INFO, ha,
1504 				  "Port = %d User Port = %d\n",
1505 				  existing_conn->persistent_port,
1506 				  conn->persistent_port));
1507 
1508 		if (strcmp(existing_sess->targetname, sess->targetname))
1509 			continue;
1510 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1511 					existing_conn->persistent_address,
1512 					conn->persistent_address);
1513 		if (rval == QLA_ERROR)
1514 			continue;
1515 		if (existing_conn->persistent_port != conn->persistent_port)
1516 			continue;
1517 		break;
1518 	}
1519 
1520 	if (idx == max_ddbs)
1521 		return QLA_ERROR;
1522 
1523 	DEBUG2(ql4_printk(KERN_INFO, ha,
1524 			  "Match found in fwdb sessions\n"));
1525 	return QLA_SUCCESS;
1526 }
1527 
1528 static struct iscsi_cls_session *
1529 qla4xxx_session_create(struct iscsi_endpoint *ep,
1530 			uint16_t cmds_max, uint16_t qdepth,
1531 			uint32_t initial_cmdsn)
1532 {
1533 	struct iscsi_cls_session *cls_sess;
1534 	struct scsi_qla_host *ha;
1535 	struct qla_endpoint *qla_ep;
1536 	struct ddb_entry *ddb_entry;
1537 	uint16_t ddb_index;
1538 	struct iscsi_session *sess;
1539 	struct sockaddr *dst_addr;
1540 	int ret;
1541 
1542 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1543 	if (!ep) {
1544 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1545 		return NULL;
1546 	}
1547 
1548 	qla_ep = ep->dd_data;
1549 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1550 	ha = to_qla_host(qla_ep->host);
1551 
1552 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1553 	if (ret == QLA_ERROR)
1554 		return NULL;
1555 
1556 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1557 				       cmds_max, sizeof(struct ddb_entry),
1558 				       sizeof(struct ql4_task_data),
1559 				       initial_cmdsn, ddb_index);
1560 	if (!cls_sess)
1561 		return NULL;
1562 
1563 	sess = cls_sess->dd_data;
1564 	ddb_entry = sess->dd_data;
1565 	ddb_entry->fw_ddb_index = ddb_index;
1566 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1567 	ddb_entry->ha = ha;
1568 	ddb_entry->sess = cls_sess;
1569 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1570 	ddb_entry->ddb_change = qla4xxx_ddb_change;
1571 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1572 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1573 	ha->tot_ddbs++;
1574 
1575 	return cls_sess;
1576 }
1577 
1578 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1579 {
1580 	struct iscsi_session *sess;
1581 	struct ddb_entry *ddb_entry;
1582 	struct scsi_qla_host *ha;
1583 	unsigned long flags, wtime;
1584 	struct dev_db_entry *fw_ddb_entry = NULL;
1585 	dma_addr_t fw_ddb_entry_dma;
1586 	uint32_t ddb_state;
1587 	int ret;
1588 
1589 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1590 	sess = cls_sess->dd_data;
1591 	ddb_entry = sess->dd_data;
1592 	ha = ddb_entry->ha;
1593 
1594 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 					  &fw_ddb_entry_dma, GFP_KERNEL);
1596 	if (!fw_ddb_entry) {
1597 		ql4_printk(KERN_ERR, ha,
1598 			   "%s: Unable to allocate dma buffer\n", __func__);
1599 		goto destroy_session;
1600 	}
1601 
1602 	wtime = jiffies + (HZ * LOGOUT_TOV);
1603 	do {
1604 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 					      fw_ddb_entry, fw_ddb_entry_dma,
1606 					      NULL, NULL, &ddb_state, NULL,
1607 					      NULL, NULL);
1608 		if (ret == QLA_ERROR)
1609 			goto destroy_session;
1610 
1611 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 		    (ddb_state == DDB_DS_SESSION_FAILED))
1613 			goto destroy_session;
1614 
1615 		schedule_timeout_uninterruptible(HZ);
1616 	} while ((time_after(wtime, jiffies)));
1617 
1618 destroy_session:
1619 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1620 
1621 	spin_lock_irqsave(&ha->hardware_lock, flags);
1622 	qla4xxx_free_ddb(ha, ddb_entry);
1623 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624 
1625 	iscsi_session_teardown(cls_sess);
1626 
1627 	if (fw_ddb_entry)
1628 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 				  fw_ddb_entry, fw_ddb_entry_dma);
1630 }
1631 
1632 static struct iscsi_cls_conn *
1633 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1634 {
1635 	struct iscsi_cls_conn *cls_conn;
1636 	struct iscsi_session *sess;
1637 	struct ddb_entry *ddb_entry;
1638 
1639 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1640 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1641 				    conn_idx);
1642 	if (!cls_conn)
1643 		return NULL;
1644 
1645 	sess = cls_sess->dd_data;
1646 	ddb_entry = sess->dd_data;
1647 	ddb_entry->conn = cls_conn;
1648 
1649 	return cls_conn;
1650 }
1651 
1652 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1653 			     struct iscsi_cls_conn *cls_conn,
1654 			     uint64_t transport_fd, int is_leading)
1655 {
1656 	struct iscsi_conn *conn;
1657 	struct qla_conn *qla_conn;
1658 	struct iscsi_endpoint *ep;
1659 
1660 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1661 
1662 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1663 		return -EINVAL;
1664 	ep = iscsi_lookup_endpoint(transport_fd);
1665 	conn = cls_conn->dd_data;
1666 	qla_conn = conn->dd_data;
1667 	qla_conn->qla_ep = ep->dd_data;
1668 	return 0;
1669 }
1670 
1671 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1672 {
1673 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1674 	struct iscsi_session *sess;
1675 	struct ddb_entry *ddb_entry;
1676 	struct scsi_qla_host *ha;
1677 	struct dev_db_entry *fw_ddb_entry = NULL;
1678 	dma_addr_t fw_ddb_entry_dma;
1679 	uint32_t mbx_sts = 0;
1680 	int ret = 0;
1681 	int status = QLA_SUCCESS;
1682 
1683 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1684 	sess = cls_sess->dd_data;
1685 	ddb_entry = sess->dd_data;
1686 	ha = ddb_entry->ha;
1687 
1688 	/* Check if we have  matching FW DDB, if yes then do not
1689 	 * login to this target. This could cause target to logout previous
1690 	 * connection
1691 	 */
1692 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1693 	if (ret == QLA_SUCCESS) {
1694 		ql4_printk(KERN_INFO, ha,
1695 			   "Session already exist in FW.\n");
1696 		ret = -EEXIST;
1697 		goto exit_conn_start;
1698 	}
1699 
1700 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1701 					  &fw_ddb_entry_dma, GFP_KERNEL);
1702 	if (!fw_ddb_entry) {
1703 		ql4_printk(KERN_ERR, ha,
1704 			   "%s: Unable to allocate dma buffer\n", __func__);
1705 		ret = -ENOMEM;
1706 		goto exit_conn_start;
1707 	}
1708 
1709 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1710 	if (ret) {
1711 		/* If iscsid is stopped and started then no need to do
1712 		* set param again since ddb state will be already
1713 		* active and FW does not allow set ddb to an
1714 		* active session.
1715 		*/
1716 		if (mbx_sts)
1717 			if (ddb_entry->fw_ddb_device_state ==
1718 						DDB_DS_SESSION_ACTIVE) {
1719 				ddb_entry->unblock_sess(ddb_entry->sess);
1720 				goto exit_set_param;
1721 			}
1722 
1723 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1724 			   __func__, ddb_entry->fw_ddb_index);
1725 		goto exit_conn_start;
1726 	}
1727 
1728 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1729 	if (status == QLA_ERROR) {
1730 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1731 			   sess->targetname);
1732 		ret = -EINVAL;
1733 		goto exit_conn_start;
1734 	}
1735 
1736 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1737 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1738 
1739 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1740 		      ddb_entry->fw_ddb_device_state));
1741 
1742 exit_set_param:
1743 	ret = 0;
1744 
1745 exit_conn_start:
1746 	if (fw_ddb_entry)
1747 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1748 				  fw_ddb_entry, fw_ddb_entry_dma);
1749 	return ret;
1750 }
1751 
1752 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1753 {
1754 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1755 	struct iscsi_session *sess;
1756 	struct scsi_qla_host *ha;
1757 	struct ddb_entry *ddb_entry;
1758 	int options;
1759 
1760 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1761 	sess = cls_sess->dd_data;
1762 	ddb_entry = sess->dd_data;
1763 	ha = ddb_entry->ha;
1764 
1765 	options = LOGOUT_OPTION_CLOSE_SESSION;
1766 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1767 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1768 }
1769 
1770 static void qla4xxx_task_work(struct work_struct *wdata)
1771 {
1772 	struct ql4_task_data *task_data;
1773 	struct scsi_qla_host *ha;
1774 	struct passthru_status *sts;
1775 	struct iscsi_task *task;
1776 	struct iscsi_hdr *hdr;
1777 	uint8_t *data;
1778 	uint32_t data_len;
1779 	struct iscsi_conn *conn;
1780 	int hdr_len;
1781 	itt_t itt;
1782 
1783 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1784 	ha = task_data->ha;
1785 	task = task_data->task;
1786 	sts = &task_data->sts;
1787 	hdr_len = sizeof(struct iscsi_hdr);
1788 
1789 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1790 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1791 	DEBUG3(printk(KERN_INFO "Response buffer"));
1792 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1793 
1794 	conn = task->conn;
1795 
1796 	switch (sts->completionStatus) {
1797 	case PASSTHRU_STATUS_COMPLETE:
1798 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1799 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1800 		itt = sts->handle;
1801 		hdr->itt = itt;
1802 		data = task_data->resp_buffer + hdr_len;
1803 		data_len = task_data->resp_len - hdr_len;
1804 		iscsi_complete_pdu(conn, hdr, data, data_len);
1805 		break;
1806 	default:
1807 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1808 			   sts->completionStatus);
1809 		break;
1810 	}
1811 	return;
1812 }
1813 
1814 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1815 {
1816 	struct ql4_task_data *task_data;
1817 	struct iscsi_session *sess;
1818 	struct ddb_entry *ddb_entry;
1819 	struct scsi_qla_host *ha;
1820 	int hdr_len;
1821 
1822 	sess = task->conn->session;
1823 	ddb_entry = sess->dd_data;
1824 	ha = ddb_entry->ha;
1825 	task_data = task->dd_data;
1826 	memset(task_data, 0, sizeof(struct ql4_task_data));
1827 
1828 	if (task->sc) {
1829 		ql4_printk(KERN_INFO, ha,
1830 			   "%s: SCSI Commands not implemented\n", __func__);
1831 		return -EINVAL;
1832 	}
1833 
1834 	hdr_len = sizeof(struct iscsi_hdr);
1835 	task_data->ha = ha;
1836 	task_data->task = task;
1837 
1838 	if (task->data_count) {
1839 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1840 						     task->data_count,
1841 						     PCI_DMA_TODEVICE);
1842 	}
1843 
1844 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1845 		      __func__, task->conn->max_recv_dlength, hdr_len));
1846 
1847 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1848 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1849 						    task_data->resp_len,
1850 						    &task_data->resp_dma,
1851 						    GFP_ATOMIC);
1852 	if (!task_data->resp_buffer)
1853 		goto exit_alloc_pdu;
1854 
1855 	task_data->req_len = task->data_count + hdr_len;
1856 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1857 						   task_data->req_len,
1858 						   &task_data->req_dma,
1859 						   GFP_ATOMIC);
1860 	if (!task_data->req_buffer)
1861 		goto exit_alloc_pdu;
1862 
1863 	task->hdr = task_data->req_buffer;
1864 
1865 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1866 
1867 	return 0;
1868 
1869 exit_alloc_pdu:
1870 	if (task_data->resp_buffer)
1871 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1872 				  task_data->resp_buffer, task_data->resp_dma);
1873 
1874 	if (task_data->req_buffer)
1875 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1876 				  task_data->req_buffer, task_data->req_dma);
1877 	return -ENOMEM;
1878 }
1879 
1880 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1881 {
1882 	struct ql4_task_data *task_data;
1883 	struct iscsi_session *sess;
1884 	struct ddb_entry *ddb_entry;
1885 	struct scsi_qla_host *ha;
1886 	int hdr_len;
1887 
1888 	hdr_len = sizeof(struct iscsi_hdr);
1889 	sess = task->conn->session;
1890 	ddb_entry = sess->dd_data;
1891 	ha = ddb_entry->ha;
1892 	task_data = task->dd_data;
1893 
1894 	if (task->data_count) {
1895 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1896 				 task->data_count, PCI_DMA_TODEVICE);
1897 	}
1898 
1899 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1900 		      __func__, task->conn->max_recv_dlength, hdr_len));
1901 
1902 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1903 			  task_data->resp_buffer, task_data->resp_dma);
1904 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1905 			  task_data->req_buffer, task_data->req_dma);
1906 	return;
1907 }
1908 
1909 static int qla4xxx_task_xmit(struct iscsi_task *task)
1910 {
1911 	struct scsi_cmnd *sc = task->sc;
1912 	struct iscsi_session *sess = task->conn->session;
1913 	struct ddb_entry *ddb_entry = sess->dd_data;
1914 	struct scsi_qla_host *ha = ddb_entry->ha;
1915 
1916 	if (!sc)
1917 		return qla4xxx_send_passthru0(task);
1918 
1919 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1920 		   __func__);
1921 	return -ENOSYS;
1922 }
1923 
1924 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1925 				     struct dev_db_entry *fw_ddb_entry,
1926 				     struct iscsi_cls_session *cls_sess,
1927 				     struct iscsi_cls_conn *cls_conn)
1928 {
1929 	int buflen = 0;
1930 	struct iscsi_session *sess;
1931 	struct ddb_entry *ddb_entry;
1932 	struct iscsi_conn *conn;
1933 	char ip_addr[DDB_IPADDR_LEN];
1934 	uint16_t options = 0;
1935 
1936 	sess = cls_sess->dd_data;
1937 	ddb_entry = sess->dd_data;
1938 	conn = cls_conn->dd_data;
1939 
1940 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1941 
1942 	conn->max_recv_dlength = BYTE_UNITS *
1943 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1944 
1945 	conn->max_xmit_dlength = BYTE_UNITS *
1946 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1947 
1948 	sess->initial_r2t_en =
1949 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1950 
1951 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1952 
1953 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1954 
1955 	sess->first_burst = BYTE_UNITS *
1956 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1957 
1958 	sess->max_burst = BYTE_UNITS *
1959 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1960 
1961 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1962 
1963 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1964 
1965 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1966 
1967 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1968 
1969 	options = le16_to_cpu(fw_ddb_entry->options);
1970 	if (options & DDB_OPT_IPV6_DEVICE)
1971 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1972 	else
1973 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1974 
1975 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1976 			(char *)fw_ddb_entry->iscsi_name, buflen);
1977 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1978 			(char *)ha->name_string, buflen);
1979 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1980 			(char *)ip_addr, buflen);
1981 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1982 			(char *)fw_ddb_entry->iscsi_alias, buflen);
1983 }
1984 
1985 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1986 					     struct ddb_entry *ddb_entry)
1987 {
1988 	struct iscsi_cls_session *cls_sess;
1989 	struct iscsi_cls_conn *cls_conn;
1990 	uint32_t ddb_state;
1991 	dma_addr_t fw_ddb_entry_dma;
1992 	struct dev_db_entry *fw_ddb_entry;
1993 
1994 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1995 					  &fw_ddb_entry_dma, GFP_KERNEL);
1996 	if (!fw_ddb_entry) {
1997 		ql4_printk(KERN_ERR, ha,
1998 			   "%s: Unable to allocate dma buffer\n", __func__);
1999 		goto exit_session_conn_fwddb_param;
2000 	}
2001 
2002 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2003 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2004 				    NULL, NULL, NULL) == QLA_ERROR) {
2005 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2006 				  "get_ddb_entry for fw_ddb_index %d\n",
2007 				  ha->host_no, __func__,
2008 				  ddb_entry->fw_ddb_index));
2009 		goto exit_session_conn_fwddb_param;
2010 	}
2011 
2012 	cls_sess = ddb_entry->sess;
2013 
2014 	cls_conn = ddb_entry->conn;
2015 
2016 	/* Update params */
2017 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2018 
2019 exit_session_conn_fwddb_param:
2020 	if (fw_ddb_entry)
2021 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2022 				  fw_ddb_entry, fw_ddb_entry_dma);
2023 }
2024 
2025 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2026 				       struct ddb_entry *ddb_entry)
2027 {
2028 	struct iscsi_cls_session *cls_sess;
2029 	struct iscsi_cls_conn *cls_conn;
2030 	struct iscsi_session *sess;
2031 	struct iscsi_conn *conn;
2032 	uint32_t ddb_state;
2033 	dma_addr_t fw_ddb_entry_dma;
2034 	struct dev_db_entry *fw_ddb_entry;
2035 
2036 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2037 					  &fw_ddb_entry_dma, GFP_KERNEL);
2038 	if (!fw_ddb_entry) {
2039 		ql4_printk(KERN_ERR, ha,
2040 			   "%s: Unable to allocate dma buffer\n", __func__);
2041 		goto exit_session_conn_param;
2042 	}
2043 
2044 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2045 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2046 				    NULL, NULL, NULL) == QLA_ERROR) {
2047 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2048 				  "get_ddb_entry for fw_ddb_index %d\n",
2049 				  ha->host_no, __func__,
2050 				  ddb_entry->fw_ddb_index));
2051 		goto exit_session_conn_param;
2052 	}
2053 
2054 	cls_sess = ddb_entry->sess;
2055 	sess = cls_sess->dd_data;
2056 
2057 	cls_conn = ddb_entry->conn;
2058 	conn = cls_conn->dd_data;
2059 
2060 	/* Update timers after login */
2061 	ddb_entry->default_relogin_timeout =
2062 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2063 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2064 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2065 	ddb_entry->default_time2wait =
2066 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2067 
2068 	/* Update params */
2069 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2070 	conn->max_recv_dlength = BYTE_UNITS *
2071 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2072 
2073 	conn->max_xmit_dlength = BYTE_UNITS *
2074 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2075 
2076 	sess->initial_r2t_en =
2077 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2078 
2079 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2080 
2081 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2082 
2083 	sess->first_burst = BYTE_UNITS *
2084 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2085 
2086 	sess->max_burst = BYTE_UNITS *
2087 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2088 
2089 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2090 
2091 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2092 
2093 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2094 
2095 	memcpy(sess->initiatorname, ha->name_string,
2096 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2097 
2098 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2099 			(char *)fw_ddb_entry->iscsi_alias, 0);
2100 
2101 exit_session_conn_param:
2102 	if (fw_ddb_entry)
2103 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2104 				  fw_ddb_entry, fw_ddb_entry_dma);
2105 }
2106 
2107 /*
2108  * Timer routines
2109  */
2110 
2111 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2112 				unsigned long interval)
2113 {
2114 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2115 		     __func__, ha->host->host_no));
2116 	init_timer(&ha->timer);
2117 	ha->timer.expires = jiffies + interval * HZ;
2118 	ha->timer.data = (unsigned long)ha;
2119 	ha->timer.function = (void (*)(unsigned long))func;
2120 	add_timer(&ha->timer);
2121 	ha->timer_active = 1;
2122 }
2123 
2124 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2125 {
2126 	del_timer_sync(&ha->timer);
2127 	ha->timer_active = 0;
2128 }
2129 
2130 /***
2131  * qla4xxx_mark_device_missing - blocks the session
2132  * @cls_session: Pointer to the session to be blocked
2133  * @ddb_entry: Pointer to device database entry
2134  *
2135  * This routine marks a device missing and close connection.
2136  **/
2137 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2138 {
2139 	iscsi_block_session(cls_session);
2140 }
2141 
2142 /**
2143  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2144  * @ha: Pointer to host adapter structure.
2145  *
2146  * This routine marks a device missing and resets the relogin retry count.
2147  **/
2148 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2149 {
2150 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2151 }
2152 
2153 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2154 				       struct ddb_entry *ddb_entry,
2155 				       struct scsi_cmnd *cmd)
2156 {
2157 	struct srb *srb;
2158 
2159 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2160 	if (!srb)
2161 		return srb;
2162 
2163 	kref_init(&srb->srb_ref);
2164 	srb->ha = ha;
2165 	srb->ddb = ddb_entry;
2166 	srb->cmd = cmd;
2167 	srb->flags = 0;
2168 	CMD_SP(cmd) = (void *)srb;
2169 
2170 	return srb;
2171 }
2172 
2173 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2174 {
2175 	struct scsi_cmnd *cmd = srb->cmd;
2176 
2177 	if (srb->flags & SRB_DMA_VALID) {
2178 		scsi_dma_unmap(cmd);
2179 		srb->flags &= ~SRB_DMA_VALID;
2180 	}
2181 	CMD_SP(cmd) = NULL;
2182 }
2183 
2184 void qla4xxx_srb_compl(struct kref *ref)
2185 {
2186 	struct srb *srb = container_of(ref, struct srb, srb_ref);
2187 	struct scsi_cmnd *cmd = srb->cmd;
2188 	struct scsi_qla_host *ha = srb->ha;
2189 
2190 	qla4xxx_srb_free_dma(ha, srb);
2191 
2192 	mempool_free(srb, ha->srb_mempool);
2193 
2194 	cmd->scsi_done(cmd);
2195 }
2196 
2197 /**
2198  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2199  * @host: scsi host
2200  * @cmd: Pointer to Linux's SCSI command structure
2201  *
2202  * Remarks:
2203  * This routine is invoked by Linux to send a SCSI command to the driver.
2204  * The mid-level driver tries to ensure that queuecommand never gets
2205  * invoked concurrently with itself or the interrupt handler (although
2206  * the interrupt handler may call this routine as part of request-
2207  * completion handling).   Unfortunely, it sometimes calls the scheduler
2208  * in interrupt context which is a big NO! NO!.
2209  **/
2210 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2211 {
2212 	struct scsi_qla_host *ha = to_qla_host(host);
2213 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
2214 	struct iscsi_cls_session *sess = ddb_entry->sess;
2215 	struct srb *srb;
2216 	int rval;
2217 
2218 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2219 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2220 			cmd->result = DID_NO_CONNECT << 16;
2221 		else
2222 			cmd->result = DID_REQUEUE << 16;
2223 		goto qc_fail_command;
2224 	}
2225 
2226 	if (!sess) {
2227 		cmd->result = DID_IMM_RETRY << 16;
2228 		goto qc_fail_command;
2229 	}
2230 
2231 	rval = iscsi_session_chkready(sess);
2232 	if (rval) {
2233 		cmd->result = rval;
2234 		goto qc_fail_command;
2235 	}
2236 
2237 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2238 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2239 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2240 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2241 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2242 	    !test_bit(AF_ONLINE, &ha->flags) ||
2243 	    !test_bit(AF_LINK_UP, &ha->flags) ||
2244 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2245 		goto qc_host_busy;
2246 
2247 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2248 	if (!srb)
2249 		goto qc_host_busy;
2250 
2251 	rval = qla4xxx_send_command_to_isp(ha, srb);
2252 	if (rval != QLA_SUCCESS)
2253 		goto qc_host_busy_free_sp;
2254 
2255 	return 0;
2256 
2257 qc_host_busy_free_sp:
2258 	qla4xxx_srb_free_dma(ha, srb);
2259 	mempool_free(srb, ha->srb_mempool);
2260 
2261 qc_host_busy:
2262 	return SCSI_MLQUEUE_HOST_BUSY;
2263 
2264 qc_fail_command:
2265 	cmd->scsi_done(cmd);
2266 
2267 	return 0;
2268 }
2269 
2270 /**
2271  * qla4xxx_mem_free - frees memory allocated to adapter
2272  * @ha: Pointer to host adapter structure.
2273  *
2274  * Frees memory previously allocated by qla4xxx_mem_alloc
2275  **/
2276 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2277 {
2278 	if (ha->queues)
2279 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2280 				  ha->queues_dma);
2281 
2282 	 if (ha->fw_dump)
2283 		vfree(ha->fw_dump);
2284 
2285 	ha->queues_len = 0;
2286 	ha->queues = NULL;
2287 	ha->queues_dma = 0;
2288 	ha->request_ring = NULL;
2289 	ha->request_dma = 0;
2290 	ha->response_ring = NULL;
2291 	ha->response_dma = 0;
2292 	ha->shadow_regs = NULL;
2293 	ha->shadow_regs_dma = 0;
2294 	ha->fw_dump = NULL;
2295 	ha->fw_dump_size = 0;
2296 
2297 	/* Free srb pool. */
2298 	if (ha->srb_mempool)
2299 		mempool_destroy(ha->srb_mempool);
2300 
2301 	ha->srb_mempool = NULL;
2302 
2303 	if (ha->chap_dma_pool)
2304 		dma_pool_destroy(ha->chap_dma_pool);
2305 
2306 	if (ha->chap_list)
2307 		vfree(ha->chap_list);
2308 	ha->chap_list = NULL;
2309 
2310 	if (ha->fw_ddb_dma_pool)
2311 		dma_pool_destroy(ha->fw_ddb_dma_pool);
2312 
2313 	/* release io space registers  */
2314 	if (is_qla8022(ha)) {
2315 		if (ha->nx_pcibase)
2316 			iounmap(
2317 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2318 	} else if (ha->reg)
2319 		iounmap(ha->reg);
2320 	pci_release_regions(ha->pdev);
2321 }
2322 
2323 /**
2324  * qla4xxx_mem_alloc - allocates memory for use by adapter.
2325  * @ha: Pointer to host adapter structure
2326  *
2327  * Allocates DMA memory for request and response queues. Also allocates memory
2328  * for srbs.
2329  **/
2330 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2331 {
2332 	unsigned long align;
2333 
2334 	/* Allocate contiguous block of DMA memory for queues. */
2335 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2336 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2337 			  sizeof(struct shadow_regs) +
2338 			  MEM_ALIGN_VALUE +
2339 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2340 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2341 					&ha->queues_dma, GFP_KERNEL);
2342 	if (ha->queues == NULL) {
2343 		ql4_printk(KERN_WARNING, ha,
2344 		    "Memory Allocation failed - queues.\n");
2345 
2346 		goto mem_alloc_error_exit;
2347 	}
2348 	memset(ha->queues, 0, ha->queues_len);
2349 
2350 	/*
2351 	 * As per RISC alignment requirements -- the bus-address must be a
2352 	 * multiple of the request-ring size (in bytes).
2353 	 */
2354 	align = 0;
2355 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2356 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2357 					   (MEM_ALIGN_VALUE - 1));
2358 
2359 	/* Update request and response queue pointers. */
2360 	ha->request_dma = ha->queues_dma + align;
2361 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
2362 	ha->response_dma = ha->queues_dma + align +
2363 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2364 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
2365 						    (REQUEST_QUEUE_DEPTH *
2366 						     QUEUE_SIZE));
2367 	ha->shadow_regs_dma = ha->queues_dma + align +
2368 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2369 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2370 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2371 						  (REQUEST_QUEUE_DEPTH *
2372 						   QUEUE_SIZE) +
2373 						  (RESPONSE_QUEUE_DEPTH *
2374 						   QUEUE_SIZE));
2375 
2376 	/* Allocate memory for srb pool. */
2377 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2378 					 mempool_free_slab, srb_cachep);
2379 	if (ha->srb_mempool == NULL) {
2380 		ql4_printk(KERN_WARNING, ha,
2381 		    "Memory Allocation failed - SRB Pool.\n");
2382 
2383 		goto mem_alloc_error_exit;
2384 	}
2385 
2386 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2387 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
2388 
2389 	if (ha->chap_dma_pool == NULL) {
2390 		ql4_printk(KERN_WARNING, ha,
2391 		    "%s: chap_dma_pool allocation failed..\n", __func__);
2392 		goto mem_alloc_error_exit;
2393 	}
2394 
2395 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2396 					      DDB_DMA_BLOCK_SIZE, 8, 0);
2397 
2398 	if (ha->fw_ddb_dma_pool == NULL) {
2399 		ql4_printk(KERN_WARNING, ha,
2400 			   "%s: fw_ddb_dma_pool allocation failed..\n",
2401 			   __func__);
2402 		goto mem_alloc_error_exit;
2403 	}
2404 
2405 	return QLA_SUCCESS;
2406 
2407 mem_alloc_error_exit:
2408 	qla4xxx_mem_free(ha);
2409 	return QLA_ERROR;
2410 }
2411 
2412 /**
2413  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2414  * @ha: adapter block pointer.
2415  *
2416  * Note: The caller should not hold the idc lock.
2417  **/
2418 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2419 {
2420 	uint32_t temp, temp_state, temp_val;
2421 	int status = QLA_SUCCESS;
2422 
2423 	temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
2424 
2425 	temp_state = qla82xx_get_temp_state(temp);
2426 	temp_val = qla82xx_get_temp_val(temp);
2427 
2428 	if (temp_state == QLA82XX_TEMP_PANIC) {
2429 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2430 			   " exceeds maximum allowed. Hardware has been shut"
2431 			   " down.\n", temp_val);
2432 		status = QLA_ERROR;
2433 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2434 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
2435 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2436 				   " degrees C exceeds operating range."
2437 				   " Immediate action needed.\n", temp_val);
2438 	} else {
2439 		if (ha->temperature == QLA82XX_TEMP_WARN)
2440 			ql4_printk(KERN_INFO, ha, "Device temperature is"
2441 				   " now %d degrees C in normal range.\n",
2442 				   temp_val);
2443 	}
2444 	ha->temperature = temp_state;
2445 	return status;
2446 }
2447 
2448 /**
2449  * qla4_8xxx_check_fw_alive  - Check firmware health
2450  * @ha: Pointer to host adapter structure.
2451  *
2452  * Context: Interrupt
2453  **/
2454 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2455 {
2456 	uint32_t fw_heartbeat_counter;
2457 	int status = QLA_SUCCESS;
2458 
2459 	fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2460 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2461 	if (fw_heartbeat_counter == 0xffffffff) {
2462 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2463 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2464 		    ha->host_no, __func__));
2465 		return status;
2466 	}
2467 
2468 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2469 		ha->seconds_since_last_heartbeat++;
2470 		/* FW not alive after 2 seconds */
2471 		if (ha->seconds_since_last_heartbeat == 2) {
2472 			ha->seconds_since_last_heartbeat = 0;
2473 
2474 			ql4_printk(KERN_INFO, ha,
2475 				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
2476 				   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2477 				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2478 				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2479 				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
2480 				   ha->host_no, __func__,
2481 				   qla4_8xxx_rd_32(ha,
2482 						   QLA82XX_PEG_HALT_STATUS1),
2483 				   qla4_8xxx_rd_32(ha,
2484 						   QLA82XX_PEG_HALT_STATUS2),
2485 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2486 						   0x3c),
2487 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2488 						   0x3c),
2489 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2490 						   0x3c),
2491 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2492 						   0x3c),
2493 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2494 						   0x3c));
2495 			status = QLA_ERROR;
2496 		}
2497 	} else
2498 		ha->seconds_since_last_heartbeat = 0;
2499 
2500 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
2501 	return status;
2502 }
2503 
2504 /**
2505  * qla4_8xxx_watchdog - Poll dev state
2506  * @ha: Pointer to host adapter structure.
2507  *
2508  * Context: Interrupt
2509  **/
2510 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2511 {
2512 	uint32_t dev_state, halt_status;
2513 
2514 	/* don't poll if reset is going on */
2515 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2516 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2517 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2518 		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2519 
2520 		if (qla4_8xxx_check_temp(ha)) {
2521 			ql4_printk(KERN_INFO, ha, "disabling pause"
2522 				   " transmit on port 0 & 1.\n");
2523 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2524 					CRB_NIU_XG_PAUSE_CTL_P0 |
2525 					CRB_NIU_XG_PAUSE_CTL_P1);
2526 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 			qla4xxx_wake_dpc(ha);
2528 		} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
2529 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2530 			if (!ql4xdontresethba) {
2531 				ql4_printk(KERN_INFO, ha, "%s: HW State: "
2532 				    "NEED RESET!\n", __func__);
2533 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
2534 				qla4xxx_wake_dpc(ha);
2535 			}
2536 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2537 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2538 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2539 			    __func__);
2540 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2541 			qla4xxx_wake_dpc(ha);
2542 		} else  {
2543 			/* Check firmware health */
2544 			if (qla4_8xxx_check_fw_alive(ha)) {
2545 				ql4_printk(KERN_INFO, ha, "disabling pause"
2546 					   " transmit on port 0 & 1.\n");
2547 				qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2548 						CRB_NIU_XG_PAUSE_CTL_P0 |
2549 						CRB_NIU_XG_PAUSE_CTL_P1);
2550 				halt_status = qla4_8xxx_rd_32(ha,
2551 						QLA82XX_PEG_HALT_STATUS1);
2552 
2553 				if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2554 					ql4_printk(KERN_ERR, ha, "%s:"
2555 						   " Firmware aborted with"
2556 						   " error code 0x00006700."
2557 						   " Device is being reset\n",
2558 						   __func__);
2559 
2560 				/* Since we cannot change dev_state in interrupt
2561 				 * context, set appropriate DPC flag then wakeup
2562 				 * DPC */
2563 				if (halt_status & HALT_STATUS_UNRECOVERABLE)
2564 					set_bit(DPC_HA_UNRECOVERABLE,
2565 						&ha->dpc_flags);
2566 				else {
2567 					ql4_printk(KERN_INFO, ha, "%s: detect "
2568 						   "abort needed!\n", __func__);
2569 					set_bit(DPC_RESET_HA, &ha->dpc_flags);
2570 				}
2571 				qla4xxx_mailbox_premature_completion(ha);
2572 				qla4xxx_wake_dpc(ha);
2573 			}
2574 		}
2575 	}
2576 }
2577 
2578 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2579 {
2580 	struct iscsi_session *sess;
2581 	struct ddb_entry *ddb_entry;
2582 	struct scsi_qla_host *ha;
2583 
2584 	sess = cls_sess->dd_data;
2585 	ddb_entry = sess->dd_data;
2586 	ha = ddb_entry->ha;
2587 
2588 	if (!(ddb_entry->ddb_type == FLASH_DDB))
2589 		return;
2590 
2591 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2592 	    !iscsi_is_session_online(cls_sess)) {
2593 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2594 		    INVALID_ENTRY) {
2595 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2596 					0) {
2597 				atomic_set(&ddb_entry->retry_relogin_timer,
2598 					   INVALID_ENTRY);
2599 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2600 				set_bit(DF_RELOGIN, &ddb_entry->flags);
2601 				DEBUG2(ql4_printk(KERN_INFO, ha,
2602 				       "%s: index [%d] login device\n",
2603 					__func__, ddb_entry->fw_ddb_index));
2604 			} else
2605 				atomic_dec(&ddb_entry->retry_relogin_timer);
2606 		}
2607 	}
2608 
2609 	/* Wait for relogin to timeout */
2610 	if (atomic_read(&ddb_entry->relogin_timer) &&
2611 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2612 		/*
2613 		 * If the relogin times out and the device is
2614 		 * still NOT ONLINE then try and relogin again.
2615 		 */
2616 		if (!iscsi_is_session_online(cls_sess)) {
2617 			/* Reset retry relogin timer */
2618 			atomic_inc(&ddb_entry->relogin_retry_count);
2619 			DEBUG2(ql4_printk(KERN_INFO, ha,
2620 				"%s: index[%d] relogin timed out-retrying"
2621 				" relogin (%d), retry (%d)\n", __func__,
2622 				ddb_entry->fw_ddb_index,
2623 				atomic_read(&ddb_entry->relogin_retry_count),
2624 				ddb_entry->default_time2wait + 4));
2625 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2626 			atomic_set(&ddb_entry->retry_relogin_timer,
2627 				   ddb_entry->default_time2wait + 4);
2628 		}
2629 	}
2630 }
2631 
2632 /**
2633  * qla4xxx_timer - checks every second for work to do.
2634  * @ha: Pointer to host adapter structure.
2635  **/
2636 static void qla4xxx_timer(struct scsi_qla_host *ha)
2637 {
2638 	int start_dpc = 0;
2639 	uint16_t w;
2640 
2641 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2642 
2643 	/* If we are in the middle of AER/EEH processing
2644 	 * skip any processing and reschedule the timer
2645 	 */
2646 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2647 		mod_timer(&ha->timer, jiffies + HZ);
2648 		return;
2649 	}
2650 
2651 	/* Hardware read to trigger an EEH error during mailbox waits. */
2652 	if (!pci_channel_offline(ha->pdev))
2653 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
2654 
2655 	if (is_qla8022(ha)) {
2656 		qla4_8xxx_watchdog(ha);
2657 	}
2658 
2659 	if (!is_qla8022(ha)) {
2660 		/* Check for heartbeat interval. */
2661 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2662 		    ha->heartbeat_interval != 0) {
2663 			ha->seconds_since_last_heartbeat++;
2664 			if (ha->seconds_since_last_heartbeat >
2665 			    ha->heartbeat_interval + 2)
2666 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
2667 		}
2668 	}
2669 
2670 	/* Process any deferred work. */
2671 	if (!list_empty(&ha->work_list))
2672 		start_dpc++;
2673 
2674 	/* Wakeup the dpc routine for this adapter, if needed. */
2675 	if (start_dpc ||
2676 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2677 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2678 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
2679 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2680 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2681 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
2682 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
2683 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2684 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2685 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
2686 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2687 			      " - dpc flags = 0x%lx\n",
2688 			      ha->host_no, __func__, ha->dpc_flags));
2689 		qla4xxx_wake_dpc(ha);
2690 	}
2691 
2692 	/* Reschedule timer thread to call us back in one second */
2693 	mod_timer(&ha->timer, jiffies + HZ);
2694 
2695 	DEBUG2(ha->seconds_since_last_intr++);
2696 }
2697 
2698 /**
2699  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2700  * @ha: Pointer to host adapter structure.
2701  *
2702  * This routine stalls the driver until all outstanding commands are returned.
2703  * Caller must release the Hardware Lock prior to calling this routine.
2704  **/
2705 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2706 {
2707 	uint32_t index = 0;
2708 	unsigned long flags;
2709 	struct scsi_cmnd *cmd;
2710 
2711 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2712 
2713 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2714 	    "complete\n", WAIT_CMD_TOV));
2715 
2716 	while (!time_after_eq(jiffies, wtime)) {
2717 		spin_lock_irqsave(&ha->hardware_lock, flags);
2718 		/* Find a command that hasn't completed. */
2719 		for (index = 0; index < ha->host->can_queue; index++) {
2720 			cmd = scsi_host_find_tag(ha->host, index);
2721 			/*
2722 			 * We cannot just check if the index is valid,
2723 			 * becase if we are run from the scsi eh, then
2724 			 * the scsi/block layer is going to prevent
2725 			 * the tag from being released.
2726 			 */
2727 			if (cmd != NULL && CMD_SP(cmd))
2728 				break;
2729 		}
2730 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2731 
2732 		/* If No Commands are pending, wait is complete */
2733 		if (index == ha->host->can_queue)
2734 			return QLA_SUCCESS;
2735 
2736 		msleep(1000);
2737 	}
2738 	/* If we timed out on waiting for commands to come back
2739 	 * return ERROR. */
2740 	return QLA_ERROR;
2741 }
2742 
2743 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
2744 {
2745 	uint32_t ctrl_status;
2746 	unsigned long flags = 0;
2747 
2748 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
2749 
2750 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2751 		return QLA_ERROR;
2752 
2753 	spin_lock_irqsave(&ha->hardware_lock, flags);
2754 
2755 	/*
2756 	 * If the SCSI Reset Interrupt bit is set, clear it.
2757 	 * Otherwise, the Soft Reset won't work.
2758 	 */
2759 	ctrl_status = readw(&ha->reg->ctrl_status);
2760 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2761 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2762 
2763 	/* Issue Soft Reset */
2764 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
2765 	readl(&ha->reg->ctrl_status);
2766 
2767 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2768 	return QLA_SUCCESS;
2769 }
2770 
2771 /**
2772  * qla4xxx_soft_reset - performs soft reset.
2773  * @ha: Pointer to host adapter structure.
2774  **/
2775 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2776 {
2777 	uint32_t max_wait_time;
2778 	unsigned long flags = 0;
2779 	int status;
2780 	uint32_t ctrl_status;
2781 
2782 	status = qla4xxx_hw_reset(ha);
2783 	if (status != QLA_SUCCESS)
2784 		return status;
2785 
2786 	status = QLA_ERROR;
2787 	/* Wait until the Network Reset Intr bit is cleared */
2788 	max_wait_time = RESET_INTR_TOV;
2789 	do {
2790 		spin_lock_irqsave(&ha->hardware_lock, flags);
2791 		ctrl_status = readw(&ha->reg->ctrl_status);
2792 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2793 
2794 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2795 			break;
2796 
2797 		msleep(1000);
2798 	} while ((--max_wait_time));
2799 
2800 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2801 		DEBUG2(printk(KERN_WARNING
2802 			      "scsi%ld: Network Reset Intr not cleared by "
2803 			      "Network function, clearing it now!\n",
2804 			      ha->host_no));
2805 		spin_lock_irqsave(&ha->hardware_lock, flags);
2806 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2807 		readl(&ha->reg->ctrl_status);
2808 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2809 	}
2810 
2811 	/* Wait until the firmware tells us the Soft Reset is done */
2812 	max_wait_time = SOFT_RESET_TOV;
2813 	do {
2814 		spin_lock_irqsave(&ha->hardware_lock, flags);
2815 		ctrl_status = readw(&ha->reg->ctrl_status);
2816 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2817 
2818 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2819 			status = QLA_SUCCESS;
2820 			break;
2821 		}
2822 
2823 		msleep(1000);
2824 	} while ((--max_wait_time));
2825 
2826 	/*
2827 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2828 	 * after the soft reset has taken place.
2829 	 */
2830 	spin_lock_irqsave(&ha->hardware_lock, flags);
2831 	ctrl_status = readw(&ha->reg->ctrl_status);
2832 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2833 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2834 		readl(&ha->reg->ctrl_status);
2835 	}
2836 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2837 
2838 	/* If soft reset fails then most probably the bios on other
2839 	 * function is also enabled.
2840 	 * Since the initialization is sequential the other fn
2841 	 * wont be able to acknowledge the soft reset.
2842 	 * Issue a force soft reset to workaround this scenario.
2843 	 */
2844 	if (max_wait_time == 0) {
2845 		/* Issue Force Soft Reset */
2846 		spin_lock_irqsave(&ha->hardware_lock, flags);
2847 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2848 		readl(&ha->reg->ctrl_status);
2849 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2850 		/* Wait until the firmware tells us the Soft Reset is done */
2851 		max_wait_time = SOFT_RESET_TOV;
2852 		do {
2853 			spin_lock_irqsave(&ha->hardware_lock, flags);
2854 			ctrl_status = readw(&ha->reg->ctrl_status);
2855 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
2856 
2857 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2858 				status = QLA_SUCCESS;
2859 				break;
2860 			}
2861 
2862 			msleep(1000);
2863 		} while ((--max_wait_time));
2864 	}
2865 
2866 	return status;
2867 }
2868 
2869 /**
2870  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2871  * @ha: Pointer to host adapter structure.
2872  * @res: returned scsi status
2873  *
2874  * This routine is called just prior to a HARD RESET to return all
2875  * outstanding commands back to the Operating System.
2876  * Caller should make sure that the following locks are released
2877  * before this calling routine: Hardware lock, and io_request_lock.
2878  **/
2879 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2880 {
2881 	struct srb *srb;
2882 	int i;
2883 	unsigned long flags;
2884 
2885 	spin_lock_irqsave(&ha->hardware_lock, flags);
2886 	for (i = 0; i < ha->host->can_queue; i++) {
2887 		srb = qla4xxx_del_from_active_array(ha, i);
2888 		if (srb != NULL) {
2889 			srb->cmd->result = res;
2890 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2891 		}
2892 	}
2893 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2894 }
2895 
2896 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2897 {
2898 	clear_bit(AF_ONLINE, &ha->flags);
2899 
2900 	/* Disable the board */
2901 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2902 
2903 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2904 	qla4xxx_mark_all_devices_missing(ha);
2905 	clear_bit(AF_INIT_DONE, &ha->flags);
2906 }
2907 
2908 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2909 {
2910 	struct iscsi_session *sess;
2911 	struct ddb_entry *ddb_entry;
2912 
2913 	sess = cls_session->dd_data;
2914 	ddb_entry = sess->dd_data;
2915 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2916 
2917 	if (ddb_entry->ddb_type == FLASH_DDB)
2918 		iscsi_block_session(ddb_entry->sess);
2919 	else
2920 		iscsi_session_failure(cls_session->dd_data,
2921 				      ISCSI_ERR_CONN_FAILED);
2922 }
2923 
2924 /**
2925  * qla4xxx_recover_adapter - recovers adapter after a fatal error
2926  * @ha: Pointer to host adapter structure.
2927  **/
2928 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2929 {
2930 	int status = QLA_ERROR;
2931 	uint8_t reset_chip = 0;
2932 	uint32_t dev_state;
2933 	unsigned long wait;
2934 
2935 	/* Stall incoming I/O until we are done */
2936 	scsi_block_requests(ha->host);
2937 	clear_bit(AF_ONLINE, &ha->flags);
2938 	clear_bit(AF_LINK_UP, &ha->flags);
2939 
2940 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2941 
2942 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2943 
2944 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2945 
2946 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2947 		reset_chip = 1;
2948 
2949 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2950 	 * do not reset adapter, jump to initialize_adapter */
2951 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2952 		status = QLA_SUCCESS;
2953 		goto recover_ha_init_adapter;
2954 	}
2955 
2956 	/* For the ISP-82xx adapter, issue a stop_firmware if invoked
2957 	 * from eh_host_reset or ioctl module */
2958 	if (is_qla8022(ha) && !reset_chip &&
2959 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2960 
2961 		DEBUG2(ql4_printk(KERN_INFO, ha,
2962 		    "scsi%ld: %s - Performing stop_firmware...\n",
2963 		    ha->host_no, __func__));
2964 		status = ha->isp_ops->reset_firmware(ha);
2965 		if (status == QLA_SUCCESS) {
2966 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2967 				qla4xxx_cmd_wait(ha);
2968 			ha->isp_ops->disable_intrs(ha);
2969 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2970 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2971 		} else {
2972 			/* If the stop_firmware fails then
2973 			 * reset the entire chip */
2974 			reset_chip = 1;
2975 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2976 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
2977 		}
2978 	}
2979 
2980 	/* Issue full chip reset if recovering from a catastrophic error,
2981 	 * or if stop_firmware fails for ISP-82xx.
2982 	 * This is the default case for ISP-4xxx */
2983 	if (!is_qla8022(ha) || reset_chip) {
2984 		if (!is_qla8022(ha))
2985 			goto chip_reset;
2986 
2987 		/* Check if 82XX firmware is alive or not
2988 		 * We may have arrived here from NEED_RESET
2989 		 * detection only */
2990 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
2991 			goto chip_reset;
2992 
2993 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2994 		while (time_before(jiffies, wait)) {
2995 			if (qla4_8xxx_check_fw_alive(ha)) {
2996 				qla4xxx_mailbox_premature_completion(ha);
2997 				break;
2998 			}
2999 
3000 			set_current_state(TASK_UNINTERRUPTIBLE);
3001 			schedule_timeout(HZ);
3002 		}
3003 
3004 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3005 			qla4xxx_cmd_wait(ha);
3006 chip_reset:
3007 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3008 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3009 		DEBUG2(ql4_printk(KERN_INFO, ha,
3010 		    "scsi%ld: %s - Performing chip reset..\n",
3011 		    ha->host_no, __func__));
3012 		status = ha->isp_ops->reset_chip(ha);
3013 	}
3014 
3015 	/* Flush any pending ddb changed AENs */
3016 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3017 
3018 recover_ha_init_adapter:
3019 	/* Upon successful firmware/chip reset, re-initialize the adapter */
3020 	if (status == QLA_SUCCESS) {
3021 		/* For ISP-4xxx, force function 1 to always initialize
3022 		 * before function 3 to prevent both funcions from
3023 		 * stepping on top of the other */
3024 		if (!is_qla8022(ha) && (ha->mac_index == 3))
3025 			ssleep(6);
3026 
3027 		/* NOTE: AF_ONLINE flag set upon successful completion of
3028 		 *       qla4xxx_initialize_adapter */
3029 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3030 	}
3031 
3032 	/* Retry failed adapter initialization, if necessary
3033 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3034 	 * case to prevent ping-pong resets between functions */
3035 	if (!test_bit(AF_ONLINE, &ha->flags) &&
3036 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3037 		/* Adapter initialization failed, see if we can retry
3038 		 * resetting the ha.
3039 		 * Since we don't want to block the DPC for too long
3040 		 * with multiple resets in the same thread,
3041 		 * utilize DPC to retry */
3042 		if (is_qla8022(ha)) {
3043 			qla4_8xxx_idc_lock(ha);
3044 			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3045 			qla4_8xxx_idc_unlock(ha);
3046 			if (dev_state == QLA82XX_DEV_FAILED) {
3047 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
3048 					   "recover adapter. H/W is in Failed "
3049 					   "state\n", __func__);
3050 				qla4xxx_dead_adapter_cleanup(ha);
3051 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3052 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3053 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3054 						&ha->dpc_flags);
3055 				status = QLA_ERROR;
3056 
3057 				goto exit_recover;
3058 			}
3059 		}
3060 
3061 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3062 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3063 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
3064 				      "(%d) more times\n", ha->host_no,
3065 				      ha->retry_reset_ha_cnt));
3066 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3067 			status = QLA_ERROR;
3068 		} else {
3069 			if (ha->retry_reset_ha_cnt > 0) {
3070 				/* Schedule another Reset HA--DPC will retry */
3071 				ha->retry_reset_ha_cnt--;
3072 				DEBUG2(printk("scsi%ld: recover adapter - "
3073 					      "retry remaining %d\n",
3074 					      ha->host_no,
3075 					      ha->retry_reset_ha_cnt));
3076 				status = QLA_ERROR;
3077 			}
3078 
3079 			if (ha->retry_reset_ha_cnt == 0) {
3080 				/* Recover adapter retries have been exhausted.
3081 				 * Adapter DEAD */
3082 				DEBUG2(printk("scsi%ld: recover adapter "
3083 					      "failed - board disabled\n",
3084 					      ha->host_no));
3085 				qla4xxx_dead_adapter_cleanup(ha);
3086 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3087 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3088 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3089 					  &ha->dpc_flags);
3090 				status = QLA_ERROR;
3091 			}
3092 		}
3093 	} else {
3094 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3095 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3096 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3097 	}
3098 
3099 exit_recover:
3100 	ha->adapter_error_count++;
3101 
3102 	if (test_bit(AF_ONLINE, &ha->flags))
3103 		ha->isp_ops->enable_intrs(ha);
3104 
3105 	scsi_unblock_requests(ha->host);
3106 
3107 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3108 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3109 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3110 
3111 	return status;
3112 }
3113 
3114 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3115 {
3116 	struct iscsi_session *sess;
3117 	struct ddb_entry *ddb_entry;
3118 	struct scsi_qla_host *ha;
3119 
3120 	sess = cls_session->dd_data;
3121 	ddb_entry = sess->dd_data;
3122 	ha = ddb_entry->ha;
3123 	if (!iscsi_is_session_online(cls_session)) {
3124 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3125 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3126 				   " unblock session\n", ha->host_no, __func__,
3127 				   ddb_entry->fw_ddb_index);
3128 			iscsi_unblock_session(ddb_entry->sess);
3129 		} else {
3130 			/* Trigger relogin */
3131 			if (ddb_entry->ddb_type == FLASH_DDB) {
3132 				if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3133 					qla4xxx_arm_relogin_timer(ddb_entry);
3134 			} else
3135 				iscsi_session_failure(cls_session->dd_data,
3136 						      ISCSI_ERR_CONN_FAILED);
3137 		}
3138 	}
3139 }
3140 
3141 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3142 {
3143 	struct iscsi_session *sess;
3144 	struct ddb_entry *ddb_entry;
3145 	struct scsi_qla_host *ha;
3146 
3147 	sess = cls_session->dd_data;
3148 	ddb_entry = sess->dd_data;
3149 	ha = ddb_entry->ha;
3150 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3151 		   " unblock session\n", ha->host_no, __func__,
3152 		   ddb_entry->fw_ddb_index);
3153 
3154 	iscsi_unblock_session(ddb_entry->sess);
3155 
3156 	/* Start scan target */
3157 	if (test_bit(AF_ONLINE, &ha->flags)) {
3158 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3159 			   " start scan\n", ha->host_no, __func__,
3160 			   ddb_entry->fw_ddb_index);
3161 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3162 	}
3163 	return QLA_SUCCESS;
3164 }
3165 
3166 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3167 {
3168 	struct iscsi_session *sess;
3169 	struct ddb_entry *ddb_entry;
3170 	struct scsi_qla_host *ha;
3171 
3172 	sess = cls_session->dd_data;
3173 	ddb_entry = sess->dd_data;
3174 	ha = ddb_entry->ha;
3175 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3176 		   " unblock user space session\n", ha->host_no, __func__,
3177 		   ddb_entry->fw_ddb_index);
3178 	iscsi_conn_start(ddb_entry->conn);
3179 	iscsi_conn_login_event(ddb_entry->conn,
3180 			       ISCSI_CONN_STATE_LOGGED_IN);
3181 
3182 	return QLA_SUCCESS;
3183 }
3184 
3185 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3186 {
3187 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3188 }
3189 
3190 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3191 {
3192 	uint16_t relogin_timer;
3193 	struct iscsi_session *sess;
3194 	struct ddb_entry *ddb_entry;
3195 	struct scsi_qla_host *ha;
3196 
3197 	sess = cls_sess->dd_data;
3198 	ddb_entry = sess->dd_data;
3199 	ha = ddb_entry->ha;
3200 
3201 	relogin_timer = max(ddb_entry->default_relogin_timeout,
3202 			    (uint16_t)RELOGIN_TOV);
3203 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3204 
3205 	DEBUG2(ql4_printk(KERN_INFO, ha,
3206 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3207 			  ddb_entry->fw_ddb_index, relogin_timer));
3208 
3209 	qla4xxx_login_flash_ddb(cls_sess);
3210 }
3211 
3212 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3213 {
3214 	struct iscsi_session *sess;
3215 	struct ddb_entry *ddb_entry;
3216 	struct scsi_qla_host *ha;
3217 
3218 	sess = cls_sess->dd_data;
3219 	ddb_entry = sess->dd_data;
3220 	ha = ddb_entry->ha;
3221 
3222 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3223 		return;
3224 
3225 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3226 	    !iscsi_is_session_online(cls_sess)) {
3227 		DEBUG2(ql4_printk(KERN_INFO, ha,
3228 				  "relogin issued\n"));
3229 		qla4xxx_relogin_flash_ddb(cls_sess);
3230 	}
3231 }
3232 
3233 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3234 {
3235 	if (ha->dpc_thread)
3236 		queue_work(ha->dpc_thread, &ha->dpc_work);
3237 }
3238 
3239 static struct qla4_work_evt *
3240 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3241 		   enum qla4_work_type type)
3242 {
3243 	struct qla4_work_evt *e;
3244 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3245 
3246 	e = kzalloc(size, GFP_ATOMIC);
3247 	if (!e)
3248 		return NULL;
3249 
3250 	INIT_LIST_HEAD(&e->list);
3251 	e->type = type;
3252 	return e;
3253 }
3254 
3255 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3256 			     struct qla4_work_evt *e)
3257 {
3258 	unsigned long flags;
3259 
3260 	spin_lock_irqsave(&ha->work_lock, flags);
3261 	list_add_tail(&e->list, &ha->work_list);
3262 	spin_unlock_irqrestore(&ha->work_lock, flags);
3263 	qla4xxx_wake_dpc(ha);
3264 }
3265 
3266 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3267 			  enum iscsi_host_event_code aen_code,
3268 			  uint32_t data_size, uint8_t *data)
3269 {
3270 	struct qla4_work_evt *e;
3271 
3272 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3273 	if (!e)
3274 		return QLA_ERROR;
3275 
3276 	e->u.aen.code = aen_code;
3277 	e->u.aen.data_size = data_size;
3278 	memcpy(e->u.aen.data, data, data_size);
3279 
3280 	qla4xxx_post_work(ha, e);
3281 
3282 	return QLA_SUCCESS;
3283 }
3284 
3285 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3286 			       uint32_t status, uint32_t pid,
3287 			       uint32_t data_size, uint8_t *data)
3288 {
3289 	struct qla4_work_evt *e;
3290 
3291 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3292 	if (!e)
3293 		return QLA_ERROR;
3294 
3295 	e->u.ping.status = status;
3296 	e->u.ping.pid = pid;
3297 	e->u.ping.data_size = data_size;
3298 	memcpy(e->u.ping.data, data, data_size);
3299 
3300 	qla4xxx_post_work(ha, e);
3301 
3302 	return QLA_SUCCESS;
3303 }
3304 
3305 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3306 {
3307 	struct qla4_work_evt *e, *tmp;
3308 	unsigned long flags;
3309 	LIST_HEAD(work);
3310 
3311 	spin_lock_irqsave(&ha->work_lock, flags);
3312 	list_splice_init(&ha->work_list, &work);
3313 	spin_unlock_irqrestore(&ha->work_lock, flags);
3314 
3315 	list_for_each_entry_safe(e, tmp, &work, list) {
3316 		list_del_init(&e->list);
3317 
3318 		switch (e->type) {
3319 		case QLA4_EVENT_AEN:
3320 			iscsi_post_host_event(ha->host_no,
3321 					      &qla4xxx_iscsi_transport,
3322 					      e->u.aen.code,
3323 					      e->u.aen.data_size,
3324 					      e->u.aen.data);
3325 			break;
3326 		case QLA4_EVENT_PING_STATUS:
3327 			iscsi_ping_comp_event(ha->host_no,
3328 					      &qla4xxx_iscsi_transport,
3329 					      e->u.ping.status,
3330 					      e->u.ping.pid,
3331 					      e->u.ping.data_size,
3332 					      e->u.ping.data);
3333 			break;
3334 		default:
3335 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3336 				   "supported", e->type);
3337 		}
3338 		kfree(e);
3339 	}
3340 }
3341 
3342 /**
3343  * qla4xxx_do_dpc - dpc routine
3344  * @data: in our case pointer to adapter structure
3345  *
3346  * This routine is a task that is schedule by the interrupt handler
3347  * to perform the background processing for interrupts.  We put it
3348  * on a task queue that is consumed whenever the scheduler runs; that's
3349  * so you can do anything (i.e. put the process to sleep etc).  In fact,
3350  * the mid-level tries to sleep when it reaches the driver threshold
3351  * "host->can_queue". This can cause a panic if we were in our interrupt code.
3352  **/
3353 static void qla4xxx_do_dpc(struct work_struct *work)
3354 {
3355 	struct scsi_qla_host *ha =
3356 		container_of(work, struct scsi_qla_host, dpc_work);
3357 	int status = QLA_ERROR;
3358 
3359 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
3360 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3361 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
3362 
3363 	/* Initialization not yet finished. Don't do anything yet. */
3364 	if (!test_bit(AF_INIT_DONE, &ha->flags))
3365 		return;
3366 
3367 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3368 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3369 		    ha->host_no, __func__, ha->flags));
3370 		return;
3371 	}
3372 
3373 	/* post events to application */
3374 	qla4xxx_do_work(ha);
3375 
3376 	if (is_qla8022(ha)) {
3377 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3378 			qla4_8xxx_idc_lock(ha);
3379 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3380 			    QLA82XX_DEV_FAILED);
3381 			qla4_8xxx_idc_unlock(ha);
3382 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3383 			qla4_8xxx_device_state_handler(ha);
3384 		}
3385 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3386 			qla4_8xxx_need_qsnt_handler(ha);
3387 		}
3388 	}
3389 
3390 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3391 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3392 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3393 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3394 		if (ql4xdontresethba) {
3395 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3396 			    ha->host_no, __func__));
3397 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3398 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3399 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3400 			goto dpc_post_reset_ha;
3401 		}
3402 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3403 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
3404 			qla4xxx_recover_adapter(ha);
3405 
3406 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3407 			uint8_t wait_time = RESET_INTR_TOV;
3408 
3409 			while ((readw(&ha->reg->ctrl_status) &
3410 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3411 				if (--wait_time == 0)
3412 					break;
3413 				msleep(1000);
3414 			}
3415 			if (wait_time == 0)
3416 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
3417 					      "bit not cleared-- resetting\n",
3418 					      ha->host_no, __func__));
3419 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3420 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3421 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3422 				status = qla4xxx_recover_adapter(ha);
3423 			}
3424 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3425 			if (status == QLA_SUCCESS)
3426 				ha->isp_ops->enable_intrs(ha);
3427 		}
3428 	}
3429 
3430 dpc_post_reset_ha:
3431 	/* ---- process AEN? --- */
3432 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3433 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3434 
3435 	/* ---- Get DHCP IP Address? --- */
3436 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3437 		qla4xxx_get_dhcp_ip_address(ha);
3438 
3439 	/* ---- relogin device? --- */
3440 	if (adapter_up(ha) &&
3441 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3442 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3443 	}
3444 
3445 	/* ---- link change? --- */
3446 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3447 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
3448 			/* ---- link down? --- */
3449 			qla4xxx_mark_all_devices_missing(ha);
3450 		} else {
3451 			/* ---- link up? --- *
3452 			 * F/W will auto login to all devices ONLY ONCE after
3453 			 * link up during driver initialization and runtime
3454 			 * fatal error recovery.  Therefore, the driver must
3455 			 * manually relogin to devices when recovering from
3456 			 * connection failures, logouts, expired KATO, etc. */
3457 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3458 				qla4xxx_build_ddb_list(ha, ha->is_reset);
3459 				iscsi_host_for_each_session(ha->host,
3460 						qla4xxx_login_flash_ddb);
3461 			} else
3462 				qla4xxx_relogin_all_devices(ha);
3463 		}
3464 	}
3465 }
3466 
3467 /**
3468  * qla4xxx_free_adapter - release the adapter
3469  * @ha: pointer to adapter structure
3470  **/
3471 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3472 {
3473 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3474 
3475 	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3476 		/* Turn-off interrupts on the card. */
3477 		ha->isp_ops->disable_intrs(ha);
3478 	}
3479 
3480 	/* Remove timer thread, if present */
3481 	if (ha->timer_active)
3482 		qla4xxx_stop_timer(ha);
3483 
3484 	/* Kill the kernel thread for this host */
3485 	if (ha->dpc_thread)
3486 		destroy_workqueue(ha->dpc_thread);
3487 
3488 	/* Kill the kernel thread for this host */
3489 	if (ha->task_wq)
3490 		destroy_workqueue(ha->task_wq);
3491 
3492 	/* Put firmware in known state */
3493 	ha->isp_ops->reset_firmware(ha);
3494 
3495 	if (is_qla8022(ha)) {
3496 		qla4_8xxx_idc_lock(ha);
3497 		qla4_8xxx_clear_drv_active(ha);
3498 		qla4_8xxx_idc_unlock(ha);
3499 	}
3500 
3501 	/* Detach interrupts */
3502 	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
3503 		qla4xxx_free_irqs(ha);
3504 
3505 	/* free extra memory */
3506 	qla4xxx_mem_free(ha);
3507 }
3508 
3509 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3510 {
3511 	int status = 0;
3512 	unsigned long mem_base, mem_len, db_base, db_len;
3513 	struct pci_dev *pdev = ha->pdev;
3514 
3515 	status = pci_request_regions(pdev, DRIVER_NAME);
3516 	if (status) {
3517 		printk(KERN_WARNING
3518 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
3519 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
3520 		goto iospace_error_exit;
3521 	}
3522 
3523 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
3524 	    __func__, pdev->revision));
3525 	ha->revision_id = pdev->revision;
3526 
3527 	/* remap phys address */
3528 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3529 	mem_len = pci_resource_len(pdev, 0);
3530 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3531 	    __func__, mem_base, mem_len));
3532 
3533 	/* mapping of pcibase pointer */
3534 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3535 	if (!ha->nx_pcibase) {
3536 		printk(KERN_ERR
3537 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3538 		pci_release_regions(ha->pdev);
3539 		goto iospace_error_exit;
3540 	}
3541 
3542 	/* Mapping of IO base pointer, door bell read and write pointer */
3543 
3544 	/* mapping of IO base pointer */
3545 	ha->qla4_8xxx_reg =
3546 	    (struct device_reg_82xx  __iomem *)((uint8_t *)ha->nx_pcibase +
3547 	    0xbc000 + (ha->pdev->devfn << 11));
3548 
3549 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
3550 	db_len = pci_resource_len(pdev, 4);
3551 
3552 	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3553 	    QLA82XX_CAM_RAM_DB2);
3554 
3555 	return 0;
3556 iospace_error_exit:
3557 	return -ENOMEM;
3558 }
3559 
3560 /***
3561  * qla4xxx_iospace_config - maps registers
3562  * @ha: pointer to adapter structure
3563  *
3564  * This routines maps HBA's registers from the pci address space
3565  * into the kernel virtual address space for memory mapped i/o.
3566  **/
3567 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
3568 {
3569 	unsigned long pio, pio_len, pio_flags;
3570 	unsigned long mmio, mmio_len, mmio_flags;
3571 
3572 	pio = pci_resource_start(ha->pdev, 0);
3573 	pio_len = pci_resource_len(ha->pdev, 0);
3574 	pio_flags = pci_resource_flags(ha->pdev, 0);
3575 	if (pio_flags & IORESOURCE_IO) {
3576 		if (pio_len < MIN_IOBASE_LEN) {
3577 			ql4_printk(KERN_WARNING, ha,
3578 				"Invalid PCI I/O region size\n");
3579 			pio = 0;
3580 		}
3581 	} else {
3582 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
3583 		pio = 0;
3584 	}
3585 
3586 	/* Use MMIO operations for all accesses. */
3587 	mmio = pci_resource_start(ha->pdev, 1);
3588 	mmio_len = pci_resource_len(ha->pdev, 1);
3589 	mmio_flags = pci_resource_flags(ha->pdev, 1);
3590 
3591 	if (!(mmio_flags & IORESOURCE_MEM)) {
3592 		ql4_printk(KERN_ERR, ha,
3593 		    "region #0 not an MMIO resource, aborting\n");
3594 
3595 		goto iospace_error_exit;
3596 	}
3597 
3598 	if (mmio_len < MIN_IOBASE_LEN) {
3599 		ql4_printk(KERN_ERR, ha,
3600 		    "Invalid PCI mem region size, aborting\n");
3601 		goto iospace_error_exit;
3602 	}
3603 
3604 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
3605 		ql4_printk(KERN_WARNING, ha,
3606 		    "Failed to reserve PIO/MMIO regions\n");
3607 
3608 		goto iospace_error_exit;
3609 	}
3610 
3611 	ha->pio_address = pio;
3612 	ha->pio_length = pio_len;
3613 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3614 	if (!ha->reg) {
3615 		ql4_printk(KERN_ERR, ha,
3616 		    "cannot remap MMIO, aborting\n");
3617 
3618 		goto iospace_error_exit;
3619 	}
3620 
3621 	return 0;
3622 
3623 iospace_error_exit:
3624 	return -ENOMEM;
3625 }
3626 
3627 static struct isp_operations qla4xxx_isp_ops = {
3628 	.iospace_config         = qla4xxx_iospace_config,
3629 	.pci_config             = qla4xxx_pci_config,
3630 	.disable_intrs          = qla4xxx_disable_intrs,
3631 	.enable_intrs           = qla4xxx_enable_intrs,
3632 	.start_firmware         = qla4xxx_start_firmware,
3633 	.intr_handler           = qla4xxx_intr_handler,
3634 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
3635 	.reset_chip             = qla4xxx_soft_reset,
3636 	.reset_firmware         = qla4xxx_hw_reset,
3637 	.queue_iocb             = qla4xxx_queue_iocb,
3638 	.complete_iocb          = qla4xxx_complete_iocb,
3639 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
3640 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
3641 	.get_sys_info           = qla4xxx_get_sys_info,
3642 };
3643 
3644 static struct isp_operations qla4_8xxx_isp_ops = {
3645 	.iospace_config         = qla4_8xxx_iospace_config,
3646 	.pci_config             = qla4_8xxx_pci_config,
3647 	.disable_intrs          = qla4_8xxx_disable_intrs,
3648 	.enable_intrs           = qla4_8xxx_enable_intrs,
3649 	.start_firmware         = qla4_8xxx_load_risc,
3650 	.intr_handler           = qla4_8xxx_intr_handler,
3651 	.interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
3652 	.reset_chip             = qla4_8xxx_isp_reset,
3653 	.reset_firmware         = qla4_8xxx_stop_firmware,
3654 	.queue_iocb             = qla4_8xxx_queue_iocb,
3655 	.complete_iocb          = qla4_8xxx_complete_iocb,
3656 	.rd_shdw_req_q_out      = qla4_8xxx_rd_shdw_req_q_out,
3657 	.rd_shdw_rsp_q_in       = qla4_8xxx_rd_shdw_rsp_q_in,
3658 	.get_sys_info           = qla4_8xxx_get_sys_info,
3659 };
3660 
3661 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3662 {
3663 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3664 }
3665 
3666 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3667 {
3668 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
3669 }
3670 
3671 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3672 {
3673 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3674 }
3675 
3676 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3677 {
3678 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
3679 }
3680 
3681 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3682 {
3683 	struct scsi_qla_host *ha = data;
3684 	char *str = buf;
3685 	int rc;
3686 
3687 	switch (type) {
3688 	case ISCSI_BOOT_ETH_FLAGS:
3689 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3690 		break;
3691 	case ISCSI_BOOT_ETH_INDEX:
3692 		rc = sprintf(str, "0\n");
3693 		break;
3694 	case ISCSI_BOOT_ETH_MAC:
3695 		rc = sysfs_format_mac(str, ha->my_mac,
3696 				      MAC_ADDR_LEN);
3697 		break;
3698 	default:
3699 		rc = -ENOSYS;
3700 		break;
3701 	}
3702 	return rc;
3703 }
3704 
3705 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
3706 {
3707 	int rc;
3708 
3709 	switch (type) {
3710 	case ISCSI_BOOT_ETH_FLAGS:
3711 	case ISCSI_BOOT_ETH_MAC:
3712 	case ISCSI_BOOT_ETH_INDEX:
3713 		rc = S_IRUGO;
3714 		break;
3715 	default:
3716 		rc = 0;
3717 		break;
3718 	}
3719 	return rc;
3720 }
3721 
3722 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3723 {
3724 	struct scsi_qla_host *ha = data;
3725 	char *str = buf;
3726 	int rc;
3727 
3728 	switch (type) {
3729 	case ISCSI_BOOT_INI_INITIATOR_NAME:
3730 		rc = sprintf(str, "%s\n", ha->name_string);
3731 		break;
3732 	default:
3733 		rc = -ENOSYS;
3734 		break;
3735 	}
3736 	return rc;
3737 }
3738 
3739 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
3740 {
3741 	int rc;
3742 
3743 	switch (type) {
3744 	case ISCSI_BOOT_INI_INITIATOR_NAME:
3745 		rc = S_IRUGO;
3746 		break;
3747 	default:
3748 		rc = 0;
3749 		break;
3750 	}
3751 	return rc;
3752 }
3753 
3754 static ssize_t
3755 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3756 			   char *buf)
3757 {
3758 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3759 	char *str = buf;
3760 	int rc;
3761 
3762 	switch (type) {
3763 	case ISCSI_BOOT_TGT_NAME:
3764 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3765 		break;
3766 	case ISCSI_BOOT_TGT_IP_ADDR:
3767 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3768 			rc = sprintf(buf, "%pI4\n",
3769 				     &boot_conn->dest_ipaddr.ip_address);
3770 		else
3771 			rc = sprintf(str, "%pI6\n",
3772 				     &boot_conn->dest_ipaddr.ip_address);
3773 		break;
3774 	case ISCSI_BOOT_TGT_PORT:
3775 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
3776 		break;
3777 	case ISCSI_BOOT_TGT_CHAP_NAME:
3778 		rc = sprintf(str,  "%.*s\n",
3779 			     boot_conn->chap.target_chap_name_length,
3780 			     (char *)&boot_conn->chap.target_chap_name);
3781 		break;
3782 	case ISCSI_BOOT_TGT_CHAP_SECRET:
3783 		rc = sprintf(str,  "%.*s\n",
3784 			     boot_conn->chap.target_secret_length,
3785 			     (char *)&boot_conn->chap.target_secret);
3786 		break;
3787 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3788 		rc = sprintf(str,  "%.*s\n",
3789 			     boot_conn->chap.intr_chap_name_length,
3790 			     (char *)&boot_conn->chap.intr_chap_name);
3791 		break;
3792 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3793 		rc = sprintf(str,  "%.*s\n",
3794 			     boot_conn->chap.intr_secret_length,
3795 			     (char *)&boot_conn->chap.intr_secret);
3796 		break;
3797 	case ISCSI_BOOT_TGT_FLAGS:
3798 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3799 		break;
3800 	case ISCSI_BOOT_TGT_NIC_ASSOC:
3801 		rc = sprintf(str, "0\n");
3802 		break;
3803 	default:
3804 		rc = -ENOSYS;
3805 		break;
3806 	}
3807 	return rc;
3808 }
3809 
3810 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3811 {
3812 	struct scsi_qla_host *ha = data;
3813 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3814 
3815 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3816 }
3817 
3818 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3819 {
3820 	struct scsi_qla_host *ha = data;
3821 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3822 
3823 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3824 }
3825 
3826 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
3827 {
3828 	int rc;
3829 
3830 	switch (type) {
3831 	case ISCSI_BOOT_TGT_NAME:
3832 	case ISCSI_BOOT_TGT_IP_ADDR:
3833 	case ISCSI_BOOT_TGT_PORT:
3834 	case ISCSI_BOOT_TGT_CHAP_NAME:
3835 	case ISCSI_BOOT_TGT_CHAP_SECRET:
3836 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3837 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3838 	case ISCSI_BOOT_TGT_NIC_ASSOC:
3839 	case ISCSI_BOOT_TGT_FLAGS:
3840 		rc = S_IRUGO;
3841 		break;
3842 	default:
3843 		rc = 0;
3844 		break;
3845 	}
3846 	return rc;
3847 }
3848 
3849 static void qla4xxx_boot_release(void *data)
3850 {
3851 	struct scsi_qla_host *ha = data;
3852 
3853 	scsi_host_put(ha->host);
3854 }
3855 
3856 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3857 {
3858 	dma_addr_t buf_dma;
3859 	uint32_t addr, pri_addr, sec_addr;
3860 	uint32_t offset;
3861 	uint16_t func_num;
3862 	uint8_t val;
3863 	uint8_t *buf = NULL;
3864 	size_t size = 13 * sizeof(uint8_t);
3865 	int ret = QLA_SUCCESS;
3866 
3867 	func_num = PCI_FUNC(ha->pdev->devfn);
3868 
3869 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3870 		   __func__, ha->pdev->device, func_num);
3871 
3872 	if (is_qla40XX(ha)) {
3873 		if (func_num == 1) {
3874 			addr = NVRAM_PORT0_BOOT_MODE;
3875 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3876 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3877 		} else if (func_num == 3) {
3878 			addr = NVRAM_PORT1_BOOT_MODE;
3879 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3880 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3881 		} else {
3882 			ret = QLA_ERROR;
3883 			goto exit_boot_info;
3884 		}
3885 
3886 		/* Check Boot Mode */
3887 		val = rd_nvram_byte(ha, addr);
3888 		if (!(val & 0x07)) {
3889 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3890 					  "options : 0x%x\n", __func__, val));
3891 			ret = QLA_ERROR;
3892 			goto exit_boot_info;
3893 		}
3894 
3895 		/* get primary valid target index */
3896 		val = rd_nvram_byte(ha, pri_addr);
3897 		if (val & BIT_7)
3898 			ddb_index[0] = (val & 0x7f);
3899 
3900 		/* get secondary valid target index */
3901 		val = rd_nvram_byte(ha, sec_addr);
3902 		if (val & BIT_7)
3903 			ddb_index[1] = (val & 0x7f);
3904 
3905 	} else if (is_qla8022(ha)) {
3906 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
3907 					 &buf_dma, GFP_KERNEL);
3908 		if (!buf) {
3909 			DEBUG2(ql4_printk(KERN_ERR, ha,
3910 					  "%s: Unable to allocate dma buffer\n",
3911 					   __func__));
3912 			ret = QLA_ERROR;
3913 			goto exit_boot_info;
3914 		}
3915 
3916 		if (ha->port_num == 0)
3917 			offset = BOOT_PARAM_OFFSET_PORT0;
3918 		else if (ha->port_num == 1)
3919 			offset = BOOT_PARAM_OFFSET_PORT1;
3920 		else {
3921 			ret = QLA_ERROR;
3922 			goto exit_boot_info_free;
3923 		}
3924 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3925 		       offset;
3926 		if (qla4xxx_get_flash(ha, buf_dma, addr,
3927 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3928 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
3929 					  " failed\n", ha->host_no, __func__));
3930 			ret = QLA_ERROR;
3931 			goto exit_boot_info_free;
3932 		}
3933 		/* Check Boot Mode */
3934 		if (!(buf[1] & 0x07)) {
3935 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3936 					  " : 0x%x\n", buf[1]));
3937 			ret = QLA_ERROR;
3938 			goto exit_boot_info_free;
3939 		}
3940 
3941 		/* get primary valid target index */
3942 		if (buf[2] & BIT_7)
3943 			ddb_index[0] = buf[2] & 0x7f;
3944 
3945 		/* get secondary valid target index */
3946 		if (buf[11] & BIT_7)
3947 			ddb_index[1] = buf[11] & 0x7f;
3948 	} else {
3949 		ret = QLA_ERROR;
3950 		goto exit_boot_info;
3951 	}
3952 
3953 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3954 			  " target ID %d\n", __func__, ddb_index[0],
3955 			  ddb_index[1]));
3956 
3957 exit_boot_info_free:
3958 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3959 exit_boot_info:
3960 	ha->pri_ddb_idx = ddb_index[0];
3961 	ha->sec_ddb_idx = ddb_index[1];
3962 	return ret;
3963 }
3964 
3965 /**
3966  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
3967  * @ha: pointer to adapter structure
3968  * @username: CHAP username to be returned
3969  * @password: CHAP password to be returned
3970  *
3971  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
3972  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
3973  * So from the CHAP cache find the first BIDI CHAP entry and set it
3974  * to the boot record in sysfs.
3975  **/
3976 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
3977 			    char *password)
3978 {
3979 	int i, ret = -EINVAL;
3980 	int max_chap_entries = 0;
3981 	struct ql4_chap_table *chap_table;
3982 
3983 	if (is_qla8022(ha))
3984 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
3985 						sizeof(struct ql4_chap_table);
3986 	else
3987 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
3988 
3989 	if (!ha->chap_list) {
3990 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
3991 		return ret;
3992 	}
3993 
3994 	mutex_lock(&ha->chap_sem);
3995 	for (i = 0; i < max_chap_entries; i++) {
3996 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
3997 		if (chap_table->cookie !=
3998 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
3999 			continue;
4000 		}
4001 
4002 		if (chap_table->flags & BIT_7) /* local */
4003 			continue;
4004 
4005 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4006 			continue;
4007 
4008 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4009 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4010 		ret = 0;
4011 		break;
4012 	}
4013 	mutex_unlock(&ha->chap_sem);
4014 
4015 	return ret;
4016 }
4017 
4018 
4019 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4020 				   struct ql4_boot_session_info *boot_sess,
4021 				   uint16_t ddb_index)
4022 {
4023 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4024 	struct dev_db_entry *fw_ddb_entry;
4025 	dma_addr_t fw_ddb_entry_dma;
4026 	uint16_t idx;
4027 	uint16_t options;
4028 	int ret = QLA_SUCCESS;
4029 
4030 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4031 					  &fw_ddb_entry_dma, GFP_KERNEL);
4032 	if (!fw_ddb_entry) {
4033 		DEBUG2(ql4_printk(KERN_ERR, ha,
4034 				  "%s: Unable to allocate dma buffer.\n",
4035 				  __func__));
4036 		ret = QLA_ERROR;
4037 		return ret;
4038 	}
4039 
4040 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4041 				   fw_ddb_entry_dma, ddb_index)) {
4042 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4043 				  "index [%d]\n", __func__, ddb_index));
4044 		ret = QLA_ERROR;
4045 		goto exit_boot_target;
4046 	}
4047 
4048 	/* Update target name and IP from DDB */
4049 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4050 	       min(sizeof(boot_sess->target_name),
4051 		   sizeof(fw_ddb_entry->iscsi_name)));
4052 
4053 	options = le16_to_cpu(fw_ddb_entry->options);
4054 	if (options & DDB_OPT_IPV6_DEVICE) {
4055 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4056 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4057 	} else {
4058 		boot_conn->dest_ipaddr.ip_type = 0x1;
4059 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4060 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4061 	}
4062 
4063 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4064 
4065 	/* update chap information */
4066 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4067 
4068 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
4069 
4070 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4071 
4072 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4073 				       target_chap_name,
4074 				       (char *)&boot_conn->chap.target_secret,
4075 				       idx);
4076 		if (ret) {
4077 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4078 			ret = QLA_ERROR;
4079 			goto exit_boot_target;
4080 		}
4081 
4082 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4083 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4084 	}
4085 
4086 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4087 
4088 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4089 
4090 		ret = qla4xxx_get_bidi_chap(ha,
4091 				    (char *)&boot_conn->chap.intr_chap_name,
4092 				    (char *)&boot_conn->chap.intr_secret);
4093 
4094 		if (ret) {
4095 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4096 			ret = QLA_ERROR;
4097 			goto exit_boot_target;
4098 		}
4099 
4100 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4101 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4102 	}
4103 
4104 exit_boot_target:
4105 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4106 			  fw_ddb_entry, fw_ddb_entry_dma);
4107 	return ret;
4108 }
4109 
4110 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4111 {
4112 	uint16_t ddb_index[2];
4113 	int ret = QLA_ERROR;
4114 	int rval;
4115 
4116 	memset(ddb_index, 0, sizeof(ddb_index));
4117 	ddb_index[0] = 0xffff;
4118 	ddb_index[1] = 0xffff;
4119 	ret = get_fw_boot_info(ha, ddb_index);
4120 	if (ret != QLA_SUCCESS) {
4121 		DEBUG2(ql4_printk(KERN_INFO, ha,
4122 				"%s: No boot target configured.\n", __func__));
4123 		return ret;
4124 	}
4125 
4126 	if (ql4xdisablesysfsboot)
4127 		return QLA_SUCCESS;
4128 
4129 	if (ddb_index[0] == 0xffff)
4130 		goto sec_target;
4131 
4132 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4133 				      ddb_index[0]);
4134 	if (rval != QLA_SUCCESS) {
4135 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4136 				  "configured\n", __func__));
4137 	} else
4138 		ret = QLA_SUCCESS;
4139 
4140 sec_target:
4141 	if (ddb_index[1] == 0xffff)
4142 		goto exit_get_boot_info;
4143 
4144 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4145 				      ddb_index[1]);
4146 	if (rval != QLA_SUCCESS) {
4147 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4148 				  " configured\n", __func__));
4149 	} else
4150 		ret = QLA_SUCCESS;
4151 
4152 exit_get_boot_info:
4153 	return ret;
4154 }
4155 
4156 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4157 {
4158 	struct iscsi_boot_kobj *boot_kobj;
4159 
4160 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4161 		return QLA_ERROR;
4162 
4163 	if (ql4xdisablesysfsboot) {
4164 		ql4_printk(KERN_INFO, ha,
4165 			   "%s: syfsboot disabled - driver will trigger login "
4166 			   "and publish session for discovery .\n", __func__);
4167 		return QLA_SUCCESS;
4168 	}
4169 
4170 
4171 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4172 	if (!ha->boot_kset)
4173 		goto kset_free;
4174 
4175 	if (!scsi_host_get(ha->host))
4176 		goto kset_free;
4177 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4178 					     qla4xxx_show_boot_tgt_pri_info,
4179 					     qla4xxx_tgt_get_attr_visibility,
4180 					     qla4xxx_boot_release);
4181 	if (!boot_kobj)
4182 		goto put_host;
4183 
4184 	if (!scsi_host_get(ha->host))
4185 		goto kset_free;
4186 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4187 					     qla4xxx_show_boot_tgt_sec_info,
4188 					     qla4xxx_tgt_get_attr_visibility,
4189 					     qla4xxx_boot_release);
4190 	if (!boot_kobj)
4191 		goto put_host;
4192 
4193 	if (!scsi_host_get(ha->host))
4194 		goto kset_free;
4195 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4196 					       qla4xxx_show_boot_ini_info,
4197 					       qla4xxx_ini_get_attr_visibility,
4198 					       qla4xxx_boot_release);
4199 	if (!boot_kobj)
4200 		goto put_host;
4201 
4202 	if (!scsi_host_get(ha->host))
4203 		goto kset_free;
4204 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4205 					       qla4xxx_show_boot_eth_info,
4206 					       qla4xxx_eth_get_attr_visibility,
4207 					       qla4xxx_boot_release);
4208 	if (!boot_kobj)
4209 		goto put_host;
4210 
4211 	return QLA_SUCCESS;
4212 
4213 put_host:
4214 	scsi_host_put(ha->host);
4215 kset_free:
4216 	iscsi_boot_destroy_kset(ha->boot_kset);
4217 	return -ENOMEM;
4218 }
4219 
4220 
4221 /**
4222  * qla4xxx_create chap_list - Create CHAP list from FLASH
4223  * @ha: pointer to adapter structure
4224  *
4225  * Read flash and make a list of CHAP entries, during login when a CHAP entry
4226  * is received, it will be checked in this list. If entry exist then the CHAP
4227  * entry index is set in the DDB. If CHAP entry does not exist in this list
4228  * then a new entry is added in FLASH in CHAP table and the index obtained is
4229  * used in the DDB.
4230  **/
4231 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4232 {
4233 	int rval = 0;
4234 	uint8_t *chap_flash_data = NULL;
4235 	uint32_t offset;
4236 	dma_addr_t chap_dma;
4237 	uint32_t chap_size = 0;
4238 
4239 	if (is_qla40XX(ha))
4240 		chap_size = MAX_CHAP_ENTRIES_40XX  *
4241 					sizeof(struct ql4_chap_table);
4242 	else	/* Single region contains CHAP info for both
4243 		 * ports which is divided into half for each port.
4244 		 */
4245 		chap_size = ha->hw.flt_chap_size / 2;
4246 
4247 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4248 					  &chap_dma, GFP_KERNEL);
4249 	if (!chap_flash_data) {
4250 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4251 		return;
4252 	}
4253 	if (is_qla40XX(ha))
4254 		offset = FLASH_CHAP_OFFSET;
4255 	else {
4256 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4257 		if (ha->port_num == 1)
4258 			offset += chap_size;
4259 	}
4260 
4261 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4262 	if (rval != QLA_SUCCESS)
4263 		goto exit_chap_list;
4264 
4265 	if (ha->chap_list == NULL)
4266 		ha->chap_list = vmalloc(chap_size);
4267 	if (ha->chap_list == NULL) {
4268 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4269 		goto exit_chap_list;
4270 	}
4271 
4272 	memcpy(ha->chap_list, chap_flash_data, chap_size);
4273 
4274 exit_chap_list:
4275 	dma_free_coherent(&ha->pdev->dev, chap_size,
4276 			chap_flash_data, chap_dma);
4277 }
4278 
4279 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4280 				  struct ql4_tuple_ddb *tddb)
4281 {
4282 	struct scsi_qla_host *ha;
4283 	struct iscsi_cls_session *cls_sess;
4284 	struct iscsi_cls_conn *cls_conn;
4285 	struct iscsi_session *sess;
4286 	struct iscsi_conn *conn;
4287 
4288 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4289 	ha = ddb_entry->ha;
4290 	cls_sess = ddb_entry->sess;
4291 	sess = cls_sess->dd_data;
4292 	cls_conn = ddb_entry->conn;
4293 	conn = cls_conn->dd_data;
4294 
4295 	tddb->tpgt = sess->tpgt;
4296 	tddb->port = conn->persistent_port;
4297 	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4298 	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4299 }
4300 
4301 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4302 				      struct ql4_tuple_ddb *tddb,
4303 				      uint8_t *flash_isid)
4304 {
4305 	uint16_t options = 0;
4306 
4307 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4308 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4309 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4310 
4311 	options = le16_to_cpu(fw_ddb_entry->options);
4312 	if (options & DDB_OPT_IPV6_DEVICE)
4313 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4314 	else
4315 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4316 
4317 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
4318 
4319 	if (flash_isid == NULL)
4320 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4321 		       sizeof(tddb->isid));
4322 	else
4323 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4324 }
4325 
4326 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4327 				     struct ql4_tuple_ddb *old_tddb,
4328 				     struct ql4_tuple_ddb *new_tddb,
4329 				     uint8_t is_isid_compare)
4330 {
4331 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4332 		return QLA_ERROR;
4333 
4334 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4335 		return QLA_ERROR;
4336 
4337 	if (old_tddb->port != new_tddb->port)
4338 		return QLA_ERROR;
4339 
4340 	/* For multi sessions, driver generates the ISID, so do not compare
4341 	 * ISID in reset path since it would be a comparision between the
4342 	 * driver generated ISID and firmware generated ISID. This could
4343 	 * lead to adding duplicated DDBs in the list as driver generated
4344 	 * ISID would not match firmware generated ISID.
4345 	 */
4346 	if (is_isid_compare) {
4347 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4348 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4349 			__func__, old_tddb->isid[5], old_tddb->isid[4],
4350 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4351 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4352 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4353 			new_tddb->isid[0]));
4354 
4355 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4356 			   sizeof(old_tddb->isid)))
4357 			return QLA_ERROR;
4358 	}
4359 
4360 	DEBUG2(ql4_printk(KERN_INFO, ha,
4361 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4362 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4363 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4364 			  new_tddb->ip_addr, new_tddb->iscsi_name));
4365 
4366 	return QLA_SUCCESS;
4367 }
4368 
4369 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4370 				     struct dev_db_entry *fw_ddb_entry)
4371 {
4372 	struct ddb_entry *ddb_entry;
4373 	struct ql4_tuple_ddb *fw_tddb = NULL;
4374 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4375 	int idx;
4376 	int ret = QLA_ERROR;
4377 
4378 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4379 	if (!fw_tddb) {
4380 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4381 				  "Memory Allocation failed.\n"));
4382 		ret = QLA_SUCCESS;
4383 		goto exit_check;
4384 	}
4385 
4386 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4387 	if (!tmp_tddb) {
4388 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4389 				  "Memory Allocation failed.\n"));
4390 		ret = QLA_SUCCESS;
4391 		goto exit_check;
4392 	}
4393 
4394 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4395 
4396 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4397 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4398 		if (ddb_entry == NULL)
4399 			continue;
4400 
4401 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4402 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4403 			ret = QLA_SUCCESS; /* found */
4404 			goto exit_check;
4405 		}
4406 	}
4407 
4408 exit_check:
4409 	if (fw_tddb)
4410 		vfree(fw_tddb);
4411 	if (tmp_tddb)
4412 		vfree(tmp_tddb);
4413 	return ret;
4414 }
4415 
4416 /**
4417  * qla4xxx_check_existing_isid - check if target with same isid exist
4418  *				 in target list
4419  * @list_nt: list of target
4420  * @isid: isid to check
4421  *
4422  * This routine return QLA_SUCCESS if target with same isid exist
4423  **/
4424 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4425 {
4426 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4427 	struct dev_db_entry *fw_ddb_entry;
4428 
4429 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4430 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4431 
4432 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4433 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4434 			return QLA_SUCCESS;
4435 		}
4436 	}
4437 	return QLA_ERROR;
4438 }
4439 
4440 /**
4441  * qla4xxx_update_isid - compare ddbs and updated isid
4442  * @ha: Pointer to host adapter structure.
4443  * @list_nt: list of nt target
4444  * @fw_ddb_entry: firmware ddb entry
4445  *
4446  * This routine update isid if ddbs have same iqn, same isid and
4447  * different IP addr.
4448  * Return QLA_SUCCESS if isid is updated.
4449  **/
4450 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4451 			       struct list_head *list_nt,
4452 			       struct dev_db_entry *fw_ddb_entry)
4453 {
4454 	uint8_t base_value, i;
4455 
4456 	base_value = fw_ddb_entry->isid[1] & 0x1f;
4457 	for (i = 0; i < 8; i++) {
4458 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
4459 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4460 			break;
4461 	}
4462 
4463 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4464 		return QLA_ERROR;
4465 
4466 	return QLA_SUCCESS;
4467 }
4468 
4469 /**
4470  * qla4xxx_should_update_isid - check if isid need to update
4471  * @ha: Pointer to host adapter structure.
4472  * @old_tddb: ddb tuple
4473  * @new_tddb: ddb tuple
4474  *
4475  * Return QLA_SUCCESS if different IP, different PORT, same iqn,
4476  * same isid
4477  **/
4478 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4479 				      struct ql4_tuple_ddb *old_tddb,
4480 				      struct ql4_tuple_ddb *new_tddb)
4481 {
4482 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4483 		/* Same ip */
4484 		if (old_tddb->port == new_tddb->port)
4485 			return QLA_ERROR;
4486 	}
4487 
4488 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4489 		/* different iqn */
4490 		return QLA_ERROR;
4491 
4492 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4493 		   sizeof(old_tddb->isid)))
4494 		/* different isid */
4495 		return QLA_ERROR;
4496 
4497 	return QLA_SUCCESS;
4498 }
4499 
4500 /**
4501  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4502  * @ha: Pointer to host adapter structure.
4503  * @list_nt: list of nt target.
4504  * @fw_ddb_entry: firmware ddb entry.
4505  *
4506  * This routine check if fw_ddb_entry already exists in list_nt to avoid
4507  * duplicate ddb in list_nt.
4508  * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
4509  * Note: This function also update isid of DDB if required.
4510  **/
4511 
4512 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4513 				       struct list_head *list_nt,
4514 				       struct dev_db_entry *fw_ddb_entry)
4515 {
4516 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
4517 	struct ql4_tuple_ddb *fw_tddb = NULL;
4518 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4519 	int rval, ret = QLA_ERROR;
4520 
4521 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4522 	if (!fw_tddb) {
4523 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4524 				  "Memory Allocation failed.\n"));
4525 		ret = QLA_SUCCESS;
4526 		goto exit_check;
4527 	}
4528 
4529 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4530 	if (!tmp_tddb) {
4531 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4532 				  "Memory Allocation failed.\n"));
4533 		ret = QLA_SUCCESS;
4534 		goto exit_check;
4535 	}
4536 
4537 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4538 
4539 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4540 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4541 					  nt_ddb_idx->flash_isid);
4542 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4543 		/* found duplicate ddb */
4544 		if (ret == QLA_SUCCESS)
4545 			goto exit_check;
4546 	}
4547 
4548 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4549 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4550 
4551 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4552 		if (ret == QLA_SUCCESS) {
4553 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4554 			if (rval == QLA_SUCCESS)
4555 				ret = QLA_ERROR;
4556 			else
4557 				ret = QLA_SUCCESS;
4558 
4559 			goto exit_check;
4560 		}
4561 	}
4562 
4563 exit_check:
4564 	if (fw_tddb)
4565 		vfree(fw_tddb);
4566 	if (tmp_tddb)
4567 		vfree(tmp_tddb);
4568 	return ret;
4569 }
4570 
4571 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
4572 {
4573 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
4574 
4575 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4576 		list_del_init(&ddb_idx->list);
4577 		vfree(ddb_idx);
4578 	}
4579 }
4580 
4581 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4582 					struct dev_db_entry *fw_ddb_entry)
4583 {
4584 	struct iscsi_endpoint *ep;
4585 	struct sockaddr_in *addr;
4586 	struct sockaddr_in6 *addr6;
4587 	struct sockaddr *dst_addr;
4588 	char *ip;
4589 
4590 	/* TODO: need to destroy on unload iscsi_endpoint*/
4591 	dst_addr = vmalloc(sizeof(*dst_addr));
4592 	if (!dst_addr)
4593 		return NULL;
4594 
4595 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4596 		dst_addr->sa_family = AF_INET6;
4597 		addr6 = (struct sockaddr_in6 *)dst_addr;
4598 		ip = (char *)&addr6->sin6_addr;
4599 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4600 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4601 
4602 	} else {
4603 		dst_addr->sa_family = AF_INET;
4604 		addr = (struct sockaddr_in *)dst_addr;
4605 		ip = (char *)&addr->sin_addr;
4606 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4607 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4608 	}
4609 
4610 	ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
4611 	vfree(dst_addr);
4612 	return ep;
4613 }
4614 
4615 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4616 {
4617 	if (ql4xdisablesysfsboot)
4618 		return QLA_SUCCESS;
4619 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
4620 		return QLA_ERROR;
4621 	return QLA_SUCCESS;
4622 }
4623 
4624 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4625 					  struct ddb_entry *ddb_entry)
4626 {
4627 	uint16_t def_timeout;
4628 
4629 	ddb_entry->ddb_type = FLASH_DDB;
4630 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
4631 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4632 	ddb_entry->ha = ha;
4633 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4634 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4635 
4636 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4637 	atomic_set(&ddb_entry->relogin_timer, 0);
4638 	atomic_set(&ddb_entry->relogin_retry_count, 0);
4639 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
4640 	ddb_entry->default_relogin_timeout =
4641 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4642 		def_timeout : LOGIN_TOV;
4643 	ddb_entry->default_time2wait =
4644 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4645 }
4646 
4647 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4648 {
4649 	uint32_t idx = 0;
4650 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4651 	uint32_t sts[MBOX_REG_COUNT];
4652 	uint32_t ip_state;
4653 	unsigned long wtime;
4654 	int ret;
4655 
4656 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
4657 	do {
4658 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4659 			if (ip_idx[idx] == -1)
4660 				continue;
4661 
4662 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4663 
4664 			if (ret == QLA_ERROR) {
4665 				ip_idx[idx] = -1;
4666 				continue;
4667 			}
4668 
4669 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4670 
4671 			DEBUG2(ql4_printk(KERN_INFO, ha,
4672 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
4673 					  ip_idx[idx], ip_state));
4674 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4675 			    ip_state == IP_ADDRSTATE_INVALID ||
4676 			    ip_state == IP_ADDRSTATE_PREFERRED ||
4677 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
4678 			    ip_state == IP_ADDRSTATE_DISABLING)
4679 				ip_idx[idx] = -1;
4680 		}
4681 
4682 		/* Break if all IP states checked */
4683 		if ((ip_idx[0] == -1) &&
4684 		    (ip_idx[1] == -1) &&
4685 		    (ip_idx[2] == -1) &&
4686 		    (ip_idx[3] == -1))
4687 			break;
4688 		schedule_timeout_uninterruptible(HZ);
4689 	} while (time_after(wtime, jiffies));
4690 }
4691 
4692 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4693 				  struct list_head *list_st)
4694 {
4695 	struct qla_ddb_index  *st_ddb_idx;
4696 	int max_ddbs;
4697 	int fw_idx_size;
4698 	struct dev_db_entry *fw_ddb_entry;
4699 	dma_addr_t fw_ddb_dma;
4700 	int ret;
4701 	uint32_t idx = 0, next_idx = 0;
4702 	uint32_t state = 0, conn_err = 0;
4703 	uint16_t conn_id = 0;
4704 
4705 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4706 				      &fw_ddb_dma);
4707 	if (fw_ddb_entry == NULL) {
4708 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4709 		goto exit_st_list;
4710 	}
4711 
4712 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4713 				     MAX_DEV_DB_ENTRIES;
4714 	fw_idx_size = sizeof(struct qla_ddb_index);
4715 
4716 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
4717 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4718 					      NULL, &next_idx, &state,
4719 					      &conn_err, NULL, &conn_id);
4720 		if (ret == QLA_ERROR)
4721 			break;
4722 
4723 		/* Ignore DDB if invalid state (unassigned) */
4724 		if (state == DDB_DS_UNASSIGNED)
4725 			goto continue_next_st;
4726 
4727 		/* Check if ST, add to the list_st */
4728 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4729 			goto continue_next_st;
4730 
4731 		st_ddb_idx = vzalloc(fw_idx_size);
4732 		if (!st_ddb_idx)
4733 			break;
4734 
4735 		st_ddb_idx->fw_ddb_idx = idx;
4736 
4737 		list_add_tail(&st_ddb_idx->list, list_st);
4738 continue_next_st:
4739 		if (next_idx == 0)
4740 			break;
4741 	}
4742 
4743 exit_st_list:
4744 	if (fw_ddb_entry)
4745 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4746 }
4747 
4748 /**
4749  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4750  * @ha: pointer to adapter structure
4751  * @list_ddb: List from which failed ddb to be removed
4752  *
4753  * Iterate over the list of DDBs and find and remove DDBs that are either in
4754  * no connection active state or failed state
4755  **/
4756 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4757 				      struct list_head *list_ddb)
4758 {
4759 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
4760 	uint32_t next_idx = 0;
4761 	uint32_t state = 0, conn_err = 0;
4762 	int ret;
4763 
4764 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4765 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4766 					      NULL, 0, NULL, &next_idx, &state,
4767 					      &conn_err, NULL, NULL);
4768 		if (ret == QLA_ERROR)
4769 			continue;
4770 
4771 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4772 		    state == DDB_DS_SESSION_FAILED) {
4773 			list_del_init(&ddb_idx->list);
4774 			vfree(ddb_idx);
4775 		}
4776 	}
4777 }
4778 
4779 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4780 				   struct dev_db_entry *fw_ddb_entry,
4781 				   int is_reset)
4782 {
4783 	struct iscsi_cls_session *cls_sess;
4784 	struct iscsi_session *sess;
4785 	struct iscsi_cls_conn *cls_conn;
4786 	struct iscsi_endpoint *ep;
4787 	uint16_t cmds_max = 32;
4788 	uint16_t conn_id = 0;
4789 	uint32_t initial_cmdsn = 0;
4790 	int ret = QLA_SUCCESS;
4791 
4792 	struct ddb_entry *ddb_entry = NULL;
4793 
4794 	/* Create session object, with INVALID_ENTRY,
4795 	 * the targer_id would get set when we issue the login
4796 	 */
4797 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4798 				       cmds_max, sizeof(struct ddb_entry),
4799 				       sizeof(struct ql4_task_data),
4800 				       initial_cmdsn, INVALID_ENTRY);
4801 	if (!cls_sess) {
4802 		ret = QLA_ERROR;
4803 		goto exit_setup;
4804 	}
4805 
4806 	/*
4807 	 * so calling module_put function to decrement the
4808 	 * reference count.
4809 	 **/
4810 	module_put(qla4xxx_iscsi_transport.owner);
4811 	sess = cls_sess->dd_data;
4812 	ddb_entry = sess->dd_data;
4813 	ddb_entry->sess = cls_sess;
4814 
4815 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4816 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4817 	       sizeof(struct dev_db_entry));
4818 
4819 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4820 
4821 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4822 
4823 	if (!cls_conn) {
4824 		ret = QLA_ERROR;
4825 		goto exit_setup;
4826 	}
4827 
4828 	ddb_entry->conn = cls_conn;
4829 
4830 	/* Setup ep, for displaying attributes in sysfs */
4831 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4832 	if (ep) {
4833 		ep->conn = cls_conn;
4834 		cls_conn->ep = ep;
4835 	} else {
4836 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4837 		ret = QLA_ERROR;
4838 		goto exit_setup;
4839 	}
4840 
4841 	/* Update sess/conn params */
4842 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4843 
4844 	if (is_reset == RESET_ADAPTER) {
4845 		iscsi_block_session(cls_sess);
4846 		/* Use the relogin path to discover new devices
4847 		 *  by short-circuting the logic of setting
4848 		 *  timer to relogin - instead set the flags
4849 		 *  to initiate login right away.
4850 		 */
4851 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4852 		set_bit(DF_RELOGIN, &ddb_entry->flags);
4853 	}
4854 
4855 exit_setup:
4856 	return ret;
4857 }
4858 
4859 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4860 				  struct list_head *list_nt, int is_reset)
4861 {
4862 	struct dev_db_entry *fw_ddb_entry;
4863 	dma_addr_t fw_ddb_dma;
4864 	int max_ddbs;
4865 	int fw_idx_size;
4866 	int ret;
4867 	uint32_t idx = 0, next_idx = 0;
4868 	uint32_t state = 0, conn_err = 0;
4869 	uint16_t conn_id = 0;
4870 	struct qla_ddb_index  *nt_ddb_idx;
4871 
4872 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4873 				      &fw_ddb_dma);
4874 	if (fw_ddb_entry == NULL) {
4875 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4876 		goto exit_nt_list;
4877 	}
4878 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4879 				     MAX_DEV_DB_ENTRIES;
4880 	fw_idx_size = sizeof(struct qla_ddb_index);
4881 
4882 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
4883 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4884 					      NULL, &next_idx, &state,
4885 					      &conn_err, NULL, &conn_id);
4886 		if (ret == QLA_ERROR)
4887 			break;
4888 
4889 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4890 			goto continue_next_nt;
4891 
4892 		/* Check if NT, then add to list it */
4893 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4894 			goto continue_next_nt;
4895 
4896 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4897 		    state == DDB_DS_SESSION_FAILED))
4898 			goto continue_next_nt;
4899 
4900 		DEBUG2(ql4_printk(KERN_INFO, ha,
4901 				  "Adding  DDB to session = 0x%x\n", idx));
4902 		if (is_reset == INIT_ADAPTER) {
4903 			nt_ddb_idx = vmalloc(fw_idx_size);
4904 			if (!nt_ddb_idx)
4905 				break;
4906 
4907 			nt_ddb_idx->fw_ddb_idx = idx;
4908 
4909 			/* Copy original isid as it may get updated in function
4910 			 * qla4xxx_update_isid(). We need original isid in
4911 			 * function qla4xxx_compare_tuple_ddb to find duplicate
4912 			 * target */
4913 			memcpy(&nt_ddb_idx->flash_isid[0],
4914 			       &fw_ddb_entry->isid[0],
4915 			       sizeof(nt_ddb_idx->flash_isid));
4916 
4917 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
4918 							  fw_ddb_entry);
4919 			if (ret == QLA_SUCCESS) {
4920 				/* free nt_ddb_idx and do not add to list_nt */
4921 				vfree(nt_ddb_idx);
4922 				goto continue_next_nt;
4923 			}
4924 
4925 			/* Copy updated isid */
4926 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4927 			       sizeof(struct dev_db_entry));
4928 
4929 			list_add_tail(&nt_ddb_idx->list, list_nt);
4930 		} else if (is_reset == RESET_ADAPTER) {
4931 			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4932 								QLA_SUCCESS)
4933 				goto continue_next_nt;
4934 		}
4935 
4936 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4937 		if (ret == QLA_ERROR)
4938 			goto exit_nt_list;
4939 
4940 continue_next_nt:
4941 		if (next_idx == 0)
4942 			break;
4943 	}
4944 
4945 exit_nt_list:
4946 	if (fw_ddb_entry)
4947 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4948 }
4949 
4950 /**
4951  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4952  * @ha: pointer to adapter structure
4953  * @is_reset: Is this init path or reset path
4954  *
4955  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4956  * using connection open, then create the list of normal targets (nt)
4957  * from firmware DDBs. Based on the list of nt setup session and connection
4958  * objects.
4959  **/
4960 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4961 {
4962 	uint16_t tmo = 0;
4963 	struct list_head list_st, list_nt;
4964 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
4965 	unsigned long wtime;
4966 
4967 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
4968 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4969 		ha->is_reset = is_reset;
4970 		return;
4971 	}
4972 
4973 	INIT_LIST_HEAD(&list_st);
4974 	INIT_LIST_HEAD(&list_nt);
4975 
4976 	qla4xxx_build_st_list(ha, &list_st);
4977 
4978 	/* Before issuing conn open mbox, ensure all IPs states are configured
4979 	 * Note, conn open fails if IPs are not configured
4980 	 */
4981 	qla4xxx_wait_for_ip_configuration(ha);
4982 
4983 	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
4984 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4985 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4986 	}
4987 
4988 	/* Wait to ensure all sendtargets are done for min 12 sec wait */
4989 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
4990 	       (ha->def_timeout < LOGIN_TOV * 10) ?
4991 	       ha->def_timeout : LOGIN_TOV);
4992 
4993 	DEBUG2(ql4_printk(KERN_INFO, ha,
4994 			  "Default time to wait for build ddb %d\n", tmo));
4995 
4996 	wtime = jiffies + (HZ * tmo);
4997 	do {
4998 		if (list_empty(&list_st))
4999 			break;
5000 
5001 		qla4xxx_remove_failed_ddb(ha, &list_st);
5002 		schedule_timeout_uninterruptible(HZ / 10);
5003 	} while (time_after(wtime, jiffies));
5004 
5005 	/* Free up the sendtargets list */
5006 	qla4xxx_free_ddb_list(&list_st);
5007 
5008 	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
5009 
5010 	qla4xxx_free_ddb_list(&list_nt);
5011 
5012 	qla4xxx_free_ddb_index(ha);
5013 }
5014 
5015 /**
5016  * qla4xxx_probe_adapter - callback function to probe HBA
5017  * @pdev: pointer to pci_dev structure
5018  * @pci_device_id: pointer to pci_device entry
5019  *
5020  * This routine will probe for Qlogic 4xxx iSCSI host adapters.
5021  * It returns zero if successful. It also initializes all data necessary for
5022  * the driver.
5023  **/
5024 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5025 					   const struct pci_device_id *ent)
5026 {
5027 	int ret = -ENODEV, status;
5028 	struct Scsi_Host *host;
5029 	struct scsi_qla_host *ha;
5030 	uint8_t init_retry_count = 0;
5031 	char buf[34];
5032 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
5033 	uint32_t dev_state;
5034 
5035 	if (pci_enable_device(pdev))
5036 		return -1;
5037 
5038 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
5039 	if (host == NULL) {
5040 		printk(KERN_WARNING
5041 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
5042 		goto probe_disable_device;
5043 	}
5044 
5045 	/* Clear our data area */
5046 	ha = to_qla_host(host);
5047 	memset(ha, 0, sizeof(*ha));
5048 
5049 	/* Save the information from PCI BIOS.	*/
5050 	ha->pdev = pdev;
5051 	ha->host = host;
5052 	ha->host_no = host->host_no;
5053 
5054 	pci_enable_pcie_error_reporting(pdev);
5055 
5056 	/* Setup Runtime configurable options */
5057 	if (is_qla8022(ha)) {
5058 		ha->isp_ops = &qla4_8xxx_isp_ops;
5059 		rwlock_init(&ha->hw_lock);
5060 		ha->qdr_sn_window = -1;
5061 		ha->ddr_mn_window = -1;
5062 		ha->curr_window = 255;
5063 		ha->func_num = PCI_FUNC(ha->pdev->devfn);
5064 		nx_legacy_intr = &legacy_intr[ha->func_num];
5065 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5066 		ha->nx_legacy_intr.tgt_status_reg =
5067 			nx_legacy_intr->tgt_status_reg;
5068 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5069 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5070 	} else {
5071 		ha->isp_ops = &qla4xxx_isp_ops;
5072 	}
5073 
5074 	/* Set EEH reset type to fundamental if required by hba */
5075 	if (is_qla8022(ha))
5076 		pdev->needs_freset = 1;
5077 
5078 	/* Configure PCI I/O space. */
5079 	ret = ha->isp_ops->iospace_config(ha);
5080 	if (ret)
5081 		goto probe_failed_ioconfig;
5082 
5083 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
5084 		   pdev->device, pdev->irq, ha->reg);
5085 
5086 	qla4xxx_config_dma_addressing(ha);
5087 
5088 	/* Initialize lists and spinlocks. */
5089 	INIT_LIST_HEAD(&ha->free_srb_q);
5090 
5091 	mutex_init(&ha->mbox_sem);
5092 	mutex_init(&ha->chap_sem);
5093 	init_completion(&ha->mbx_intr_comp);
5094 	init_completion(&ha->disable_acb_comp);
5095 
5096 	spin_lock_init(&ha->hardware_lock);
5097 
5098 	/* Initialize work list */
5099 	INIT_LIST_HEAD(&ha->work_list);
5100 
5101 	/* Allocate dma buffers */
5102 	if (qla4xxx_mem_alloc(ha)) {
5103 		ql4_printk(KERN_WARNING, ha,
5104 		    "[ERROR] Failed to allocate memory for adapter\n");
5105 
5106 		ret = -ENOMEM;
5107 		goto probe_failed;
5108 	}
5109 
5110 	host->cmd_per_lun = 3;
5111 	host->max_channel = 0;
5112 	host->max_lun = MAX_LUNS - 1;
5113 	host->max_id = MAX_TARGETS;
5114 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
5115 	host->can_queue = MAX_SRBS ;
5116 	host->transportt = qla4xxx_scsi_transport;
5117 
5118 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
5119 	if (ret) {
5120 		ql4_printk(KERN_WARNING, ha,
5121 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
5122 		goto probe_failed;
5123 	}
5124 
5125 	pci_set_drvdata(pdev, ha);
5126 
5127 	ret = scsi_add_host(host, &pdev->dev);
5128 	if (ret)
5129 		goto probe_failed;
5130 
5131 	if (is_qla8022(ha))
5132 		(void) qla4_8xxx_get_flash_info(ha);
5133 
5134 	/*
5135 	 * Initialize the Host adapter request/response queues and
5136 	 * firmware
5137 	 * NOTE: interrupts enabled upon successful completion
5138 	 */
5139 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5140 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5141 	    init_retry_count++ < MAX_INIT_RETRIES) {
5142 
5143 		if (is_qla8022(ha)) {
5144 			qla4_8xxx_idc_lock(ha);
5145 			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
5146 			qla4_8xxx_idc_unlock(ha);
5147 			if (dev_state == QLA82XX_DEV_FAILED) {
5148 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5149 				    "initialize adapter. H/W is in failed state\n",
5150 				    __func__);
5151 				break;
5152 			}
5153 		}
5154 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
5155 			      "(%d)\n", __func__, init_retry_count));
5156 
5157 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
5158 			continue;
5159 
5160 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5161 	}
5162 
5163 	if (!test_bit(AF_ONLINE, &ha->flags)) {
5164 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
5165 
5166 		if (is_qla8022(ha) && ql4xdontresethba) {
5167 			/* Put the device in failed state. */
5168 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5169 			qla4_8xxx_idc_lock(ha);
5170 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5171 			    QLA82XX_DEV_FAILED);
5172 			qla4_8xxx_idc_unlock(ha);
5173 		}
5174 		ret = -ENODEV;
5175 		goto remove_host;
5176 	}
5177 
5178 	/* Startup the kernel thread for this host adapter. */
5179 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
5180 		      "qla4xxx_dpc\n", __func__));
5181 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
5182 	ha->dpc_thread = create_singlethread_workqueue(buf);
5183 	if (!ha->dpc_thread) {
5184 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
5185 		ret = -ENODEV;
5186 		goto remove_host;
5187 	}
5188 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
5189 
5190 	sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
5191 	ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
5192 	if (!ha->task_wq) {
5193 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5194 		ret = -ENODEV;
5195 		goto remove_host;
5196 	}
5197 
5198 	/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5199 	 * (which is called indirectly by qla4xxx_initialize_adapter),
5200 	 * so that irqs will be registered after crbinit but before
5201 	 * mbx_intr_enable.
5202 	 */
5203 	if (!is_qla8022(ha)) {
5204 		ret = qla4xxx_request_irqs(ha);
5205 		if (ret) {
5206 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5207 			    "interrupt %d already in use.\n", pdev->irq);
5208 			goto remove_host;
5209 		}
5210 	}
5211 
5212 	pci_save_state(ha->pdev);
5213 	ha->isp_ops->enable_intrs(ha);
5214 
5215 	/* Start timer thread. */
5216 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5217 
5218 	set_bit(AF_INIT_DONE, &ha->flags);
5219 
5220 	qla4_8xxx_alloc_sysfs_attr(ha);
5221 
5222 	printk(KERN_INFO
5223 	       " QLogic iSCSI HBA Driver version: %s\n"
5224 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5225 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5226 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5227 	       ha->patch_number, ha->build_number);
5228 
5229 	if (qla4xxx_setup_boot_info(ha))
5230 		ql4_printk(KERN_ERR, ha,
5231 			   "%s: No iSCSI boot target configured\n", __func__);
5232 
5233 		/* Perform the build ddb list and login to each */
5234 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5235 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5236 
5237 	qla4xxx_create_chap_list(ha);
5238 
5239 	qla4xxx_create_ifaces(ha);
5240 	return 0;
5241 
5242 remove_host:
5243 	scsi_remove_host(ha->host);
5244 
5245 probe_failed:
5246 	qla4xxx_free_adapter(ha);
5247 
5248 probe_failed_ioconfig:
5249 	pci_disable_pcie_error_reporting(pdev);
5250 	scsi_host_put(ha->host);
5251 
5252 probe_disable_device:
5253 	pci_disable_device(pdev);
5254 
5255 	return ret;
5256 }
5257 
5258 /**
5259  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
5260  * @ha: pointer to adapter structure
5261  *
5262  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5263  * so that the other port will not re-initialize while in the process of
5264  * removing the ha due to driver unload or hba hotplug.
5265  **/
5266 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5267 {
5268 	struct scsi_qla_host *other_ha = NULL;
5269 	struct pci_dev *other_pdev = NULL;
5270 	int fn = ISP4XXX_PCI_FN_2;
5271 
5272 	/*iscsi function numbers for ISP4xxx is 1 and 3*/
5273 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5274 		fn = ISP4XXX_PCI_FN_1;
5275 
5276 	other_pdev =
5277 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5278 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5279 		fn));
5280 
5281 	/* Get other_ha if other_pdev is valid and state is enable*/
5282 	if (other_pdev) {
5283 		if (atomic_read(&other_pdev->enable_cnt)) {
5284 			other_ha = pci_get_drvdata(other_pdev);
5285 			if (other_ha) {
5286 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
5287 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5288 				    "Prevent %s reinit\n", __func__,
5289 				    dev_name(&other_ha->pdev->dev)));
5290 			}
5291 		}
5292 		pci_dev_put(other_pdev);
5293 	}
5294 }
5295 
5296 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5297 {
5298 	struct ddb_entry *ddb_entry;
5299 	int options;
5300 	int idx;
5301 
5302 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5303 
5304 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5305 		if ((ddb_entry != NULL) &&
5306 		    (ddb_entry->ddb_type == FLASH_DDB)) {
5307 
5308 			options = LOGOUT_OPTION_CLOSE_SESSION;
5309 			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5310 			    == QLA_ERROR)
5311 				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5312 					   __func__);
5313 
5314 			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5315 			/*
5316 			 * we have decremented the reference count of the driver
5317 			 * when we setup the session to have the driver unload
5318 			 * to be seamless without actually destroying the
5319 			 * session
5320 			 **/
5321 			try_module_get(qla4xxx_iscsi_transport.owner);
5322 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
5323 			qla4xxx_free_ddb(ha, ddb_entry);
5324 			iscsi_session_teardown(ddb_entry->sess);
5325 		}
5326 	}
5327 }
5328 /**
5329  * qla4xxx_remove_adapter - calback function to remove adapter.
5330  * @pci_dev: PCI device pointer
5331  **/
5332 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5333 {
5334 	struct scsi_qla_host *ha;
5335 
5336 	ha = pci_get_drvdata(pdev);
5337 
5338 	if (!is_qla8022(ha))
5339 		qla4xxx_prevent_other_port_reinit(ha);
5340 
5341 	/* destroy iface from sysfs */
5342 	qla4xxx_destroy_ifaces(ha);
5343 
5344 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
5345 		iscsi_boot_destroy_kset(ha->boot_kset);
5346 
5347 	qla4xxx_destroy_fw_ddb_session(ha);
5348 	qla4_8xxx_free_sysfs_attr(ha);
5349 
5350 	scsi_remove_host(ha->host);
5351 
5352 	qla4xxx_free_adapter(ha);
5353 
5354 	scsi_host_put(ha->host);
5355 
5356 	pci_disable_pcie_error_reporting(pdev);
5357 	pci_disable_device(pdev);
5358 	pci_set_drvdata(pdev, NULL);
5359 }
5360 
5361 /**
5362  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5363  * @ha: HA context
5364  *
5365  * At exit, the @ha's flags.enable_64bit_addressing set to indicated
5366  * supported addressing method.
5367  */
5368 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
5369 {
5370 	int retval;
5371 
5372 	/* Update our PCI device dma_mask for full 64 bit mask */
5373 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
5374 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
5375 			dev_dbg(&ha->pdev->dev,
5376 				  "Failed to set 64 bit PCI consistent mask; "
5377 				   "using 32 bit.\n");
5378 			retval = pci_set_consistent_dma_mask(ha->pdev,
5379 							     DMA_BIT_MASK(32));
5380 		}
5381 	} else
5382 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
5383 }
5384 
5385 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5386 {
5387 	struct iscsi_cls_session *cls_sess;
5388 	struct iscsi_session *sess;
5389 	struct ddb_entry *ddb;
5390 	int queue_depth = QL4_DEF_QDEPTH;
5391 
5392 	cls_sess = starget_to_session(sdev->sdev_target);
5393 	sess = cls_sess->dd_data;
5394 	ddb = sess->dd_data;
5395 
5396 	sdev->hostdata = ddb;
5397 	sdev->tagged_supported = 1;
5398 
5399 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5400 		queue_depth = ql4xmaxqdepth;
5401 
5402 	scsi_activate_tcq(sdev, queue_depth);
5403 	return 0;
5404 }
5405 
5406 static int qla4xxx_slave_configure(struct scsi_device *sdev)
5407 {
5408 	sdev->tagged_supported = 1;
5409 	return 0;
5410 }
5411 
5412 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5413 {
5414 	scsi_deactivate_tcq(sdev, 1);
5415 }
5416 
5417 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5418 				      int reason)
5419 {
5420 	if (!ql4xqfulltracking)
5421 		return -EOPNOTSUPP;
5422 
5423 	return iscsi_change_queue_depth(sdev, qdepth, reason);
5424 }
5425 
5426 /**
5427  * qla4xxx_del_from_active_array - returns an active srb
5428  * @ha: Pointer to host adapter structure.
5429  * @index: index into the active_array
5430  *
5431  * This routine removes and returns the srb at the specified index
5432  **/
5433 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5434     uint32_t index)
5435 {
5436 	struct srb *srb = NULL;
5437 	struct scsi_cmnd *cmd = NULL;
5438 
5439 	cmd = scsi_host_find_tag(ha->host, index);
5440 	if (!cmd)
5441 		return srb;
5442 
5443 	srb = (struct srb *)CMD_SP(cmd);
5444 	if (!srb)
5445 		return srb;
5446 
5447 	/* update counters */
5448 	if (srb->flags & SRB_DMA_VALID) {
5449 		ha->req_q_count += srb->iocb_cnt;
5450 		ha->iocb_cnt -= srb->iocb_cnt;
5451 		if (srb->cmd)
5452 			srb->cmd->host_scribble =
5453 				(unsigned char *)(unsigned long) MAX_SRBS;
5454 	}
5455 	return srb;
5456 }
5457 
5458 /**
5459  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
5460  * @ha: Pointer to host adapter structure.
5461  * @cmd: Scsi Command to wait on.
5462  *
5463  * This routine waits for the command to be returned by the Firmware
5464  * for some max time.
5465  **/
5466 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5467 				      struct scsi_cmnd *cmd)
5468 {
5469 	int done = 0;
5470 	struct srb *rp;
5471 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
5472 	int ret = SUCCESS;
5473 
5474 	/* Dont wait on command if PCI error is being handled
5475 	 * by PCI AER driver
5476 	 */
5477 	if (unlikely(pci_channel_offline(ha->pdev)) ||
5478 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
5479 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5480 		    ha->host_no, __func__);
5481 		return ret;
5482 	}
5483 
5484 	do {
5485 		/* Checking to see if its returned to OS */
5486 		rp = (struct srb *) CMD_SP(cmd);
5487 		if (rp == NULL) {
5488 			done++;
5489 			break;
5490 		}
5491 
5492 		msleep(2000);
5493 	} while (max_wait_time--);
5494 
5495 	return done;
5496 }
5497 
5498 /**
5499  * qla4xxx_wait_for_hba_online - waits for HBA to come online
5500  * @ha: Pointer to host adapter structure
5501  **/
5502 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5503 {
5504 	unsigned long wait_online;
5505 
5506 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
5507 	while (time_before(jiffies, wait_online)) {
5508 
5509 		if (adapter_up(ha))
5510 			return QLA_SUCCESS;
5511 
5512 		msleep(2000);
5513 	}
5514 
5515 	return QLA_ERROR;
5516 }
5517 
5518 /**
5519  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
5520  * @ha: pointer to HBA
5521  * @t: target id
5522  * @l: lun id
5523  *
5524  * This function waits for all outstanding commands to a lun to complete. It
5525  * returns 0 if all pending commands are returned and 1 otherwise.
5526  **/
5527 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5528 					struct scsi_target *stgt,
5529 					struct scsi_device *sdev)
5530 {
5531 	int cnt;
5532 	int status = 0;
5533 	struct scsi_cmnd *cmd;
5534 
5535 	/*
5536 	 * Waiting for all commands for the designated target or dev
5537 	 * in the active array
5538 	 */
5539 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5540 		cmd = scsi_host_find_tag(ha->host, cnt);
5541 		if (cmd && stgt == scsi_target(cmd->device) &&
5542 		    (!sdev || sdev == cmd->device)) {
5543 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5544 				status++;
5545 				break;
5546 			}
5547 		}
5548 	}
5549 	return status;
5550 }
5551 
5552 /**
5553  * qla4xxx_eh_abort - callback for abort task.
5554  * @cmd: Pointer to Linux's SCSI command structure
5555  *
5556  * This routine is called by the Linux OS to abort the specified
5557  * command.
5558  **/
5559 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5560 {
5561 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5562 	unsigned int id = cmd->device->id;
5563 	unsigned int lun = cmd->device->lun;
5564 	unsigned long flags;
5565 	struct srb *srb = NULL;
5566 	int ret = SUCCESS;
5567 	int wait = 0;
5568 
5569 	ql4_printk(KERN_INFO, ha,
5570 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5571 	    ha->host_no, id, lun, cmd);
5572 
5573 	spin_lock_irqsave(&ha->hardware_lock, flags);
5574 	srb = (struct srb *) CMD_SP(cmd);
5575 	if (!srb) {
5576 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
5577 		return SUCCESS;
5578 	}
5579 	kref_get(&srb->srb_ref);
5580 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5581 
5582 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5583 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5584 		    ha->host_no, id, lun));
5585 		ret = FAILED;
5586 	} else {
5587 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5588 		    ha->host_no, id, lun));
5589 		wait = 1;
5590 	}
5591 
5592 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5593 
5594 	/* Wait for command to complete */
5595 	if (wait) {
5596 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5597 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5598 			    ha->host_no, id, lun));
5599 			ret = FAILED;
5600 		}
5601 	}
5602 
5603 	ql4_printk(KERN_INFO, ha,
5604 	    "scsi%ld:%d:%d: Abort command - %s\n",
5605 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
5606 
5607 	return ret;
5608 }
5609 
5610 /**
5611  * qla4xxx_eh_device_reset - callback for target reset.
5612  * @cmd: Pointer to Linux's SCSI command structure
5613  *
5614  * This routine is called by the Linux OS to reset all luns on the
5615  * specified target.
5616  **/
5617 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5618 {
5619 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5620 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
5621 	int ret = FAILED, stat;
5622 
5623 	if (!ddb_entry)
5624 		return ret;
5625 
5626 	ret = iscsi_block_scsi_eh(cmd);
5627 	if (ret)
5628 		return ret;
5629 	ret = FAILED;
5630 
5631 	ql4_printk(KERN_INFO, ha,
5632 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5633 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
5634 
5635 	DEBUG2(printk(KERN_INFO
5636 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5637 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
5638 		      cmd, jiffies, cmd->request->timeout / HZ,
5639 		      ha->dpc_flags, cmd->result, cmd->allowed));
5640 
5641 	/* FIXME: wait for hba to go online */
5642 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5643 	if (stat != QLA_SUCCESS) {
5644 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
5645 		goto eh_dev_reset_done;
5646 	}
5647 
5648 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5649 					 cmd->device)) {
5650 		ql4_printk(KERN_INFO, ha,
5651 			   "DEVICE RESET FAILED - waiting for "
5652 			   "commands.\n");
5653 		goto eh_dev_reset_done;
5654 	}
5655 
5656 	/* Send marker. */
5657 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5658 		MM_LUN_RESET) != QLA_SUCCESS)
5659 		goto eh_dev_reset_done;
5660 
5661 	ql4_printk(KERN_INFO, ha,
5662 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5663 		   ha->host_no, cmd->device->channel, cmd->device->id,
5664 		   cmd->device->lun);
5665 
5666 	ret = SUCCESS;
5667 
5668 eh_dev_reset_done:
5669 
5670 	return ret;
5671 }
5672 
5673 /**
5674  * qla4xxx_eh_target_reset - callback for target reset.
5675  * @cmd: Pointer to Linux's SCSI command structure
5676  *
5677  * This routine is called by the Linux OS to reset the target.
5678  **/
5679 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5680 {
5681 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5682 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
5683 	int stat, ret;
5684 
5685 	if (!ddb_entry)
5686 		return FAILED;
5687 
5688 	ret = iscsi_block_scsi_eh(cmd);
5689 	if (ret)
5690 		return ret;
5691 
5692 	starget_printk(KERN_INFO, scsi_target(cmd->device),
5693 		       "WARM TARGET RESET ISSUED.\n");
5694 
5695 	DEBUG2(printk(KERN_INFO
5696 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5697 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
5698 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
5699 		      ha->dpc_flags, cmd->result, cmd->allowed));
5700 
5701 	stat = qla4xxx_reset_target(ha, ddb_entry);
5702 	if (stat != QLA_SUCCESS) {
5703 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5704 			       "WARM TARGET RESET FAILED.\n");
5705 		return FAILED;
5706 	}
5707 
5708 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5709 					 NULL)) {
5710 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5711 			       "WARM TARGET DEVICE RESET FAILED - "
5712 			       "waiting for commands.\n");
5713 		return FAILED;
5714 	}
5715 
5716 	/* Send marker. */
5717 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5718 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5719 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5720 			       "WARM TARGET DEVICE RESET FAILED - "
5721 			       "marker iocb failed.\n");
5722 		return FAILED;
5723 	}
5724 
5725 	starget_printk(KERN_INFO, scsi_target(cmd->device),
5726 		       "WARM TARGET RESET SUCCEEDED.\n");
5727 	return SUCCESS;
5728 }
5729 
5730 /**
5731  * qla4xxx_is_eh_active - check if error handler is running
5732  * @shost: Pointer to SCSI Host struct
5733  *
5734  * This routine finds that if reset host is called in EH
5735  * scenario or from some application like sg_reset
5736  **/
5737 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5738 {
5739 	if (shost->shost_state == SHOST_RECOVERY)
5740 		return 1;
5741 	return 0;
5742 }
5743 
5744 /**
5745  * qla4xxx_eh_host_reset - kernel callback
5746  * @cmd: Pointer to Linux's SCSI command structure
5747  *
5748  * This routine is invoked by the Linux kernel to perform fatal error
5749  * recovery on the specified adapter.
5750  **/
5751 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5752 {
5753 	int return_status = FAILED;
5754 	struct scsi_qla_host *ha;
5755 
5756 	ha = to_qla_host(cmd->device->host);
5757 
5758 	if (ql4xdontresethba) {
5759 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5760 		     ha->host_no, __func__));
5761 
5762 		/* Clear outstanding srb in queues */
5763 		if (qla4xxx_is_eh_active(cmd->device->host))
5764 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5765 
5766 		return FAILED;
5767 	}
5768 
5769 	ql4_printk(KERN_INFO, ha,
5770 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
5771 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
5772 
5773 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5774 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
5775 			      "DEAD.\n", ha->host_no, cmd->device->channel,
5776 			      __func__));
5777 
5778 		return FAILED;
5779 	}
5780 
5781 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5782 		if (is_qla8022(ha))
5783 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5784 		else
5785 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
5786 	}
5787 
5788 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
5789 		return_status = SUCCESS;
5790 
5791 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
5792 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
5793 
5794 	return return_status;
5795 }
5796 
5797 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5798 {
5799 	uint32_t mbox_cmd[MBOX_REG_COUNT];
5800 	uint32_t mbox_sts[MBOX_REG_COUNT];
5801 	struct addr_ctrl_blk_def *acb = NULL;
5802 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5803 	int rval = QLA_SUCCESS;
5804 	dma_addr_t acb_dma;
5805 
5806 	acb = dma_alloc_coherent(&ha->pdev->dev,
5807 				 sizeof(struct addr_ctrl_blk_def),
5808 				 &acb_dma, GFP_KERNEL);
5809 	if (!acb) {
5810 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5811 			   __func__);
5812 		rval = -ENOMEM;
5813 		goto exit_port_reset;
5814 	}
5815 
5816 	memset(acb, 0, acb_len);
5817 
5818 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5819 	if (rval != QLA_SUCCESS) {
5820 		rval = -EIO;
5821 		goto exit_free_acb;
5822 	}
5823 
5824 	rval = qla4xxx_disable_acb(ha);
5825 	if (rval != QLA_SUCCESS) {
5826 		rval = -EIO;
5827 		goto exit_free_acb;
5828 	}
5829 
5830 	wait_for_completion_timeout(&ha->disable_acb_comp,
5831 				    DISABLE_ACB_TOV * HZ);
5832 
5833 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5834 	if (rval != QLA_SUCCESS) {
5835 		rval = -EIO;
5836 		goto exit_free_acb;
5837 	}
5838 
5839 exit_free_acb:
5840 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5841 			  acb, acb_dma);
5842 exit_port_reset:
5843 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5844 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
5845 	return rval;
5846 }
5847 
5848 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5849 {
5850 	struct scsi_qla_host *ha = to_qla_host(shost);
5851 	int rval = QLA_SUCCESS;
5852 
5853 	if (ql4xdontresethba) {
5854 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5855 				  __func__));
5856 		rval = -EPERM;
5857 		goto exit_host_reset;
5858 	}
5859 
5860 	rval = qla4xxx_wait_for_hba_online(ha);
5861 	if (rval != QLA_SUCCESS) {
5862 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5863 				  "adapter\n", __func__));
5864 		rval = -EIO;
5865 		goto exit_host_reset;
5866 	}
5867 
5868 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5869 		goto recover_adapter;
5870 
5871 	switch (reset_type) {
5872 	case SCSI_ADAPTER_RESET:
5873 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
5874 		break;
5875 	case SCSI_FIRMWARE_RESET:
5876 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5877 			if (is_qla8022(ha))
5878 				/* set firmware context reset */
5879 				set_bit(DPC_RESET_HA_FW_CONTEXT,
5880 					&ha->dpc_flags);
5881 			else {
5882 				rval = qla4xxx_context_reset(ha);
5883 				goto exit_host_reset;
5884 			}
5885 		}
5886 		break;
5887 	}
5888 
5889 recover_adapter:
5890 	rval = qla4xxx_recover_adapter(ha);
5891 	if (rval != QLA_SUCCESS) {
5892 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5893 				  __func__));
5894 		rval = -EIO;
5895 	}
5896 
5897 exit_host_reset:
5898 	return rval;
5899 }
5900 
5901 /* PCI AER driver recovers from all correctable errors w/o
5902  * driver intervention. For uncorrectable errors PCI AER
5903  * driver calls the following device driver's callbacks
5904  *
5905  * - Fatal Errors - link_reset
5906  * - Non-Fatal Errors - driver's pci_error_detected() which
5907  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5908  *
5909  * PCI AER driver calls
5910  * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
5911  *               returns RECOVERED or NEED_RESET if fw_hung
5912  * NEED_RESET - driver's slot_reset()
5913  * DISCONNECT - device is dead & cannot recover
5914  * RECOVERED - driver's pci_resume()
5915  */
5916 static pci_ers_result_t
5917 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5918 {
5919 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5920 
5921 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5922 	    ha->host_no, __func__, state);
5923 
5924 	if (!is_aer_supported(ha))
5925 		return PCI_ERS_RESULT_NONE;
5926 
5927 	switch (state) {
5928 	case pci_channel_io_normal:
5929 		clear_bit(AF_EEH_BUSY, &ha->flags);
5930 		return PCI_ERS_RESULT_CAN_RECOVER;
5931 	case pci_channel_io_frozen:
5932 		set_bit(AF_EEH_BUSY, &ha->flags);
5933 		qla4xxx_mailbox_premature_completion(ha);
5934 		qla4xxx_free_irqs(ha);
5935 		pci_disable_device(pdev);
5936 		/* Return back all IOs */
5937 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5938 		return PCI_ERS_RESULT_NEED_RESET;
5939 	case pci_channel_io_perm_failure:
5940 		set_bit(AF_EEH_BUSY, &ha->flags);
5941 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5942 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5943 		return PCI_ERS_RESULT_DISCONNECT;
5944 	}
5945 	return PCI_ERS_RESULT_NEED_RESET;
5946 }
5947 
5948 /**
5949  * qla4xxx_pci_mmio_enabled() gets called if
5950  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5951  * and read/write to the device still works.
5952  **/
5953 static pci_ers_result_t
5954 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5955 {
5956 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5957 
5958 	if (!is_aer_supported(ha))
5959 		return PCI_ERS_RESULT_NONE;
5960 
5961 	return PCI_ERS_RESULT_RECOVERED;
5962 }
5963 
5964 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
5965 {
5966 	uint32_t rval = QLA_ERROR;
5967 	uint32_t ret = 0;
5968 	int fn;
5969 	struct pci_dev *other_pdev = NULL;
5970 
5971 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
5972 
5973 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5974 
5975 	if (test_bit(AF_ONLINE, &ha->flags)) {
5976 		clear_bit(AF_ONLINE, &ha->flags);
5977 		clear_bit(AF_LINK_UP, &ha->flags);
5978 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
5979 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
5980 	}
5981 
5982 	fn = PCI_FUNC(ha->pdev->devfn);
5983 	while (fn > 0) {
5984 		fn--;
5985 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
5986 		    "func %x\n", ha->host_no, __func__, fn);
5987 		/* Get the pci device given the domain, bus,
5988 		 * slot/function number */
5989 		other_pdev =
5990 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5991 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5992 		    fn));
5993 
5994 		if (!other_pdev)
5995 			continue;
5996 
5997 		if (atomic_read(&other_pdev->enable_cnt)) {
5998 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
5999 			    "func in enabled state%x\n", ha->host_no,
6000 			    __func__, fn);
6001 			pci_dev_put(other_pdev);
6002 			break;
6003 		}
6004 		pci_dev_put(other_pdev);
6005 	}
6006 
6007 	/* The first function on the card, the reset owner will
6008 	 * start & initialize the firmware. The other functions
6009 	 * on the card will reset the firmware context
6010 	 */
6011 	if (!fn) {
6012 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
6013 		    "0x%x is the owner\n", ha->host_no, __func__,
6014 		    ha->pdev->devfn);
6015 
6016 		qla4_8xxx_idc_lock(ha);
6017 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6018 		    QLA82XX_DEV_COLD);
6019 
6020 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
6021 		    QLA82XX_IDC_VERSION);
6022 
6023 		qla4_8xxx_idc_unlock(ha);
6024 		clear_bit(AF_FW_RECOVERY, &ha->flags);
6025 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6026 		qla4_8xxx_idc_lock(ha);
6027 
6028 		if (rval != QLA_SUCCESS) {
6029 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6030 			    "FAILED\n", ha->host_no, __func__);
6031 			qla4_8xxx_clear_drv_active(ha);
6032 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6033 			    QLA82XX_DEV_FAILED);
6034 		} else {
6035 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6036 			    "READY\n", ha->host_no, __func__);
6037 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6038 			    QLA82XX_DEV_READY);
6039 			/* Clear driver state register */
6040 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
6041 			qla4_8xxx_set_drv_active(ha);
6042 			ret = qla4xxx_request_irqs(ha);
6043 			if (ret) {
6044 				ql4_printk(KERN_WARNING, ha, "Failed to "
6045 				    "reserve interrupt %d already in use.\n",
6046 				    ha->pdev->irq);
6047 				rval = QLA_ERROR;
6048 			} else {
6049 				ha->isp_ops->enable_intrs(ha);
6050 				rval = QLA_SUCCESS;
6051 			}
6052 		}
6053 		qla4_8xxx_idc_unlock(ha);
6054 	} else {
6055 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6056 		    "the reset owner\n", ha->host_no, __func__,
6057 		    ha->pdev->devfn);
6058 		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
6059 		    QLA82XX_DEV_READY)) {
6060 			clear_bit(AF_FW_RECOVERY, &ha->flags);
6061 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6062 			if (rval == QLA_SUCCESS) {
6063 				ret = qla4xxx_request_irqs(ha);
6064 				if (ret) {
6065 					ql4_printk(KERN_WARNING, ha, "Failed to"
6066 					    " reserve interrupt %d already in"
6067 					    " use.\n", ha->pdev->irq);
6068 					rval = QLA_ERROR;
6069 				} else {
6070 					ha->isp_ops->enable_intrs(ha);
6071 					rval = QLA_SUCCESS;
6072 				}
6073 			}
6074 			qla4_8xxx_idc_lock(ha);
6075 			qla4_8xxx_set_drv_active(ha);
6076 			qla4_8xxx_idc_unlock(ha);
6077 		}
6078 	}
6079 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6080 	return rval;
6081 }
6082 
6083 static pci_ers_result_t
6084 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6085 {
6086 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
6087 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6088 	int rc;
6089 
6090 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
6091 	    ha->host_no, __func__);
6092 
6093 	if (!is_aer_supported(ha))
6094 		return PCI_ERS_RESULT_NONE;
6095 
6096 	/* Restore the saved state of PCIe device -
6097 	 * BAR registers, PCI Config space, PCIX, MSI,
6098 	 * IOV states
6099 	 */
6100 	pci_restore_state(pdev);
6101 
6102 	/* pci_restore_state() clears the saved_state flag of the device
6103 	 * save restored state which resets saved_state flag
6104 	 */
6105 	pci_save_state(pdev);
6106 
6107 	/* Initialize device or resume if in suspended state */
6108 	rc = pci_enable_device(pdev);
6109 	if (rc) {
6110 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
6111 		    "device after reset\n", ha->host_no, __func__);
6112 		goto exit_slot_reset;
6113 	}
6114 
6115 	ha->isp_ops->disable_intrs(ha);
6116 
6117 	if (is_qla8022(ha)) {
6118 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6119 			ret = PCI_ERS_RESULT_RECOVERED;
6120 			goto exit_slot_reset;
6121 		} else
6122 			goto exit_slot_reset;
6123 	}
6124 
6125 exit_slot_reset:
6126 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
6127 	    "device after reset\n", ha->host_no, __func__, ret);
6128 	return ret;
6129 }
6130 
6131 static void
6132 qla4xxx_pci_resume(struct pci_dev *pdev)
6133 {
6134 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6135 	int ret;
6136 
6137 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
6138 	    ha->host_no, __func__);
6139 
6140 	ret = qla4xxx_wait_for_hba_online(ha);
6141 	if (ret != QLA_SUCCESS) {
6142 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
6143 		    "resume I/O from slot/link_reset\n", ha->host_no,
6144 		     __func__);
6145 	}
6146 
6147 	pci_cleanup_aer_uncorrect_error_status(pdev);
6148 	clear_bit(AF_EEH_BUSY, &ha->flags);
6149 }
6150 
6151 static const struct pci_error_handlers qla4xxx_err_handler = {
6152 	.error_detected = qla4xxx_pci_error_detected,
6153 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
6154 	.slot_reset = qla4xxx_pci_slot_reset,
6155 	.resume = qla4xxx_pci_resume,
6156 };
6157 
6158 static struct pci_device_id qla4xxx_pci_tbl[] = {
6159 	{
6160 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6161 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
6162 		.subvendor	= PCI_ANY_ID,
6163 		.subdevice	= PCI_ANY_ID,
6164 	},
6165 	{
6166 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6167 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
6168 		.subvendor	= PCI_ANY_ID,
6169 		.subdevice	= PCI_ANY_ID,
6170 	},
6171 	{
6172 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6173 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
6174 		.subvendor	= PCI_ANY_ID,
6175 		.subdevice	= PCI_ANY_ID,
6176 	},
6177 	{
6178 		.vendor         = PCI_VENDOR_ID_QLOGIC,
6179 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
6180 		.subvendor      = PCI_ANY_ID,
6181 		.subdevice      = PCI_ANY_ID,
6182 	},
6183 	{0, 0},
6184 };
6185 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
6186 
6187 static struct pci_driver qla4xxx_pci_driver = {
6188 	.name		= DRIVER_NAME,
6189 	.id_table	= qla4xxx_pci_tbl,
6190 	.probe		= qla4xxx_probe_adapter,
6191 	.remove		= qla4xxx_remove_adapter,
6192 	.err_handler = &qla4xxx_err_handler,
6193 };
6194 
6195 static int __init qla4xxx_module_init(void)
6196 {
6197 	int ret;
6198 
6199 	/* Allocate cache for SRBs. */
6200 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
6201 				       SLAB_HWCACHE_ALIGN, NULL);
6202 	if (srb_cachep == NULL) {
6203 		printk(KERN_ERR
6204 		       "%s: Unable to allocate SRB cache..."
6205 		       "Failing load!\n", DRIVER_NAME);
6206 		ret = -ENOMEM;
6207 		goto no_srp_cache;
6208 	}
6209 
6210 	/* Derive version string. */
6211 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
6212 	if (ql4xextended_error_logging)
6213 		strcat(qla4xxx_version_str, "-debug");
6214 
6215 	qla4xxx_scsi_transport =
6216 		iscsi_register_transport(&qla4xxx_iscsi_transport);
6217 	if (!qla4xxx_scsi_transport){
6218 		ret = -ENODEV;
6219 		goto release_srb_cache;
6220 	}
6221 
6222 	ret = pci_register_driver(&qla4xxx_pci_driver);
6223 	if (ret)
6224 		goto unregister_transport;
6225 
6226 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6227 	return 0;
6228 
6229 unregister_transport:
6230 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6231 release_srb_cache:
6232 	kmem_cache_destroy(srb_cachep);
6233 no_srp_cache:
6234 	return ret;
6235 }
6236 
6237 static void __exit qla4xxx_module_exit(void)
6238 {
6239 	pci_unregister_driver(&qla4xxx_pci_driver);
6240 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6241 	kmem_cache_destroy(srb_cachep);
6242 }
6243 
6244 module_init(qla4xxx_module_init);
6245 module_exit(qla4xxx_module_exit);
6246 
6247 MODULE_AUTHOR("QLogic Corporation");
6248 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
6249 MODULE_LICENSE("GPL");
6250 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
6251