xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision ee996a69)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 
22 /*
23  * Driver version
24  */
25 static char qla4xxx_version_str[40];
26 
27 /*
28  * SRB allocation cache
29  */
30 static struct kmem_cache *srb_cachep;
31 
32 /*
33  * Module parameter information and variables
34  */
35 static int ql4xdisablesysfsboot = 1;
36 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 		 " Set to disable exporting boot targets to sysfs.\n"
39 		 "\t\t  0 - Export boot targets\n"
40 		 "\t\t  1 - Do not export boot targets (Default)");
41 
42 int ql4xdontresethba;
43 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44 MODULE_PARM_DESC(ql4xdontresethba,
45 		 " Don't reset the HBA for driver recovery.\n"
46 		 "\t\t  0 - The driver will reset the HBA (Default)\n"
47 		 "\t\t  1 - The driver will NOT reset the HBA");
48 
49 int ql4xextended_error_logging;
50 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51 MODULE_PARM_DESC(ql4xextended_error_logging,
52 		 " Option to enable extended error logging.\n"
53 		 "\t\t  0 - no logging (Default)\n"
54 		 "\t\t  2 - debug logging");
55 
56 int ql4xenablemsix = 1;
57 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
58 MODULE_PARM_DESC(ql4xenablemsix,
59 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
60 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
61 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
62 		 "\t\t  2 = enable MSI interrupt mechanism.");
63 
64 #define QL4_DEF_QDEPTH 32
65 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
66 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
67 MODULE_PARM_DESC(ql4xmaxqdepth,
68 		 " Maximum queue depth to report for target devices.\n"
69 		 "\t\t  Default: 32.");
70 
71 static int ql4xqfulltracking = 1;
72 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73 MODULE_PARM_DESC(ql4xqfulltracking,
74 		 " Enable or disable dynamic tracking and adjustment of\n"
75 		 "\t\t scsi device queue depth.\n"
76 		 "\t\t  0 - Disable.\n"
77 		 "\t\t  1 - Enable. (Default)");
78 
79 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
80 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
81 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
82 		" Target Session Recovery Timeout.\n"
83 		"\t\t  Default: 120 sec.");
84 
85 int ql4xmdcapmask = 0x1F;
86 module_param(ql4xmdcapmask, int, S_IRUGO);
87 MODULE_PARM_DESC(ql4xmdcapmask,
88 		 " Set the Minidump driver capture mask level.\n"
89 		 "\t\t  Default is 0x1F.\n"
90 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
91 
92 int ql4xenablemd = 1;
93 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(ql4xenablemd,
95 		 " Set to enable minidump.\n"
96 		 "\t\t  0 - disable minidump\n"
97 		 "\t\t  1 - enable minidump (Default)");
98 
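/*
 * Usage sketch for the module parameters above (the module is built as
 * qla4xxx; the sysfs paths follow the standard module_param layout):
 *
 *	modprobe qla4xxx ql4xmaxqdepth=64 ql4xextended_error_logging=2
 *	echo 0 > /sys/module/qla4xxx/parameters/ql4xqfulltracking
 *
 * Parameters declared with S_IWUSR may be changed at runtime via sysfs;
 * those declared S_IRUGO only are read-only after load.
 */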
99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
100 /*
101  * Driver-internal helper prototypes
102  */
103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
104 
105 /*
106  * iSCSI template entry points
107  */
108 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
109 				     enum iscsi_param param, char *buf);
110 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
111 				  enum iscsi_param param, char *buf);
112 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
113 				  enum iscsi_host_param param, char *buf);
114 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
115 				   uint32_t len);
116 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
117 				   enum iscsi_param_type param_type,
118 				   int param, char *buf);
119 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
120 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
121 						 struct sockaddr *dst_addr,
122 						 int non_blocking);
123 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
124 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
125 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
126 				enum iscsi_param param, char *buf);
127 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
128 static struct iscsi_cls_conn *
129 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
130 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
131 			     struct iscsi_cls_conn *cls_conn,
132 			     uint64_t transport_fd, int is_leading);
133 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
134 static struct iscsi_cls_session *
135 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
136 			uint16_t qdepth, uint32_t initial_cmdsn);
137 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
138 static void qla4xxx_task_work(struct work_struct *wdata);
139 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
140 static int qla4xxx_task_xmit(struct iscsi_task *);
141 static void qla4xxx_task_cleanup(struct iscsi_task *);
142 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
143 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
144 				   struct iscsi_stats *stats);
145 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
146 			     uint32_t iface_type, uint32_t payload_size,
147 			     uint32_t pid, struct sockaddr *dst_addr);
148 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
149 				 uint32_t *num_entries, char *buf);
150 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
151 
152 /*
153  * SCSI host template entry points
154  */
155 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
156 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_slave_alloc(struct scsi_device *device);
161 static int qla4xxx_slave_configure(struct scsi_device *device);
162 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
163 static umode_t qla4_attr_is_visible(int param_type, int param);
164 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 				      int reason);
167 
168 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
169     QLA82XX_LEGACY_INTR_CONFIG;
170 
171 static struct scsi_host_template qla4xxx_driver_template = {
172 	.module			= THIS_MODULE,
173 	.name			= DRIVER_NAME,
174 	.proc_name		= DRIVER_NAME,
175 	.queuecommand		= qla4xxx_queuecommand,
176 
177 	.eh_abort_handler	= qla4xxx_eh_abort,
178 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
179 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
180 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
181 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
182 
183 	.slave_configure	= qla4xxx_slave_configure,
184 	.slave_alloc		= qla4xxx_slave_alloc,
185 	.slave_destroy		= qla4xxx_slave_destroy,
186 	.change_queue_depth	= qla4xxx_change_queue_depth,
187 
188 	.this_id		= -1,
189 	.cmd_per_lun		= 3,
190 	.use_clustering		= ENABLE_CLUSTERING,
191 	.sg_tablesize		= SG_ALL,
192 
193 	.max_sectors		= 0xFFFF,
194 	.shost_attrs		= qla4xxx_host_attrs,
195 	.host_reset		= qla4xxx_host_reset,
196 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
197 };
198 
199 static struct iscsi_transport qla4xxx_iscsi_transport = {
200 	.owner			= THIS_MODULE,
201 	.name			= DRIVER_NAME,
202 	.caps			= CAP_TEXT_NEGO |
203 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
204 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
205 				  CAP_MULTI_R2T,
206 	.attr_is_visible	= qla4_attr_is_visible,
207 	.create_session         = qla4xxx_session_create,
208 	.destroy_session        = qla4xxx_session_destroy,
209 	.start_conn             = qla4xxx_conn_start,
210 	.create_conn            = qla4xxx_conn_create,
211 	.bind_conn              = qla4xxx_conn_bind,
212 	.stop_conn              = iscsi_conn_stop,
213 	.destroy_conn           = qla4xxx_conn_destroy,
214 	.set_param              = iscsi_set_param,
215 	.get_conn_param		= qla4xxx_conn_get_param,
216 	.get_session_param	= qla4xxx_session_get_param,
217 	.get_ep_param           = qla4xxx_get_ep_param,
218 	.ep_connect		= qla4xxx_ep_connect,
219 	.ep_poll		= qla4xxx_ep_poll,
220 	.ep_disconnect		= qla4xxx_ep_disconnect,
221 	.get_stats		= qla4xxx_conn_get_stats,
222 	.send_pdu		= iscsi_conn_send_pdu,
223 	.xmit_task		= qla4xxx_task_xmit,
224 	.cleanup_task		= qla4xxx_task_cleanup,
225 	.alloc_pdu		= qla4xxx_alloc_pdu,
226 
227 	.get_host_param		= qla4xxx_host_get_param,
228 	.set_iface_param	= qla4xxx_iface_set_param,
229 	.get_iface_param	= qla4xxx_get_iface_param,
230 	.bsg_request		= qla4xxx_bsg_request,
231 	.send_ping		= qla4xxx_send_ping,
232 	.get_chap		= qla4xxx_get_chap_list,
233 	.delete_chap		= qla4xxx_delete_chap,
234 };
235 
236 static struct scsi_transport_template *qla4xxx_scsi_transport;
237 
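/**
 * qla4xxx_send_ping - issue a ping IOCB to a destination address
 * @shost:	Scsi_Host of the adapter
 * @iface_num:	interface number (0 or 1)
 * @iface_type:	ISCSI_IFACE_TYPE_IPV4 or ISCSI_IFACE_TYPE_IPV6
 * @payload_size: ping payload size
 * @pid:	ping id from the iSCSI transport, passed through to the IOCB
 * @dst_addr:	destination address (AF_INET or AF_INET6)
 *
 * For IPv6 the ping is first attempted with the link-local source address
 * and, on failure, retried with the interface's configured IPv6 address.
 * Returns 0 on success, -EINVAL if the ping IOCB fails and -ENOSYS for an
 * unsupported interface/address-family combination.
 */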
238 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
239 			     uint32_t iface_type, uint32_t payload_size,
240 			     uint32_t pid, struct sockaddr *dst_addr)
241 {
242 	struct scsi_qla_host *ha = to_qla_host(shost);
243 	struct sockaddr_in *addr;
244 	struct sockaddr_in6 *addr6;
245 	uint32_t options = 0;
246 	uint8_t ipaddr[IPv6_ADDR_LEN];
247 	int rval;
248 
249 	memset(ipaddr, 0, IPv6_ADDR_LEN);
250 	/* IPv4 to IPv4 */
251 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
252 	    (dst_addr->sa_family == AF_INET)) {
253 		addr = (struct sockaddr_in *)dst_addr;
254 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
255 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
256 				  "dest: %pI4\n", __func__,
257 				  &ha->ip_config.ip_address, ipaddr));
258 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
259 					 ipaddr);
260 		if (rval)
261 			rval = -EINVAL;
262 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
263 		   (dst_addr->sa_family == AF_INET6)) {
264 		/* IPv6 to IPv6 */
265 		addr6 = (struct sockaddr_in6 *)dst_addr;
266 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
267 
268 		options |= PING_IPV6_PROTOCOL_ENABLE;
269 
270 		/* Ping using LinkLocal address */
271 		if ((iface_num == 0) || (iface_num == 1)) {
272 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
273 					  "src: %pI6 dest: %pI6\n", __func__,
274 					  &ha->ip_config.ipv6_link_local_addr,
275 					  ipaddr));
276 			options |= PING_IPV6_LINKLOCAL_ADDR;
277 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
278 						 pid, ipaddr);
279 		} else {
280 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
281 				   "not supported\n", __func__, iface_num);
282 			rval = -ENOSYS;
283 			goto exit_send_ping;
284 		}
285 
286 		/*
287 		 * If the ping using the link-local source address fails,
288 		 * retry using the interface's configured IPv6 address
289 		 */
290 		if (rval != QLA_SUCCESS) {
291 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
292 			if (iface_num == 0) {
293 				options |= PING_IPV6_ADDR0;
294 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
295 						  "Ping src: %pI6 "
296 						  "dest: %pI6\n", __func__,
297 						  &ha->ip_config.ipv6_addr0,
298 						  ipaddr));
299 			} else if (iface_num == 1) {
300 				options |= PING_IPV6_ADDR1;
301 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
302 						  "Ping src: %pI6 "
303 						  "dest: %pI6\n", __func__,
304 						  &ha->ip_config.ipv6_addr1,
305 						  ipaddr));
306 			}
307 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
308 						 pid, ipaddr);
309 			if (rval)
310 				rval = -EINVAL;
311 		}
312 	} else
313 		rval = -ENOSYS;
314 exit_send_ping:
315 	return rval;
316 }
317 
318 static umode_t qla4_attr_is_visible(int param_type, int param)
319 {
320 	switch (param_type) {
321 	case ISCSI_HOST_PARAM:
322 		switch (param) {
323 		case ISCSI_HOST_PARAM_HWADDRESS:
324 		case ISCSI_HOST_PARAM_IPADDRESS:
325 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
326 		case ISCSI_HOST_PARAM_PORT_STATE:
327 		case ISCSI_HOST_PARAM_PORT_SPEED:
328 			return S_IRUGO;
329 		default:
330 			return 0;
331 		}
332 	case ISCSI_PARAM:
333 		switch (param) {
334 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
335 		case ISCSI_PARAM_PERSISTENT_PORT:
336 		case ISCSI_PARAM_CONN_ADDRESS:
337 		case ISCSI_PARAM_CONN_PORT:
338 		case ISCSI_PARAM_TARGET_NAME:
339 		case ISCSI_PARAM_TPGT:
340 		case ISCSI_PARAM_TARGET_ALIAS:
341 		case ISCSI_PARAM_MAX_BURST:
342 		case ISCSI_PARAM_MAX_R2T:
343 		case ISCSI_PARAM_FIRST_BURST:
344 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
345 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
346 		case ISCSI_PARAM_IFACE_NAME:
347 		case ISCSI_PARAM_CHAP_OUT_IDX:
348 		case ISCSI_PARAM_CHAP_IN_IDX:
349 		case ISCSI_PARAM_USERNAME:
350 		case ISCSI_PARAM_PASSWORD:
351 		case ISCSI_PARAM_USERNAME_IN:
352 		case ISCSI_PARAM_PASSWORD_IN:
353 			return S_IRUGO;
354 		default:
355 			return 0;
356 		}
357 	case ISCSI_NET_PARAM:
358 		switch (param) {
359 		case ISCSI_NET_PARAM_IPV4_ADDR:
360 		case ISCSI_NET_PARAM_IPV4_SUBNET:
361 		case ISCSI_NET_PARAM_IPV4_GW:
362 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
363 		case ISCSI_NET_PARAM_IFACE_ENABLE:
364 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
365 		case ISCSI_NET_PARAM_IPV6_ADDR:
366 		case ISCSI_NET_PARAM_IPV6_ROUTER:
367 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
368 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
369 		case ISCSI_NET_PARAM_VLAN_ID:
370 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
371 		case ISCSI_NET_PARAM_VLAN_ENABLED:
372 		case ISCSI_NET_PARAM_MTU:
373 		case ISCSI_NET_PARAM_PORT:
374 			return S_IRUGO;
375 		default:
376 			return 0;
377 		}
378 	}
379 
380 	return 0;
381 }
382 
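/**
 * qla4xxx_get_chap_list - copy valid CHAP entries to a buffer
 * @shost:	Scsi_Host of the adapter
 * @chap_tbl_idx: CHAP table index to start from
 * @num_entries: in: max entries wanted, out: number of valid entries copied
 * @buf:	destination array of struct iscsi_chap_rec
 *
 * Walks the cached CHAP table (ha->chap_list) and copies every entry with a
 * valid cookie into @buf, marking each record CHAP_TYPE_OUT (local) or
 * CHAP_TYPE_IN (peer) based on the table flags.
 */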
383 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
384 				  uint32_t *num_entries, char *buf)
385 {
386 	struct scsi_qla_host *ha = to_qla_host(shost);
387 	struct ql4_chap_table *chap_table;
388 	struct iscsi_chap_rec *chap_rec;
389 	int max_chap_entries = 0;
390 	int valid_chap_entries = 0;
391 	int ret = 0, i;
392 
393 	if (is_qla8022(ha))
394 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
395 					sizeof(struct ql4_chap_table);
396 	else
397 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
398 
399 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
400 			__func__, *num_entries, chap_tbl_idx);
401 
402 	if (!buf) {
403 		ret = -ENOMEM;
404 		goto exit_get_chap_list;
405 	}
406 
407 	chap_rec = (struct iscsi_chap_rec *) buf;
408 	mutex_lock(&ha->chap_sem);
409 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
410 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
411 		if (chap_table->cookie !=
412 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
413 			continue;
414 
415 		chap_rec->chap_tbl_idx = i;
416 		strncpy(chap_rec->username, chap_table->name,
417 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
418 		strncpy(chap_rec->password, chap_table->secret,
419 			QL4_CHAP_MAX_SECRET_LEN);
420 		chap_rec->password_length = chap_table->secret_len;
421 
422 		if (chap_table->flags & BIT_7) /* local */
423 			chap_rec->chap_type = CHAP_TYPE_OUT;
424 
425 		if (chap_table->flags & BIT_6) /* peer */
426 			chap_rec->chap_type = CHAP_TYPE_IN;
427 
428 		chap_rec++;
429 
430 		valid_chap_entries++;
431 		if (valid_chap_entries == *num_entries)
432 			break;
433 		else
434 			continue;
435 	}
436 	mutex_unlock(&ha->chap_sem);
437 
438 exit_get_chap_list:
439 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
440 			__func__,  valid_chap_entries);
441 	*num_entries = valid_chap_entries;
442 	return ret;
443 }
444 
445 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
446 {
447 	int ret = 0;
448 	uint16_t *chap_tbl_idx = (uint16_t *) data;
449 	struct iscsi_cls_session *cls_session;
450 	struct iscsi_session *sess;
451 	struct ddb_entry *ddb_entry;
452 
453 	if (!iscsi_is_session_dev(dev))
454 		goto exit_is_chap_active;
455 
456 	cls_session = iscsi_dev_to_session(dev);
457 	sess = cls_session->dd_data;
458 	ddb_entry = sess->dd_data;
459 
460 	if (iscsi_session_chkready(cls_session))
461 		goto exit_is_chap_active;
462 
463 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
464 		ret = 1;
465 
466 exit_is_chap_active:
467 	return ret;
468 }
469 
470 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
471 				  uint16_t chap_tbl_idx)
472 {
473 	int ret = 0;
474 
475 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
476 				    __qla4xxx_is_chap_active);
477 
478 	return ret;
479 }
480 
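/**
 * qla4xxx_delete_chap - invalidate a CHAP entry in flash
 * @shost:	Scsi_Host of the adapter
 * @chap_tbl_idx: index of the CHAP entry to delete
 *
 * Refuses to delete an entry that is referenced by an active session
 * (returns -EBUSY).  Otherwise the entry's cookie is overwritten with
 * 0xFFFF in flash and the cached chap_list is updated to match.
 */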
481 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
482 {
483 	struct scsi_qla_host *ha = to_qla_host(shost);
484 	struct ql4_chap_table *chap_table;
485 	dma_addr_t chap_dma;
486 	int max_chap_entries = 0;
487 	uint32_t offset = 0;
488 	uint32_t chap_size;
489 	int ret = 0;
490 
491 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
492 	if (chap_table == NULL)
493 		return -ENOMEM;
494 
495 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
496 
497 	if (is_qla8022(ha))
498 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
499 				   sizeof(struct ql4_chap_table);
500 	else
501 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
502 
503 	if (chap_tbl_idx > max_chap_entries) {
504 		ret = -EINVAL;
505 		goto exit_delete_chap;
506 	}
507 
508 	/* Check if the chap index is in use.
509 	 * If the chap entry is in use, do not delete it. */
510 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
511 	if (ret) {
512 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
513 			   "delete from flash\n", chap_tbl_idx);
514 		ret = -EBUSY;
515 		goto exit_delete_chap;
516 	}
517 
518 	chap_size = sizeof(struct ql4_chap_table);
519 	if (is_qla40XX(ha))
520 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
521 	else {
522 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
523 		/* flt_chap_size is CHAP table size for both ports
524 		 * so divide it by 2 to calculate the offset for second port
525 		 */
526 		if (ha->port_num == 1)
527 			offset += (ha->hw.flt_chap_size / 2);
528 		offset += (chap_tbl_idx * chap_size);
529 	}
530 
531 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
532 	if (ret != QLA_SUCCESS) {
533 		ret = -EINVAL;
534 		goto exit_delete_chap;
535 	}
536 
537 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: 0x%x\n",
538 			  __le16_to_cpu(chap_table->cookie)));
539 
540 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
541 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
542 		goto exit_delete_chap;
543 	}
544 
545 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
546 
547 	offset = FLASH_CHAP_OFFSET |
548 			(chap_tbl_idx * sizeof(struct ql4_chap_table));
549 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
550 				FLASH_OPT_RMW_COMMIT);
551 	if (ret == QLA_SUCCESS && ha->chap_list) {
552 		mutex_lock(&ha->chap_sem);
553 		/* Update ha chap_list cache */
554 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
555 			chap_table, sizeof(struct ql4_chap_table));
556 		mutex_unlock(&ha->chap_sem);
557 	}
558 	if (ret != QLA_SUCCESS)
559 		ret =  -EINVAL;
560 
561 exit_delete_chap:
562 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
563 	return ret;
564 }
565 
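/**
 * qla4xxx_get_iface_param - report a network parameter of an iSCSI iface
 * @iface:	iface being queried
 * @param_type:	only ISCSI_NET_PARAM is supported
 * @param:	ISCSI_NET_PARAM_* value
 * @buf:	output buffer
 *
 * Formats the requested value from the driver's cached ha->ip_config into
 * @buf.  Returns the string length, or -ENOSYS for unsupported parameters.
 */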
566 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
567 				   enum iscsi_param_type param_type,
568 				   int param, char *buf)
569 {
570 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
571 	struct scsi_qla_host *ha = to_qla_host(shost);
572 	int len = -ENOSYS;
573 
574 	if (param_type != ISCSI_NET_PARAM)
575 		return -ENOSYS;
576 
577 	switch (param) {
578 	case ISCSI_NET_PARAM_IPV4_ADDR:
579 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
580 		break;
581 	case ISCSI_NET_PARAM_IPV4_SUBNET:
582 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
583 		break;
584 	case ISCSI_NET_PARAM_IPV4_GW:
585 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
586 		break;
587 	case ISCSI_NET_PARAM_IFACE_ENABLE:
588 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
589 			len = sprintf(buf, "%s\n",
590 				      (ha->ip_config.ipv4_options &
591 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
592 				      "enabled" : "disabled");
593 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
594 			len = sprintf(buf, "%s\n",
595 				      (ha->ip_config.ipv6_options &
596 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
597 				       "enabled" : "disabled");
598 		break;
599 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
600 		len = sprintf(buf, "%s\n",
601 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
602 			      "dhcp" : "static");
603 		break;
604 	case ISCSI_NET_PARAM_IPV6_ADDR:
605 		if (iface->iface_num == 0)
606 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
607 		if (iface->iface_num == 1)
608 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
609 		break;
610 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
611 		len = sprintf(buf, "%pI6\n",
612 			      &ha->ip_config.ipv6_link_local_addr);
613 		break;
614 	case ISCSI_NET_PARAM_IPV6_ROUTER:
615 		len = sprintf(buf, "%pI6\n",
616 			      &ha->ip_config.ipv6_default_router_addr);
617 		break;
618 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
619 		len = sprintf(buf, "%s\n",
620 			      (ha->ip_config.ipv6_addl_options &
621 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
622 			       "nd" : "static");
623 		break;
624 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
625 		len = sprintf(buf, "%s\n",
626 			      (ha->ip_config.ipv6_addl_options &
627 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
628 			       "auto" : "static");
629 		break;
630 	case ISCSI_NET_PARAM_VLAN_ID:
631 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
632 			len = sprintf(buf, "%d\n",
633 				      (ha->ip_config.ipv4_vlan_tag &
634 				       ISCSI_MAX_VLAN_ID));
635 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
636 			len = sprintf(buf, "%d\n",
637 				      (ha->ip_config.ipv6_vlan_tag &
638 				       ISCSI_MAX_VLAN_ID));
639 		break;
640 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
641 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
642 			len = sprintf(buf, "%d\n",
643 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
644 					ISCSI_MAX_VLAN_PRIORITY));
645 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
646 			len = sprintf(buf, "%d\n",
647 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
648 					ISCSI_MAX_VLAN_PRIORITY));
649 		break;
650 	case ISCSI_NET_PARAM_VLAN_ENABLED:
651 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
652 			len = sprintf(buf, "%s\n",
653 				      (ha->ip_config.ipv4_options &
654 				       IPOPT_VLAN_TAGGING_ENABLE) ?
655 				       "enabled" : "disabled");
656 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
657 			len = sprintf(buf, "%s\n",
658 				      (ha->ip_config.ipv6_options &
659 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
660 				       "enabled" : "disabled");
661 		break;
662 	case ISCSI_NET_PARAM_MTU:
663 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
664 		break;
665 	case ISCSI_NET_PARAM_PORT:
666 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
667 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
668 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
669 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
670 		break;
671 	default:
672 		len = -ENOSYS;
673 	}
674 
675 	return len;
676 }
677 
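/**
 * qla4xxx_ep_connect - create an endpoint for the given destination
 * @shost:	Scsi_Host of the adapter
 * @dst_addr:	destination IPv4/IPv6 sockaddr
 * @non_blocking: unused here; connection setup is offloaded to the firmware
 *
 * Allocates an iscsi_endpoint with a struct qla_endpoint payload and records
 * the destination address and owning host in it.  No connection is opened at
 * this point; see qla4xxx_conn_start().
 */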
678 static struct iscsi_endpoint *
679 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
680 		   int non_blocking)
681 {
682 	int ret;
683 	struct iscsi_endpoint *ep;
684 	struct qla_endpoint *qla_ep;
685 	struct scsi_qla_host *ha;
686 	struct sockaddr_in *addr;
687 	struct sockaddr_in6 *addr6;
688 
689 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
690 	if (!shost) {
691 		ret = -ENXIO;
692 		printk(KERN_ERR "%s: shost is NULL\n",
693 		       __func__);
694 		return ERR_PTR(ret);
695 	}
696 
697 	ha = iscsi_host_priv(shost);
698 
699 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
700 	if (!ep) {
701 		ret = -ENOMEM;
702 		return ERR_PTR(ret);
703 	}
704 
705 	qla_ep = ep->dd_data;
706 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
707 	if (dst_addr->sa_family == AF_INET) {
708 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
709 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
710 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
711 				  (char *)&addr->sin_addr));
712 	} else if (dst_addr->sa_family == AF_INET6) {
713 		memcpy(&qla_ep->dst_addr, dst_addr,
714 		       sizeof(struct sockaddr_in6));
715 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
716 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
717 				  (char *)&addr6->sin6_addr));
718 	}
719 
720 	qla_ep->host = shost;
721 
722 	return ep;
723 }
724 
725 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
726 {
727 	struct qla_endpoint *qla_ep;
728 	struct scsi_qla_host *ha;
729 	int ret = 0;
730 
731 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
732 	qla_ep = ep->dd_data;
733 	ha = to_qla_host(qla_ep->host);
734 
735 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
736 		ret = 1;
737 
738 	return ret;
739 }
740 
741 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
742 {
743 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
744 	iscsi_destroy_endpoint(ep);
745 }
746 
747 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
748 				enum iscsi_param param,
749 				char *buf)
750 {
751 	struct qla_endpoint *qla_ep = ep->dd_data;
752 	struct sockaddr *dst_addr;
753 
754 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
755 
756 	switch (param) {
757 	case ISCSI_PARAM_CONN_PORT:
758 	case ISCSI_PARAM_CONN_ADDRESS:
759 		if (!qla_ep)
760 			return -ENOTCONN;
761 
762 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
763 		if (!dst_addr)
764 			return -ENOTCONN;
765 
766 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
767 						 &qla_ep->dst_addr, param, buf);
768 	default:
769 		return -ENOSYS;
770 	}
771 }
772 
773 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
774 				   struct iscsi_stats *stats)
775 {
776 	struct iscsi_session *sess;
777 	struct iscsi_cls_session *cls_sess;
778 	struct ddb_entry *ddb_entry;
779 	struct scsi_qla_host *ha;
780 	struct ql_iscsi_stats *ql_iscsi_stats;
781 	int stats_size;
782 	int ret;
783 	dma_addr_t iscsi_stats_dma;
784 
785 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
786 
787 	cls_sess = iscsi_conn_to_session(cls_conn);
788 	sess = cls_sess->dd_data;
789 	ddb_entry = sess->dd_data;
790 	ha = ddb_entry->ha;
791 
792 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
793 	/* Allocate memory */
794 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
795 					    &iscsi_stats_dma, GFP_KERNEL);
796 	if (!ql_iscsi_stats) {
797 		ql4_printk(KERN_ERR, ha,
798 			   "Unable to allocate memory for iscsi stats\n");
799 		goto exit_get_stats;
800 	}
801 
802 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
803 				     iscsi_stats_dma);
804 	if (ret != QLA_SUCCESS) {
805 		ql4_printk(KERN_ERR, ha,
806 			   "Unable to retrieve iscsi stats\n");
807 		goto free_stats;
808 	}
809 
810 	/* octets */
811 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
812 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
813 	/* xmit pdus */
814 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
815 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
816 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
817 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
818 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
819 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
820 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
821 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
822 	/* recv pdus */
823 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
824 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
825 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
826 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
827 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
828 	stats->logoutrsp_pdus =
829 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
830 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
831 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
832 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
833 
834 free_stats:
835 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
836 			  iscsi_stats_dma);
837 exit_get_stats:
838 	return;
839 }
840 
841 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
842 {
843 	struct iscsi_cls_session *session;
844 	struct iscsi_session *sess;
845 	unsigned long flags;
846 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
847 
848 	session = starget_to_session(scsi_target(sc->device));
849 	sess = session->dd_data;
850 
851 	spin_lock_irqsave(&session->lock, flags);
852 	if (session->state == ISCSI_SESSION_FAILED)
853 		ret = BLK_EH_RESET_TIMER;
854 	spin_unlock_irqrestore(&session->lock, flags);
855 
856 	return ret;
857 }
858 
859 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
860 {
861 	struct scsi_qla_host *ha = to_qla_host(shost);
862 	struct iscsi_cls_host *ihost = shost->shost_data;
863 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
864 
865 	qla4xxx_get_firmware_state(ha);
866 
867 	switch (ha->addl_fw_state & 0x0F00) {
868 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
869 		speed = ISCSI_PORT_SPEED_10MBPS;
870 		break;
871 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
872 		speed = ISCSI_PORT_SPEED_100MBPS;
873 		break;
874 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
875 		speed = ISCSI_PORT_SPEED_1GBPS;
876 		break;
877 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
878 		speed = ISCSI_PORT_SPEED_10GBPS;
879 		break;
880 	}
881 	ihost->port_speed = speed;
882 }
883 
884 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
885 {
886 	struct scsi_qla_host *ha = to_qla_host(shost);
887 	struct iscsi_cls_host *ihost = shost->shost_data;
888 	uint32_t state = ISCSI_PORT_STATE_DOWN;
889 
890 	if (test_bit(AF_LINK_UP, &ha->flags))
891 		state = ISCSI_PORT_STATE_UP;
892 
893 	ihost->port_state = state;
894 }
895 
896 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
897 				  enum iscsi_host_param param, char *buf)
898 {
899 	struct scsi_qla_host *ha = to_qla_host(shost);
900 	int len;
901 
902 	switch (param) {
903 	case ISCSI_HOST_PARAM_HWADDRESS:
904 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
905 		break;
906 	case ISCSI_HOST_PARAM_IPADDRESS:
907 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
908 		break;
909 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
910 		len = sprintf(buf, "%s\n", ha->name_string);
911 		break;
912 	case ISCSI_HOST_PARAM_PORT_STATE:
913 		qla4xxx_set_port_state(shost);
914 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
915 		break;
916 	case ISCSI_HOST_PARAM_PORT_SPEED:
917 		qla4xxx_set_port_speed(shost);
918 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
919 		break;
920 	default:
921 		return -ENOSYS;
922 	}
923 
924 	return len;
925 }
926 
927 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
928 {
929 	if (ha->iface_ipv4)
930 		return;
931 
932 	/* IPv4 */
933 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
934 					    &qla4xxx_iscsi_transport,
935 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
936 	if (!ha->iface_ipv4)
937 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
938 			   "iface0.\n");
939 }
940 
941 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
942 {
943 	if (!ha->iface_ipv6_0)
944 		/* IPv6 iface-0 */
945 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
946 						      &qla4xxx_iscsi_transport,
947 						      ISCSI_IFACE_TYPE_IPV6, 0,
948 						      0);
949 	if (!ha->iface_ipv6_0)
950 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
951 			   "iface0.\n");
952 
953 	if (!ha->iface_ipv6_1)
954 		/* IPv6 iface-1 */
955 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
956 						      &qla4xxx_iscsi_transport,
957 						      ISCSI_IFACE_TYPE_IPV6, 1,
958 						      0);
959 	if (!ha->iface_ipv6_1)
960 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
961 			   "iface1.\n");
962 }
963 
964 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
965 {
966 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
967 		qla4xxx_create_ipv4_iface(ha);
968 
969 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
970 		qla4xxx_create_ipv6_iface(ha);
971 }
972 
973 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
974 {
975 	if (ha->iface_ipv4) {
976 		iscsi_destroy_iface(ha->iface_ipv4);
977 		ha->iface_ipv4 = NULL;
978 	}
979 }
980 
981 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
982 {
983 	if (ha->iface_ipv6_0) {
984 		iscsi_destroy_iface(ha->iface_ipv6_0);
985 		ha->iface_ipv6_0 = NULL;
986 	}
987 	if (ha->iface_ipv6_1) {
988 		iscsi_destroy_iface(ha->iface_ipv6_1);
989 		ha->iface_ipv6_1 = NULL;
990 	}
991 }
992 
993 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
994 {
995 	qla4xxx_destroy_ipv4_iface(ha);
996 	qla4xxx_destroy_ipv6_iface(ha);
997 }
998 
999 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1000 			     struct iscsi_iface_param_info *iface_param,
1001 			     struct addr_ctrl_blk *init_fw_cb)
1002 {
1003 	/*
1004 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1005 	 * iface_num 1 is valid only for IPv6 Addr.
1006 	 */
1007 	switch (iface_param->param) {
1008 	case ISCSI_NET_PARAM_IPV6_ADDR:
1009 		if (iface_param->iface_num & 0x1)
1010 			/* IPv6 Addr 1 */
1011 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1012 			       sizeof(init_fw_cb->ipv6_addr1));
1013 		else
1014 			/* IPv6 Addr 0 */
1015 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1016 			       sizeof(init_fw_cb->ipv6_addr0));
1017 		break;
1018 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1019 		if (iface_param->iface_num & 0x1)
1020 			break;
1021 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1022 		       sizeof(init_fw_cb->ipv6_if_id));
1023 		break;
1024 	case ISCSI_NET_PARAM_IPV6_ROUTER:
1025 		if (iface_param->iface_num & 0x1)
1026 			break;
1027 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1028 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1029 		break;
1030 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1031 		/* Autocfg applies to even interface */
1032 		if (iface_param->iface_num & 0x1)
1033 			break;
1034 
1035 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1036 			init_fw_cb->ipv6_addtl_opts &=
1037 				cpu_to_le16(
1038 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1039 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1040 			init_fw_cb->ipv6_addtl_opts |=
1041 				cpu_to_le16(
1042 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1043 		else
1044 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1045 				   "IPv6 addr\n");
1046 		break;
1047 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1048 		/* Autocfg applies to even interface */
1049 		if (iface_param->iface_num & 0x1)
1050 			break;
1051 
1052 		if (iface_param->value[0] ==
1053 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1054 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1055 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1056 		else if (iface_param->value[0] ==
1057 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1058 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1059 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1060 		else
1061 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1062 				   "IPv6 linklocal addr\n");
1063 		break;
1064 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1065 		/* Autocfg applies to even interface */
1066 		if (iface_param->iface_num & 0x1)
1067 			break;
1068 
1069 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1070 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1071 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1072 		break;
1073 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1074 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1075 			init_fw_cb->ipv6_opts |=
1076 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1077 			qla4xxx_create_ipv6_iface(ha);
1078 		} else {
1079 			init_fw_cb->ipv6_opts &=
1080 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1081 					    0xFFFF);
1082 			qla4xxx_destroy_ipv6_iface(ha);
1083 		}
1084 		break;
1085 	case ISCSI_NET_PARAM_VLAN_TAG:
1086 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1087 			break;
1088 		init_fw_cb->ipv6_vlan_tag =
1089 				cpu_to_be16(*(uint16_t *)iface_param->value);
1090 		break;
1091 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1092 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1093 			init_fw_cb->ipv6_opts |=
1094 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1095 		else
1096 			init_fw_cb->ipv6_opts &=
1097 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1098 		break;
1099 	case ISCSI_NET_PARAM_MTU:
1100 		init_fw_cb->eth_mtu_size =
1101 				cpu_to_le16(*(uint16_t *)iface_param->value);
1102 		break;
1103 	case ISCSI_NET_PARAM_PORT:
1104 		/* Autocfg applies to even interface */
1105 		/* There is a single IPv6 port; only apply it for the even interface */
1106 			break;
1107 
1108 		init_fw_cb->ipv6_port =
1109 				cpu_to_le16(*(uint16_t *)iface_param->value);
1110 		break;
1111 	default:
1112 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1113 			   iface_param->param);
1114 		break;
1115 	}
1116 }
1117 
1118 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1119 			     struct iscsi_iface_param_info *iface_param,
1120 			     struct addr_ctrl_blk *init_fw_cb)
1121 {
1122 	switch (iface_param->param) {
1123 	case ISCSI_NET_PARAM_IPV4_ADDR:
1124 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1125 		       sizeof(init_fw_cb->ipv4_addr));
1126 		break;
1127 	case ISCSI_NET_PARAM_IPV4_SUBNET:
1128 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
1129 		       sizeof(init_fw_cb->ipv4_subnet));
1130 		break;
1131 	case ISCSI_NET_PARAM_IPV4_GW:
1132 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1133 		       sizeof(init_fw_cb->ipv4_gw_addr));
1134 		break;
1135 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1136 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1137 			init_fw_cb->ipv4_tcp_opts |=
1138 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
1139 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1140 			init_fw_cb->ipv4_tcp_opts &=
1141 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1142 		else
1143 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1144 		break;
1145 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1146 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1147 			init_fw_cb->ipv4_ip_opts |=
1148 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1149 			qla4xxx_create_ipv4_iface(ha);
1150 		} else {
1151 			init_fw_cb->ipv4_ip_opts &=
1152 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1153 					    0xFFFF);
1154 			qla4xxx_destroy_ipv4_iface(ha);
1155 		}
1156 		break;
1157 	case ISCSI_NET_PARAM_VLAN_TAG:
1158 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1159 			break;
1160 		init_fw_cb->ipv4_vlan_tag =
1161 				cpu_to_be16(*(uint16_t *)iface_param->value);
1162 		break;
1163 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1164 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1165 			init_fw_cb->ipv4_ip_opts |=
1166 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1167 		else
1168 			init_fw_cb->ipv4_ip_opts &=
1169 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1170 		break;
1171 	case ISCSI_NET_PARAM_MTU:
1172 		init_fw_cb->eth_mtu_size =
1173 				cpu_to_le16(*(uint16_t *)iface_param->value);
1174 		break;
1175 	case ISCSI_NET_PARAM_PORT:
1176 		init_fw_cb->ipv4_port =
1177 				cpu_to_le16(*(uint16_t *)iface_param->value);
1178 		break;
1179 	default:
1180 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1181 			   iface_param->param);
1182 		break;
1183 	}
1184 }
1185 
1186 static void
1187 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1188 {
1189 	struct addr_ctrl_blk_def *acb;
1190 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1191 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
1192 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
1193 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
1194 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
1195 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
1196 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
1197 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
1198 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
1199 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
1200 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
1201 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
1202 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
1203 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
1204 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
1205 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
1206 }
1207 
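/**
 * qla4xxx_iface_set_param - apply iface settings from an iSCSI netlink request
 * @shost:	Scsi_Host of the adapter
 * @data:	netlink attribute stream of iscsi_iface_param_info entries
 * @len:	length of @data
 *
 * Rough flow: read the current init firmware control block (IFCB), patch the
 * IPv4/IPv6 fields for each ISCSI_NET_PARAM attribute, write the block back
 * to flash, disable the current ACB, program the new ACB and finally refresh
 * the driver's local copy of the IFCB.
 */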
1208 static int
1209 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1210 {
1211 	struct scsi_qla_host *ha = to_qla_host(shost);
1212 	int rval = 0;
1213 	struct iscsi_iface_param_info *iface_param = NULL;
1214 	struct addr_ctrl_blk *init_fw_cb = NULL;
1215 	dma_addr_t init_fw_cb_dma;
1216 	uint32_t mbox_cmd[MBOX_REG_COUNT];
1217 	uint32_t mbox_sts[MBOX_REG_COUNT];
1218 	uint32_t rem = len;
1219 	struct nlattr *attr;
1220 
1221 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1222 					sizeof(struct addr_ctrl_blk),
1223 					&init_fw_cb_dma, GFP_KERNEL);
1224 	if (!init_fw_cb) {
1225 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1226 			   __func__);
1227 		return -ENOMEM;
1228 	}
1229 
1230 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1231 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1232 	memset(&mbox_sts, 0, sizeof(mbox_sts));
1233 
1234 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1235 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1236 		rval = -EIO;
1237 		goto exit_init_fw_cb;
1238 	}
1239 
1240 	nla_for_each_attr(attr, data, len, rem) {
1241 		iface_param = nla_data(attr);
1242 
1243 		if (iface_param->param_type != ISCSI_NET_PARAM)
1244 			continue;
1245 
1246 		switch (iface_param->iface_type) {
1247 		case ISCSI_IFACE_TYPE_IPV4:
1248 			switch (iface_param->iface_num) {
1249 			case 0:
1250 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1251 				break;
1252 			default:
1253 				/* Cannot have more than one IPv4 interface */
1254 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1255 					   "number = %d\n",
1256 					   iface_param->iface_num);
1257 				break;
1258 			}
1259 			break;
1260 		case ISCSI_IFACE_TYPE_IPV6:
1261 			switch (iface_param->iface_num) {
1262 			case 0:
1263 			case 1:
1264 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1265 				break;
1266 			default:
1267 				/* Cannot have more than two IPv6 interfaces */
1268 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1269 					   "number = %d\n",
1270 					   iface_param->iface_num);
1271 				break;
1272 			}
1273 			break;
1274 		default:
1275 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1276 			break;
1277 		}
1278 	}
1279 
1280 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1281 
1282 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1283 				 sizeof(struct addr_ctrl_blk),
1284 				 FLASH_OPT_RMW_COMMIT);
1285 	if (rval != QLA_SUCCESS) {
1286 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1287 			   __func__);
1288 		rval = -EIO;
1289 		goto exit_init_fw_cb;
1290 	}
1291 
1292 	rval = qla4xxx_disable_acb(ha);
1293 	if (rval != QLA_SUCCESS) {
1294 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1295 			   __func__);
1296 		rval = -EIO;
1297 		goto exit_init_fw_cb;
1298 	}
1299 
1300 	wait_for_completion_timeout(&ha->disable_acb_comp,
1301 				    DISABLE_ACB_TOV * HZ);
1302 
1303 	qla4xxx_initcb_to_acb(init_fw_cb);
1304 
1305 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1306 	if (rval != QLA_SUCCESS) {
1307 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1308 			   __func__);
1309 		rval = -EIO;
1310 		goto exit_init_fw_cb;
1311 	}
1312 
1313 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1314 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1315 				  init_fw_cb_dma);
1316 
1317 exit_init_fw_cb:
1318 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1319 			  init_fw_cb, init_fw_cb_dma);
1320 
1321 	return rval;
1322 }
1323 
1324 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1325 				     enum iscsi_param param, char *buf)
1326 {
1327 	struct iscsi_session *sess = cls_sess->dd_data;
1328 	struct ddb_entry *ddb_entry = sess->dd_data;
1329 	struct scsi_qla_host *ha = ddb_entry->ha;
1330 	int rval, len;
1331 	uint16_t idx;
1332 
1333 	switch (param) {
1334 	case ISCSI_PARAM_CHAP_IN_IDX:
1335 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
1336 					      sess->password_in, BIDI_CHAP,
1337 					      &idx);
1338 		if (rval)
1339 			return -EINVAL;
1340 
1341 		len = sprintf(buf, "%hu\n", idx);
1342 		break;
1343 	case ISCSI_PARAM_CHAP_OUT_IDX:
1344 		rval = qla4xxx_get_chap_index(ha, sess->username,
1345 					      sess->password, LOCAL_CHAP,
1346 					      &idx);
1347 		if (rval)
1348 			return -EINVAL;
1349 
1350 		len = sprintf(buf, "%hu\n", idx);
1351 		break;
1352 	default:
1353 		return iscsi_session_get_param(cls_sess, param, buf);
1354 	}
1355 
1356 	return len;
1357 }
1358 
1359 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1360 				  enum iscsi_param param, char *buf)
1361 {
1362 	struct iscsi_conn *conn;
1363 	struct qla_conn *qla_conn;
1364 	struct sockaddr *dst_addr;
1365 	int len = 0;
1366 
1367 	conn = cls_conn->dd_data;
1368 	qla_conn = conn->dd_data;
1369 	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1370 
1371 	switch (param) {
1372 	case ISCSI_PARAM_CONN_PORT:
1373 	case ISCSI_PARAM_CONN_ADDRESS:
1374 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1375 						 dst_addr, param, buf);
1376 	default:
1377 		return iscsi_conn_get_param(cls_conn, param, buf);
1378 	}
1379 
1380 	return len;
1381 
1382 }
1383 
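/**
 * qla4xxx_get_ddb_index - reserve a free firmware DDB index
 * @ha:		adapter
 * @ddb_index:	out: the reserved index
 *
 * Finds the first clear bit in ha->ddb_idx_map, claims it and requests the
 * corresponding DDB from the firmware, moving on to the next free index if
 * the firmware rejects it.  Returns QLA_SUCCESS or QLA_ERROR.
 */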
1384 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1385 {
1386 	uint32_t mbx_sts = 0;
1387 	uint16_t tmp_ddb_index;
1388 	int ret;
1389 
1390 get_ddb_index:
1391 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1392 
1393 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1394 		DEBUG2(ql4_printk(KERN_INFO, ha,
1395 				  "Free DDB index not available\n"));
1396 		ret = QLA_ERROR;
1397 		goto exit_get_ddb_index;
1398 	}
1399 
1400 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1401 		goto get_ddb_index;
1402 
1403 	DEBUG2(ql4_printk(KERN_INFO, ha,
1404 			  "Found a free DDB index at %d\n", tmp_ddb_index));
1405 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1406 	if (ret == QLA_ERROR) {
1407 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1408 			ql4_printk(KERN_INFO, ha,
1409 				   "DDB index = %d not available trying next\n",
1410 				   tmp_ddb_index);
1411 			goto get_ddb_index;
1412 		}
1413 		DEBUG2(ql4_printk(KERN_INFO, ha,
1414 				  "Free FW DDB not available\n"));
1415 	}
1416 
1417 	*ddb_index = tmp_ddb_index;
1418 
1419 exit_get_ddb_index:
1420 	return ret;
1421 }
1422 
1423 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1424 				   struct ddb_entry *ddb_entry,
1425 				   char *existing_ipaddr,
1426 				   char *user_ipaddr)
1427 {
1428 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1429 	char formatted_ipaddr[DDB_IPADDR_LEN];
1430 	int status = QLA_SUCCESS, ret = 0;
1431 
1432 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1433 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1434 			       '\0', NULL);
1435 		if (ret == 0) {
1436 			status = QLA_ERROR;
1437 			goto out_match;
1438 		}
1439 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1440 	} else {
1441 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1442 			       '\0', NULL);
1443 		if (ret == 0) {
1444 			status = QLA_ERROR;
1445 			goto out_match;
1446 		}
1447 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1448 	}
1449 
1450 	if (strcmp(existing_ipaddr, formatted_ipaddr))
1451 		status = QLA_ERROR;
1452 
1453 out_match:
1454 	return status;
1455 }
1456 
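/*
 * qla4xxx_match_fwdb_session() - return QLA_SUCCESS if a flash DDB session
 * with the same target name, persistent address and persistent port as the
 * given connection already exists, QLA_ERROR otherwise.  Used to avoid a
 * duplicate login that would make the target drop the existing connection.
 */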
1457 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1458 				      struct iscsi_cls_conn *cls_conn)
1459 {
1460 	int idx = 0, max_ddbs, rval;
1461 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1462 	struct iscsi_session *sess, *existing_sess;
1463 	struct iscsi_conn *conn, *existing_conn;
1464 	struct ddb_entry *ddb_entry;
1465 
1466 	sess = cls_sess->dd_data;
1467 	conn = cls_conn->dd_data;
1468 
1469 	if (sess->targetname == NULL ||
1470 	    conn->persistent_address == NULL ||
1471 	    conn->persistent_port == 0)
1472 		return QLA_ERROR;
1473 
1474 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1475 				     MAX_DEV_DB_ENTRIES;
1476 
1477 	for (idx = 0; idx < max_ddbs; idx++) {
1478 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1479 		if (ddb_entry == NULL)
1480 			continue;
1481 
1482 		if (ddb_entry->ddb_type != FLASH_DDB)
1483 			continue;
1484 
1485 		existing_sess = ddb_entry->sess->dd_data;
1486 		existing_conn = ddb_entry->conn->dd_data;
1487 
1488 		if (existing_sess->targetname == NULL ||
1489 		    existing_conn->persistent_address == NULL ||
1490 		    existing_conn->persistent_port == 0)
1491 			continue;
1492 
1493 		DEBUG2(ql4_printk(KERN_INFO, ha,
1494 				  "IQN = %s User IQN = %s\n",
1495 				  existing_sess->targetname,
1496 				  sess->targetname));
1497 
1498 		DEBUG2(ql4_printk(KERN_INFO, ha,
1499 				  "IP = %s User IP = %s\n",
1500 				  existing_conn->persistent_address,
1501 				  conn->persistent_address));
1502 
1503 		DEBUG2(ql4_printk(KERN_INFO, ha,
1504 				  "Port = %d User Port = %d\n",
1505 				  existing_conn->persistent_port,
1506 				  conn->persistent_port));
1507 
1508 		if (strcmp(existing_sess->targetname, sess->targetname))
1509 			continue;
1510 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1511 					existing_conn->persistent_address,
1512 					conn->persistent_address);
1513 		if (rval == QLA_ERROR)
1514 			continue;
1515 		if (existing_conn->persistent_port != conn->persistent_port)
1516 			continue;
1517 		break;
1518 	}
1519 
1520 	if (idx == max_ddbs)
1521 		return QLA_ERROR;
1522 
1523 		/* flt_chap_size is the CHAP table size for both ports,
1524 		 * so divide it by 2 to calculate the offset for the second port
1525 	return QLA_SUCCESS;
1526 }
1527 
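
/*
 * qla4xxx_session_create() - reserve a firmware DDB index and set up an
 * iscsi_cls_session whose dd_data is this driver's struct ddb_entry.  The
 * new DDB starts out in DDB_DS_NO_CONNECTION_ACTIVE and is registered in
 * ha->fw_ddb_index_map.
 */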
1528 static struct iscsi_cls_session *
1529 qla4xxx_session_create(struct iscsi_endpoint *ep,
1530 			uint16_t cmds_max, uint16_t qdepth,
1531 			uint32_t initial_cmdsn)
1532 {
1533 	struct iscsi_cls_session *cls_sess;
1534 	struct scsi_qla_host *ha;
1535 	struct qla_endpoint *qla_ep;
1536 	struct ddb_entry *ddb_entry;
1537 	uint16_t ddb_index;
1538 	struct iscsi_session *sess;
1539 	struct sockaddr *dst_addr;
1540 	int ret;
1541 
1542 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1543 	if (!ep) {
1544 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1545 		return NULL;
1546 	}
1547 
1548 	qla_ep = ep->dd_data;
1549 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1550 	ha = to_qla_host(qla_ep->host);
1551 
1552 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1553 	if (ret == QLA_ERROR)
1554 		return NULL;
1555 
1556 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1557 				       cmds_max, sizeof(struct ddb_entry),
1558 				       sizeof(struct ql4_task_data),
1559 				       initial_cmdsn, ddb_index);
1560 	if (!cls_sess)
1561 		return NULL;
1562 
1563 	sess = cls_sess->dd_data;
1564 	ddb_entry = sess->dd_data;
1565 	ddb_entry->fw_ddb_index = ddb_index;
1566 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1567 	ddb_entry->ha = ha;
1568 	ddb_entry->sess = cls_sess;
1569 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1570 	ddb_entry->ddb_change = qla4xxx_ddb_change;
1571 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1572 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1573 	ha->tot_ddbs++;
1574 
1575 	return cls_sess;
1576 }
1577 
1578 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1579 {
1580 	struct iscsi_session *sess;
1581 	struct ddb_entry *ddb_entry;
1582 	struct scsi_qla_host *ha;
1583 	unsigned long flags, wtime;
1584 	struct dev_db_entry *fw_ddb_entry = NULL;
1585 	dma_addr_t fw_ddb_entry_dma;
1586 	uint32_t ddb_state;
1587 	int ret;
1588 
1589 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1590 	sess = cls_sess->dd_data;
1591 	ddb_entry = sess->dd_data;
1592 	ha = ddb_entry->ha;
1593 
1594 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 					  &fw_ddb_entry_dma, GFP_KERNEL);
1596 	if (!fw_ddb_entry) {
1597 		ql4_printk(KERN_ERR, ha,
1598 			   "%s: Unable to allocate dma buffer\n", __func__);
1599 		goto destroy_session;
1600 	}
1601 
1602 	wtime = jiffies + (HZ * LOGOUT_TOV);
1603 	do {
1604 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 					      fw_ddb_entry, fw_ddb_entry_dma,
1606 					      NULL, NULL, &ddb_state, NULL,
1607 					      NULL, NULL);
1608 		if (ret == QLA_ERROR)
1609 			goto destroy_session;
1610 
1611 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 		    (ddb_state == DDB_DS_SESSION_FAILED))
1613 			goto destroy_session;
1614 
1615 		schedule_timeout_uninterruptible(HZ);
1616 	} while ((time_after(wtime, jiffies)));
1617 
1618 destroy_session:
1619 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1620 
1621 	spin_lock_irqsave(&ha->hardware_lock, flags);
1622 	qla4xxx_free_ddb(ha, ddb_entry);
1623 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624 
1625 	iscsi_session_teardown(cls_sess);
1626 
1627 	if (fw_ddb_entry)
1628 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 				  fw_ddb_entry, fw_ddb_entry_dma);
1630 }
1631 
1632 static struct iscsi_cls_conn *
1633 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1634 {
1635 	struct iscsi_cls_conn *cls_conn;
1636 	struct iscsi_session *sess;
1637 	struct ddb_entry *ddb_entry;
1638 
1639 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1640 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1641 				    conn_idx);
1642 	if (!cls_conn)
1643 		return NULL;
1644 
1645 	sess = cls_sess->dd_data;
1646 	ddb_entry = sess->dd_data;
1647 	ddb_entry->conn = cls_conn;
1648 
1649 	return cls_conn;
1650 }
1651 
1652 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1653 			     struct iscsi_cls_conn *cls_conn,
1654 			     uint64_t transport_fd, int is_leading)
1655 {
1656 	struct iscsi_conn *conn;
1657 	struct qla_conn *qla_conn;
1658 	struct iscsi_endpoint *ep;
1659 
1660 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1661 
1662 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1663 		return -EINVAL;
1664 	ep = iscsi_lookup_endpoint(transport_fd);
	/* Guard against a stale or invalid endpoint handle from userspace */
	if (!ep)
		return -EINVAL;
1665 	conn = cls_conn->dd_data;
1666 	qla_conn = conn->dd_data;
1667 	qla_conn->qla_ep = ep->dd_data;
1668 	return 0;
1669 }
1670 
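/*
 * qla4xxx_conn_start() - push the negotiated session/connection parameters
 * into the firmware DDB and ask the firmware to open the connection.  Bails
 * out with -EEXIST if an equivalent flash DDB session is already logged in,
 * and tolerates a failed set-param when the DDB is already active (iscsid
 * restart case).
 */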
1671 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1672 {
1673 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1674 	struct iscsi_session *sess;
1675 	struct ddb_entry *ddb_entry;
1676 	struct scsi_qla_host *ha;
1677 	struct dev_db_entry *fw_ddb_entry = NULL;
1678 	dma_addr_t fw_ddb_entry_dma;
1679 	uint32_t mbx_sts = 0;
1680 	int ret = 0;
1681 	int status = QLA_SUCCESS;
1682 
1683 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1684 	sess = cls_sess->dd_data;
1685 	ddb_entry = sess->dd_data;
1686 	ha = ddb_entry->ha;
1687 
1688 	/* Check if we have a matching FW DDB; if so, do not log in to this
1689 	 * target, since that could cause the target to log out the previous
1690 	 * connection
1691 	 */
1692 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1693 	if (ret == QLA_SUCCESS) {
1694 		ql4_printk(KERN_INFO, ha,
1695 			   "Session already exists in FW.\n");
1696 		ret = -EEXIST;
1697 		goto exit_conn_start;
1698 	}
1699 
1700 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1701 					  &fw_ddb_entry_dma, GFP_KERNEL);
1702 	if (!fw_ddb_entry) {
1703 		ql4_printk(KERN_ERR, ha,
1704 			   "%s: Unable to allocate dma buffer\n", __func__);
1705 		ret = -ENOMEM;
1706 		goto exit_conn_start;
1707 	}
1708 
1709 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1710 	if (ret) {
1711 		/* If iscsid is stopped and restarted, there is no need to
1712 		 * set the params again, since the ddb state will already be
1713 		 * active and the FW does not allow set ddb on an
1714 		 * active session.
1715 		 */
1716 		if (mbx_sts)
1717 			if (ddb_entry->fw_ddb_device_state ==
1718 						DDB_DS_SESSION_ACTIVE) {
1719 				ddb_entry->unblock_sess(ddb_entry->sess);
1720 				goto exit_set_param;
1721 			}
1722 
1723 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1724 			   __func__, ddb_entry->fw_ddb_index);
1725 		goto exit_conn_start;
1726 	}
1727 
1728 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1729 	if (status == QLA_ERROR) {
1730 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1731 			   sess->targetname);
1732 		ret = -EINVAL;
1733 		goto exit_conn_start;
1734 	}
1735 
1736 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1737 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1738 
1739 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1740 		      ddb_entry->fw_ddb_device_state));
1741 
1742 exit_set_param:
1743 	ret = 0;
1744 
1745 exit_conn_start:
1746 	if (fw_ddb_entry)
1747 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1748 				  fw_ddb_entry, fw_ddb_entry_dma);
1749 	return ret;
1750 }
1751 
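/**
 * qla4xxx_conn_destroy - tear down an iSCSI connection
 * @cls_conn: Pointer to the iSCSI class connection
 *
 * Requests a firmware logout of the DDB with the close-session option.
 **/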
1752 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1753 {
1754 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1755 	struct iscsi_session *sess;
1756 	struct scsi_qla_host *ha;
1757 	struct ddb_entry *ddb_entry;
1758 	int options;
1759 
1760 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1761 	sess = cls_sess->dd_data;
1762 	ddb_entry = sess->dd_data;
1763 	ha = ddb_entry->ha;
1764 
1765 	options = LOGOUT_OPTION_CLOSE_SESSION;
1766 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1767 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1768 }
1769 
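/**
 * qla4xxx_task_work - passthrough task completion work handler
 * @wdata: Pointer to the work_struct embedded in ql4_task_data
 *
 * On a successful passthrough status, restores the itt in the response
 * header and hands the PDU to libiscsi via iscsi_complete_pdu();
 * otherwise logs the failed completion status.
 **/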
1770 static void qla4xxx_task_work(struct work_struct *wdata)
1771 {
1772 	struct ql4_task_data *task_data;
1773 	struct scsi_qla_host *ha;
1774 	struct passthru_status *sts;
1775 	struct iscsi_task *task;
1776 	struct iscsi_hdr *hdr;
1777 	uint8_t *data;
1778 	uint32_t data_len;
1779 	struct iscsi_conn *conn;
1780 	int hdr_len;
1781 	itt_t itt;
1782 
1783 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1784 	ha = task_data->ha;
1785 	task = task_data->task;
1786 	sts = &task_data->sts;
1787 	hdr_len = sizeof(struct iscsi_hdr);
1788 
1789 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1790 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1791 	DEBUG3(printk(KERN_INFO "Response buffer\n"));
1792 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1793 
1794 	conn = task->conn;
1795 
1796 	switch (sts->completionStatus) {
1797 	case PASSTHRU_STATUS_COMPLETE:
1798 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1799 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1800 		itt = sts->handle;
1801 		hdr->itt = itt;
1802 		data = task_data->resp_buffer + hdr_len;
1803 		data_len = task_data->resp_len - hdr_len;
1804 		iscsi_complete_pdu(conn, hdr, data, data_len);
1805 		break;
1806 	default:
1807 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1808 			   sts->completionStatus);
1809 		break;
1810 	}
1811 	return;
1812 }
1813 
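/**
 * qla4xxx_alloc_pdu - allocate PDU resources for a passthrough task
 * @task: Pointer to the iSCSI task
 * @opcode: iSCSI opcode (not used by this handler)
 *
 * Maps the task data for DMA and allocates request/response buffers for
 * a non-SCSI (passthrough) PDU.  Returns 0 on success, or a negative
 * errno on failure.
 **/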
1814 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1815 {
1816 	struct ql4_task_data *task_data;
1817 	struct iscsi_session *sess;
1818 	struct ddb_entry *ddb_entry;
1819 	struct scsi_qla_host *ha;
1820 	int hdr_len;
1821 
1822 	sess = task->conn->session;
1823 	ddb_entry = sess->dd_data;
1824 	ha = ddb_entry->ha;
1825 	task_data = task->dd_data;
1826 	memset(task_data, 0, sizeof(struct ql4_task_data));
1827 
1828 	if (task->sc) {
1829 		ql4_printk(KERN_INFO, ha,
1830 			   "%s: SCSI Commands not implemented\n", __func__);
1831 		return -EINVAL;
1832 	}
1833 
1834 	hdr_len = sizeof(struct iscsi_hdr);
1835 	task_data->ha = ha;
1836 	task_data->task = task;
1837 
1838 	if (task->data_count) {
1839 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1840 						     task->data_count,
1841 						     PCI_DMA_TODEVICE);
1842 	}
1843 
1844 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1845 		      __func__, task->conn->max_recv_dlength, hdr_len));
1846 
1847 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1848 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1849 						    task_data->resp_len,
1850 						    &task_data->resp_dma,
1851 						    GFP_ATOMIC);
1852 	if (!task_data->resp_buffer)
1853 		goto exit_alloc_pdu;
1854 
1855 	task_data->req_len = task->data_count + hdr_len;
1856 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1857 						   task_data->req_len,
1858 						   &task_data->req_dma,
1859 						   GFP_ATOMIC);
1860 	if (!task_data->req_buffer)
1861 		goto exit_alloc_pdu;
1862 
1863 	task->hdr = task_data->req_buffer;
1864 
1865 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1866 
1867 	return 0;
1868 
1869 exit_alloc_pdu:
1870 	if (task_data->resp_buffer)
1871 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1872 				  task_data->resp_buffer, task_data->resp_dma);
1873 
1874 	if (task_data->req_buffer)
1875 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1876 				  task_data->req_buffer, task_data->req_dma);
1877 	return -ENOMEM;
1878 }
1879 
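/**
 * qla4xxx_task_cleanup - free PDU resources for a passthrough task
 * @task: Pointer to the iSCSI task
 *
 * Unmaps the task data and frees the request/response buffers allocated
 * by qla4xxx_alloc_pdu().
 **/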
1880 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1881 {
1882 	struct ql4_task_data *task_data;
1883 	struct iscsi_session *sess;
1884 	struct ddb_entry *ddb_entry;
1885 	struct scsi_qla_host *ha;
1886 	int hdr_len;
1887 
1888 	hdr_len = sizeof(struct iscsi_hdr);
1889 	sess = task->conn->session;
1890 	ddb_entry = sess->dd_data;
1891 	ha = ddb_entry->ha;
1892 	task_data = task->dd_data;
1893 
1894 	if (task->data_count) {
1895 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1896 				 task->data_count, PCI_DMA_TODEVICE);
1897 	}
1898 
1899 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1900 		      __func__, task->conn->max_recv_dlength, hdr_len));
1901 
1902 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1903 			  task_data->resp_buffer, task_data->resp_dma);
1904 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1905 			  task_data->req_buffer, task_data->req_dma);
1906 	return;
1907 }
1908 
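/**
 * qla4xxx_task_xmit - transmit an iSCSI task
 * @task: Pointer to the iSCSI task
 *
 * Passthrough (non-SCSI) tasks are sent via qla4xxx_send_passthru0();
 * SCSI command transmit is not implemented on this path.
 **/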
1909 static int qla4xxx_task_xmit(struct iscsi_task *task)
1910 {
1911 	struct scsi_cmnd *sc = task->sc;
1912 	struct iscsi_session *sess = task->conn->session;
1913 	struct ddb_entry *ddb_entry = sess->dd_data;
1914 	struct scsi_qla_host *ha = ddb_entry->ha;
1915 
1916 	if (!sc)
1917 		return qla4xxx_send_passthru0(task);
1918 
1919 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1920 		   __func__);
1921 	return -ENOSYS;
1922 }
1923 
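/**
 * qla4xxx_copy_fwddb_param - copy firmware DDB params to libiscsi
 * @ha: Pointer to host adapter structure
 * @fw_ddb_entry: Pointer to the firmware DDB entry
 * @cls_sess: Pointer to the iSCSI class session
 * @cls_conn: Pointer to the iSCSI class connection
 *
 * Copies the negotiated session and connection parameters from the
 * firmware DDB entry into the iSCSI class session and connection.
 **/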
1924 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1925 				     struct dev_db_entry *fw_ddb_entry,
1926 				     struct iscsi_cls_session *cls_sess,
1927 				     struct iscsi_cls_conn *cls_conn)
1928 {
1929 	int buflen = 0;
1930 	struct iscsi_session *sess;
1931 	struct ddb_entry *ddb_entry;
1932 	struct iscsi_conn *conn;
1933 	char ip_addr[DDB_IPADDR_LEN];
1934 	uint16_t options = 0;
1935 
1936 	sess = cls_sess->dd_data;
1937 	ddb_entry = sess->dd_data;
1938 	conn = cls_conn->dd_data;
1939 
1940 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1941 
1942 	conn->max_recv_dlength = BYTE_UNITS *
1943 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1944 
1945 	conn->max_xmit_dlength = BYTE_UNITS *
1946 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1947 
1948 	sess->initial_r2t_en =
1949 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1950 
1951 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1952 
1953 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1954 
1955 	sess->first_burst = BYTE_UNITS *
1956 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1957 
1958 	sess->max_burst = BYTE_UNITS *
1959 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1960 
1961 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1962 
1963 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1964 
1965 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1966 
1967 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1968 
1969 	options = le16_to_cpu(fw_ddb_entry->options);
1970 	if (options & DDB_OPT_IPV6_DEVICE)
1971 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1972 	else
1973 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1974 
1975 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1976 			(char *)fw_ddb_entry->iscsi_name, buflen);
1977 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1978 			(char *)ha->name_string, buflen);
1979 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1980 			(char *)ip_addr, buflen);
1981 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1982 			(char *)fw_ddb_entry->iscsi_alias, buflen);
1983 }
1984 
1985 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1986 					     struct ddb_entry *ddb_entry)
1987 {
1988 	struct iscsi_cls_session *cls_sess;
1989 	struct iscsi_cls_conn *cls_conn;
1990 	uint32_t ddb_state;
1991 	dma_addr_t fw_ddb_entry_dma;
1992 	struct dev_db_entry *fw_ddb_entry;
1993 
1994 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1995 					  &fw_ddb_entry_dma, GFP_KERNEL);
1996 	if (!fw_ddb_entry) {
1997 		ql4_printk(KERN_ERR, ha,
1998 			   "%s: Unable to allocate dma buffer\n", __func__);
1999 		goto exit_session_conn_fwddb_param;
2000 	}
2001 
2002 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2003 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2004 				    NULL, NULL, NULL) == QLA_ERROR) {
2005 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2006 				  "get_ddb_entry for fw_ddb_index %d\n",
2007 				  ha->host_no, __func__,
2008 				  ddb_entry->fw_ddb_index));
2009 		goto exit_session_conn_fwddb_param;
2010 	}
2011 
2012 	cls_sess = ddb_entry->sess;
2013 
2014 	cls_conn = ddb_entry->conn;
2015 
2016 	/* Update params */
2017 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2018 
2019 exit_session_conn_fwddb_param:
2020 	if (fw_ddb_entry)
2021 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2022 				  fw_ddb_entry, fw_ddb_entry_dma);
2023 }
2024 
2025 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2026 				       struct ddb_entry *ddb_entry)
2027 {
2028 	struct iscsi_cls_session *cls_sess;
2029 	struct iscsi_cls_conn *cls_conn;
2030 	struct iscsi_session *sess;
2031 	struct iscsi_conn *conn;
2032 	uint32_t ddb_state;
2033 	dma_addr_t fw_ddb_entry_dma;
2034 	struct dev_db_entry *fw_ddb_entry;
2035 
2036 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2037 					  &fw_ddb_entry_dma, GFP_KERNEL);
2038 	if (!fw_ddb_entry) {
2039 		ql4_printk(KERN_ERR, ha,
2040 			   "%s: Unable to allocate dma buffer\n", __func__);
2041 		goto exit_session_conn_param;
2042 	}
2043 
2044 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2045 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2046 				    NULL, NULL, NULL) == QLA_ERROR) {
2047 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2048 				  "get_ddb_entry for fw_ddb_index %d\n",
2049 				  ha->host_no, __func__,
2050 				  ddb_entry->fw_ddb_index));
2051 		goto exit_session_conn_param;
2052 	}
2053 
2054 	cls_sess = ddb_entry->sess;
2055 	sess = cls_sess->dd_data;
2056 
2057 	cls_conn = ddb_entry->conn;
2058 	conn = cls_conn->dd_data;
2059 
2060 	/* Update timers after login */
2061 	ddb_entry->default_relogin_timeout =
2062 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2063 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2064 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2065 	ddb_entry->default_time2wait =
2066 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2067 
2068 	/* Update params */
2069 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2070 	conn->max_recv_dlength = BYTE_UNITS *
2071 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2072 
2073 	conn->max_xmit_dlength = BYTE_UNITS *
2074 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2075 
2076 	sess->initial_r2t_en =
2077 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2078 
2079 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2080 
2081 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2082 
2083 	sess->first_burst = BYTE_UNITS *
2084 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2085 
2086 	sess->max_burst = BYTE_UNITS *
2087 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2088 
2089 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2090 
2091 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2092 
2093 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2094 
2095 	memcpy(sess->initiatorname, ha->name_string,
2096 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2097 
2098 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2099 			(char *)fw_ddb_entry->iscsi_alias, 0);
2100 
2101 exit_session_conn_param:
2102 	if (fw_ddb_entry)
2103 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2104 				  fw_ddb_entry, fw_ddb_entry_dma);
2105 }
2106 
2107 /*
2108  * Timer routines
2109  */
2110 
2111 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2112 				unsigned long interval)
2113 {
2114 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2115 		     __func__, ha->host->host_no));
2116 	init_timer(&ha->timer);
2117 	ha->timer.expires = jiffies + interval * HZ;
2118 	ha->timer.data = (unsigned long)ha;
2119 	ha->timer.function = (void (*)(unsigned long))func;
2120 	add_timer(&ha->timer);
2121 	ha->timer_active = 1;
2122 }
2123 
2124 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2125 {
2126 	del_timer_sync(&ha->timer);
2127 	ha->timer_active = 0;
2128 }
2129 
2130 /**
2131  * qla4xxx_mark_device_missing - blocks the session
2132  * @cls_session: Pointer to the iSCSI class session to be blocked
2133  *
2134  * This routine marks a device missing by blocking its iSCSI class
2135  * session, so that I/O to it is held off until the session is unblocked.
2136  **/
2137 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2138 {
2139 	iscsi_block_session(cls_session);
2140 }
2141 
2142 /**
2143  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2144  * @ha: Pointer to host adapter structure.
2145  *
2146  * This routine marks all devices as missing by blocking their sessions.
2147  **/
2148 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2149 {
2150 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2151 }
2152 
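/**
 * qla4xxx_get_new_srb - allocate and initialize an srb
 * @ha: Pointer to host adapter structure
 * @ddb_entry: Pointer to device database entry
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * Allocates an srb from the mempool, takes the initial reference and
 * links it to the command.  Returns NULL if the pool is exhausted.
 **/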
2153 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2154 				       struct ddb_entry *ddb_entry,
2155 				       struct scsi_cmnd *cmd)
2156 {
2157 	struct srb *srb;
2158 
2159 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2160 	if (!srb)
2161 		return srb;
2162 
2163 	kref_init(&srb->srb_ref);
2164 	srb->ha = ha;
2165 	srb->ddb = ddb_entry;
2166 	srb->cmd = cmd;
2167 	srb->flags = 0;
2168 	CMD_SP(cmd) = (void *)srb;
2169 
2170 	return srb;
2171 }
2172 
2173 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2174 {
2175 	struct scsi_cmnd *cmd = srb->cmd;
2176 
2177 	if (srb->flags & SRB_DMA_VALID) {
2178 		scsi_dma_unmap(cmd);
2179 		srb->flags &= ~SRB_DMA_VALID;
2180 	}
2181 	CMD_SP(cmd) = NULL;
2182 }
2183 
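/**
 * qla4xxx_srb_compl - final srb release callback
 * @ref: Pointer to the srb's kref
 *
 * Called when the last reference to the srb is dropped: unmaps the
 * command's DMA, returns the srb to the mempool and completes the
 * SCSI command back to the midlayer.
 **/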
2184 void qla4xxx_srb_compl(struct kref *ref)
2185 {
2186 	struct srb *srb = container_of(ref, struct srb, srb_ref);
2187 	struct scsi_cmnd *cmd = srb->cmd;
2188 	struct scsi_qla_host *ha = srb->ha;
2189 
2190 	qla4xxx_srb_free_dma(ha, srb);
2191 
2192 	mempool_free(srb, ha->srb_mempool);
2193 
2194 	cmd->scsi_done(cmd);
2195 }
2196 
2197 /**
2198  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2199  * @host: scsi host
2200  * @cmd: Pointer to Linux's SCSI command structure
2201  *
2202  * Remarks:
2203  * This routine is invoked by Linux to send a SCSI command to the driver.
2204  * The mid-level driver tries to ensure that queuecommand never gets
2205  * invoked concurrently with itself or the interrupt handler (although
2206  * the interrupt handler may call this routine as part of request-
2207  * completion handling).  Unfortunately, it sometimes calls the scheduler
2208  * in interrupt context, which is a big NO! NO!
2209  **/
2210 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2211 {
2212 	struct scsi_qla_host *ha = to_qla_host(host);
2213 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
2214 	struct iscsi_cls_session *sess = ddb_entry->sess;
2215 	struct srb *srb;
2216 	int rval;
2217 
2218 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2219 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2220 			cmd->result = DID_NO_CONNECT << 16;
2221 		else
2222 			cmd->result = DID_REQUEUE << 16;
2223 		goto qc_fail_command;
2224 	}
2225 
2226 	if (!sess) {
2227 		cmd->result = DID_IMM_RETRY << 16;
2228 		goto qc_fail_command;
2229 	}
2230 
2231 	rval = iscsi_session_chkready(sess);
2232 	if (rval) {
2233 		cmd->result = rval;
2234 		goto qc_fail_command;
2235 	}
2236 
2237 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2238 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2239 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2240 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2241 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2242 	    !test_bit(AF_ONLINE, &ha->flags) ||
2243 	    !test_bit(AF_LINK_UP, &ha->flags) ||
2244 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2245 		goto qc_host_busy;
2246 
2247 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2248 	if (!srb)
2249 		goto qc_host_busy;
2250 
2251 	rval = qla4xxx_send_command_to_isp(ha, srb);
2252 	if (rval != QLA_SUCCESS)
2253 		goto qc_host_busy_free_sp;
2254 
2255 	return 0;
2256 
2257 qc_host_busy_free_sp:
2258 	qla4xxx_srb_free_dma(ha, srb);
2259 	mempool_free(srb, ha->srb_mempool);
2260 
2261 qc_host_busy:
2262 	return SCSI_MLQUEUE_HOST_BUSY;
2263 
2264 qc_fail_command:
2265 	cmd->scsi_done(cmd);
2266 
2267 	return 0;
2268 }
2269 
2270 /**
2271  * qla4xxx_mem_free - frees memory allocated to adapter
2272  * @ha: Pointer to host adapter structure.
2273  *
2274  * Frees memory previously allocated by qla4xxx_mem_alloc
2275  **/
2276 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2277 {
2278 	if (ha->queues)
2279 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2280 				  ha->queues_dma);
2281 
2282 	if (ha->fw_dump)
2283 		vfree(ha->fw_dump);
2284 
2285 	ha->queues_len = 0;
2286 	ha->queues = NULL;
2287 	ha->queues_dma = 0;
2288 	ha->request_ring = NULL;
2289 	ha->request_dma = 0;
2290 	ha->response_ring = NULL;
2291 	ha->response_dma = 0;
2292 	ha->shadow_regs = NULL;
2293 	ha->shadow_regs_dma = 0;
2294 	ha->fw_dump = NULL;
2295 	ha->fw_dump_size = 0;
2296 
2297 	/* Free srb pool. */
2298 	if (ha->srb_mempool)
2299 		mempool_destroy(ha->srb_mempool);
2300 
2301 	ha->srb_mempool = NULL;
2302 
2303 	if (ha->chap_dma_pool)
2304 		dma_pool_destroy(ha->chap_dma_pool);
2305 
2306 	if (ha->chap_list)
2307 		vfree(ha->chap_list);
2308 	ha->chap_list = NULL;
2309 
2310 	if (ha->fw_ddb_dma_pool)
2311 		dma_pool_destroy(ha->fw_ddb_dma_pool);
2312 
2313 	/* release io space registers  */
2314 	if (is_qla8022(ha)) {
2315 		if (ha->nx_pcibase)
2316 			iounmap(
2317 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2318 	} else if (ha->reg)
2319 		iounmap(ha->reg);
2320 	pci_release_regions(ha->pdev);
2321 }
2322 
2323 /**
2324  * qla4xxx_mem_alloc - allocates memory for use by adapter.
2325  * @ha: Pointer to host adapter structure
2326  *
2327  * Allocates DMA memory for request and response queues. Also allocates memory
2328  * for srbs.
2329  **/
2330 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2331 {
2332 	unsigned long align;
2333 
2334 	/* Allocate contiguous block of DMA memory for queues. */
2335 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2336 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2337 			  sizeof(struct shadow_regs) +
2338 			  MEM_ALIGN_VALUE +
2339 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2340 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2341 					&ha->queues_dma, GFP_KERNEL);
2342 	if (ha->queues == NULL) {
2343 		ql4_printk(KERN_WARNING, ha,
2344 		    "Memory Allocation failed - queues.\n");
2345 
2346 		goto mem_alloc_error_exit;
2347 	}
2348 	memset(ha->queues, 0, ha->queues_len);
2349 
2350 	/*
2351 	 * As per RISC alignment requirements -- the bus-address must be a
2352 	 * multiple of the request-ring size (in bytes).
2353 	 */
2354 	align = 0;
2355 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2356 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2357 					   (MEM_ALIGN_VALUE - 1));
2358 
2359 	/* Update request and response queue pointers. */
2360 	ha->request_dma = ha->queues_dma + align;
2361 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
2362 	ha->response_dma = ha->queues_dma + align +
2363 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2364 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
2365 						    (REQUEST_QUEUE_DEPTH *
2366 						     QUEUE_SIZE));
2367 	ha->shadow_regs_dma = ha->queues_dma + align +
2368 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2369 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2370 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2371 						  (REQUEST_QUEUE_DEPTH *
2372 						   QUEUE_SIZE) +
2373 						  (RESPONSE_QUEUE_DEPTH *
2374 						   QUEUE_SIZE));
2375 
2376 	/* Allocate memory for srb pool. */
2377 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2378 					 mempool_free_slab, srb_cachep);
2379 	if (ha->srb_mempool == NULL) {
2380 		ql4_printk(KERN_WARNING, ha,
2381 		    "Memory Allocation failed - SRB Pool.\n");
2382 
2383 		goto mem_alloc_error_exit;
2384 	}
2385 
2386 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2387 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
2388 
2389 	if (ha->chap_dma_pool == NULL) {
2390 		ql4_printk(KERN_WARNING, ha,
2391 		    "%s: chap_dma_pool allocation failed..\n", __func__);
2392 		goto mem_alloc_error_exit;
2393 	}
2394 
2395 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2396 					      DDB_DMA_BLOCK_SIZE, 8, 0);
2397 
2398 	if (ha->fw_ddb_dma_pool == NULL) {
2399 		ql4_printk(KERN_WARNING, ha,
2400 			   "%s: fw_ddb_dma_pool allocation failed..\n",
2401 			   __func__);
2402 		goto mem_alloc_error_exit;
2403 	}
2404 
2405 	return QLA_SUCCESS;
2406 
2407 mem_alloc_error_exit:
2408 	qla4xxx_mem_free(ha);
2409 	return QLA_ERROR;
2410 }
2411 
2412 /**
2413  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2414  * @ha: adapter block pointer.
2415  *
2416  * Note: The caller should not hold the idc lock.
2417  **/
2418 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2419 {
2420 	uint32_t temp, temp_state, temp_val;
2421 	int status = QLA_SUCCESS;
2422 
2423 	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2424 
2425 	temp_state = qla82xx_get_temp_state(temp);
2426 	temp_val = qla82xx_get_temp_val(temp);
2427 
2428 	if (temp_state == QLA82XX_TEMP_PANIC) {
2429 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2430 			   " exceeds maximum allowed. Hardware has been shut"
2431 			   " down.\n", temp_val);
2432 		status = QLA_ERROR;
2433 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2434 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
2435 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2436 				   " degrees C exceeds operating range."
2437 				   " Immediate action needed.\n", temp_val);
2438 	} else {
2439 		if (ha->temperature == QLA82XX_TEMP_WARN)
2440 			ql4_printk(KERN_INFO, ha, "Device temperature is"
2441 				   " now %d degrees C in normal range.\n",
2442 				   temp_val);
2443 	}
2444 	ha->temperature = temp_state;
2445 	return status;
2446 }
2447 
2448 /**
2449  * qla4_8xxx_check_fw_alive  - Check firmware health
2450  * @ha: Pointer to host adapter structure.
2451  *
2452  * Context: Interrupt
2453  **/
2454 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2455 {
2456 	uint32_t fw_heartbeat_counter;
2457 	uint32_t halt_status1, halt_status2;
2458 	int status = QLA_SUCCESS;
2459 
2460 	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2461 						   QLA8XXX_PEG_ALIVE_COUNTER);
2462 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2463 	if (fw_heartbeat_counter == 0xffffffff) {
2464 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2465 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2466 		    ha->host_no, __func__));
2467 		return status;
2468 	}
2469 
2470 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2471 		ha->seconds_since_last_heartbeat++;
2472 		/* FW not alive after 2 seconds */
2473 		if (ha->seconds_since_last_heartbeat == 2) {
2474 			ha->seconds_since_last_heartbeat = 0;
2475 			halt_status1 = qla4_8xxx_rd_direct(ha,
2476 						QLA8XXX_PEG_HALT_STATUS1);
2477 			halt_status2 = qla4_8xxx_rd_direct(ha,
2478 						QLA8XXX_PEG_HALT_STATUS2);
2479 
2480 			ql4_printk(KERN_INFO, ha,
2481 				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
2482 				   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2483 				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2484 				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2485 				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n", ha->host_no,
2486 				   __func__, halt_status1, halt_status2,
2487 				   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2488 						   0x3c),
2489 				   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2490 						   0x3c),
2491 				   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2492 						   0x3c),
2493 				   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2494 						   0x3c),
2495 				   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2496 						   0x3c));
2497 			status = QLA_ERROR;
2498 		}
2499 	} else
2500 		ha->seconds_since_last_heartbeat = 0;
2501 
2502 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
2503 	return status;
2504 }
2505 
2506 /**
2507  * qla4_8xxx_watchdog - Poll dev state
2508  * @ha: Pointer to host adapter structure.
2509  *
2510  * Context: Interrupt
2511  **/
2512 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2513 {
2514 	uint32_t dev_state, halt_status;
2515 
2516 	/* don't poll if reset is going on */
2517 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2518 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2519 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2520 		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2521 
2522 		if (qla4_8xxx_check_temp(ha)) {
2523 			ql4_printk(KERN_INFO, ha, "disabling pause"
2524 				   " transmit on port 0 & 1.\n");
2525 			qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2526 					CRB_NIU_XG_PAUSE_CTL_P0 |
2527 					CRB_NIU_XG_PAUSE_CTL_P1);
2528 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2529 			qla4xxx_wake_dpc(ha);
2530 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2531 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2532 			if (!ql4xdontresethba) {
2533 				ql4_printk(KERN_INFO, ha, "%s: HW State: "
2534 				    "NEED RESET!\n", __func__);
2535 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
2536 				qla4xxx_wake_dpc(ha);
2537 			}
2538 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2539 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2540 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2541 			    __func__);
2542 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2543 			qla4xxx_wake_dpc(ha);
2544 		} else  {
2545 			/* Check firmware health */
2546 			if (qla4_8xxx_check_fw_alive(ha)) {
2547 				ql4_printk(KERN_INFO, ha, "disabling pause"
2548 					   " transmit on port 0 & 1.\n");
2549 				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2550 						CRB_NIU_XG_PAUSE_CTL_P0 |
2551 						CRB_NIU_XG_PAUSE_CTL_P1);
2552 				halt_status = qla4_8xxx_rd_direct(ha,
2553 						   QLA8XXX_PEG_HALT_STATUS1);
2554 
2555 				if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2556 					ql4_printk(KERN_ERR, ha, "%s:"
2557 						   " Firmware aborted with"
2558 						   " error code 0x00006700."
2559 						   " Device is being reset\n",
2560 						   __func__);
2561 
2562 				/* Since we cannot change dev_state in interrupt
2563 				 * context, set the appropriate DPC flag and then
2564 				 * wake up the DPC */
2565 				if (halt_status & HALT_STATUS_UNRECOVERABLE)
2566 					set_bit(DPC_HA_UNRECOVERABLE,
2567 						&ha->dpc_flags);
2568 				else {
2569 					ql4_printk(KERN_INFO, ha, "%s: detect "
2570 						   "abort needed!\n", __func__);
2571 					set_bit(DPC_RESET_HA, &ha->dpc_flags);
2572 				}
2573 				qla4xxx_mailbox_premature_completion(ha);
2574 				qla4xxx_wake_dpc(ha);
2575 			}
2576 		}
2577 	}
2578 }
2579 
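/**
 * qla4xxx_check_relogin_flash_ddb - per-second relogin check for flash DDBs
 * @cls_sess: Pointer to the iSCSI class session
 *
 * Called from the timer for each session: for offline flash DDB sessions
 * it counts down the relogin timers and, on expiry, sets the DPC relogin
 * flags for the device.
 **/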
2580 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2581 {
2582 	struct iscsi_session *sess;
2583 	struct ddb_entry *ddb_entry;
2584 	struct scsi_qla_host *ha;
2585 
2586 	sess = cls_sess->dd_data;
2587 	ddb_entry = sess->dd_data;
2588 	ha = ddb_entry->ha;
2589 
2590 	if (!(ddb_entry->ddb_type == FLASH_DDB))
2591 		return;
2592 
2593 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2594 	    !iscsi_is_session_online(cls_sess)) {
2595 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2596 		    INVALID_ENTRY) {
2597 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2598 					0) {
2599 				atomic_set(&ddb_entry->retry_relogin_timer,
2600 					   INVALID_ENTRY);
2601 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2602 				set_bit(DF_RELOGIN, &ddb_entry->flags);
2603 				DEBUG2(ql4_printk(KERN_INFO, ha,
2604 				       "%s: index [%d] login device\n",
2605 					__func__, ddb_entry->fw_ddb_index));
2606 			} else
2607 				atomic_dec(&ddb_entry->retry_relogin_timer);
2608 		}
2609 	}
2610 
2611 	/* Wait for relogin to time out */
2612 	if (atomic_read(&ddb_entry->relogin_timer) &&
2613 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2614 		/*
2615 		 * If the relogin times out and the device is
2616 		 * still NOT ONLINE then try and relogin again.
2617 		 */
2618 		if (!iscsi_is_session_online(cls_sess)) {
2619 			/* Reset retry relogin timer */
2620 			atomic_inc(&ddb_entry->relogin_retry_count);
2621 			DEBUG2(ql4_printk(KERN_INFO, ha,
2622 				"%s: index[%d] relogin timed out-retrying"
2623 				" relogin (%d), retry (%d)\n", __func__,
2624 				ddb_entry->fw_ddb_index,
2625 				atomic_read(&ddb_entry->relogin_retry_count),
2626 				ddb_entry->default_time2wait + 4));
2627 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2628 			atomic_set(&ddb_entry->retry_relogin_timer,
2629 				   ddb_entry->default_time2wait + 4);
2630 		}
2631 	}
2632 }
2633 
2634 /**
2635  * qla4xxx_timer - checks every second for work to do.
2636  * @ha: Pointer to host adapter structure.
2637  **/
2638 static void qla4xxx_timer(struct scsi_qla_host *ha)
2639 {
2640 	int start_dpc = 0;
2641 	uint16_t w;
2642 
2643 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2644 
2645 	/* If we are in the middle of AER/EEH processing
2646 	 * skip any processing and reschedule the timer
2647 	 */
2648 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2649 		mod_timer(&ha->timer, jiffies + HZ);
2650 		return;
2651 	}
2652 
2653 	/* Hardware read to trigger an EEH error during mailbox waits. */
2654 	if (!pci_channel_offline(ha->pdev))
2655 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
2656 
2657 	if (is_qla8022(ha)) {
2658 		qla4_8xxx_watchdog(ha);
2659 	}
2660 
2661 	if (is_qla40XX(ha)) {
2662 		/* Check for heartbeat interval. */
2663 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2664 		    ha->heartbeat_interval != 0) {
2665 			ha->seconds_since_last_heartbeat++;
2666 			if (ha->seconds_since_last_heartbeat >
2667 			    ha->heartbeat_interval + 2)
2668 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
2669 		}
2670 	}
2671 
2672 	/* Process any deferred work. */
2673 	if (!list_empty(&ha->work_list))
2674 		start_dpc++;
2675 
2676 	/* Wakeup the dpc routine for this adapter, if needed. */
2677 	if (start_dpc ||
2678 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2679 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2680 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
2681 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2682 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2683 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
2684 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
2685 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2686 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2687 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
2688 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2689 			      " - dpc flags = 0x%lx\n",
2690 			      ha->host_no, __func__, ha->dpc_flags));
2691 		qla4xxx_wake_dpc(ha);
2692 	}
2693 
2694 	/* Reschedule timer thread to call us back in one second */
2695 	mod_timer(&ha->timer, jiffies + HZ);
2696 
2697 	DEBUG2(ha->seconds_since_last_intr++);
2698 }
2699 
2700 /**
2701  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2702  * @ha: Pointer to host adapter structure.
2703  *
2704  * This routine stalls the driver until all outstanding commands are returned.
2705  * Caller must release the Hardware Lock prior to calling this routine.
2706  **/
2707 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2708 {
2709 	uint32_t index = 0;
2710 	unsigned long flags;
2711 	struct scsi_cmnd *cmd;
2712 
2713 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2714 
2715 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2716 	    "complete\n", WAIT_CMD_TOV));
2717 
2718 	while (!time_after_eq(jiffies, wtime)) {
2719 		spin_lock_irqsave(&ha->hardware_lock, flags);
2720 		/* Find a command that hasn't completed. */
2721 		for (index = 0; index < ha->host->can_queue; index++) {
2722 			cmd = scsi_host_find_tag(ha->host, index);
2723 			/*
2724 			 * We cannot just check if the index is valid,
2725 			 * because if we are run from the scsi eh, then
2726 			 * the scsi/block layer is going to prevent
2727 			 * the tag from being released.
2728 			 */
2729 			if (cmd != NULL && CMD_SP(cmd))
2730 				break;
2731 		}
2732 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2733 
2734 		/* If No Commands are pending, wait is complete */
2735 		if (index == ha->host->can_queue)
2736 			return QLA_SUCCESS;
2737 
2738 		msleep(1000);
2739 	}
2740 	/* If we timed out on waiting for commands to come back
2741 	 * return ERROR. */
2742 	return QLA_ERROR;
2743 }
2744 
2745 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
2746 {
2747 	uint32_t ctrl_status;
2748 	unsigned long flags = 0;
2749 
2750 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
2751 
2752 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2753 		return QLA_ERROR;
2754 
2755 	spin_lock_irqsave(&ha->hardware_lock, flags);
2756 
2757 	/*
2758 	 * If the SCSI Reset Interrupt bit is set, clear it.
2759 	 * Otherwise, the Soft Reset won't work.
2760 	 */
2761 	ctrl_status = readw(&ha->reg->ctrl_status);
2762 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2763 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2764 
2765 	/* Issue Soft Reset */
2766 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
2767 	readl(&ha->reg->ctrl_status);
2768 
2769 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2770 	return QLA_SUCCESS;
2771 }
2772 
2773 /**
2774  * qla4xxx_soft_reset - performs soft reset.
2775  * @ha: Pointer to host adapter structure.
2776  **/
2777 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2778 {
2779 	uint32_t max_wait_time;
2780 	unsigned long flags = 0;
2781 	int status;
2782 	uint32_t ctrl_status;
2783 
2784 	status = qla4xxx_hw_reset(ha);
2785 	if (status != QLA_SUCCESS)
2786 		return status;
2787 
2788 	status = QLA_ERROR;
2789 	/* Wait until the Network Reset Intr bit is cleared */
2790 	max_wait_time = RESET_INTR_TOV;
2791 	do {
2792 		spin_lock_irqsave(&ha->hardware_lock, flags);
2793 		ctrl_status = readw(&ha->reg->ctrl_status);
2794 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2795 
2796 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2797 			break;
2798 
2799 		msleep(1000);
2800 	} while ((--max_wait_time));
2801 
2802 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2803 		DEBUG2(printk(KERN_WARNING
2804 			      "scsi%ld: Network Reset Intr not cleared by "
2805 			      "Network function, clearing it now!\n",
2806 			      ha->host_no));
2807 		spin_lock_irqsave(&ha->hardware_lock, flags);
2808 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2809 		readl(&ha->reg->ctrl_status);
2810 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2811 	}
2812 
2813 	/* Wait until the firmware tells us the Soft Reset is done */
2814 	max_wait_time = SOFT_RESET_TOV;
2815 	do {
2816 		spin_lock_irqsave(&ha->hardware_lock, flags);
2817 		ctrl_status = readw(&ha->reg->ctrl_status);
2818 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2819 
2820 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2821 			status = QLA_SUCCESS;
2822 			break;
2823 		}
2824 
2825 		msleep(1000);
2826 	} while ((--max_wait_time));
2827 
2828 	/*
2829 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2830 	 * after the soft reset has taken place.
2831 	 */
2832 	spin_lock_irqsave(&ha->hardware_lock, flags);
2833 	ctrl_status = readw(&ha->reg->ctrl_status);
2834 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2835 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2836 		readl(&ha->reg->ctrl_status);
2837 	}
2838 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2839 
2840 	/* If the soft reset fails, then most probably the BIOS on the
2841 	 * other function is also enabled.
2842 	 * Since initialization is sequential, the other function
2843 	 * won't be able to acknowledge the soft reset.
2844 	 * Issue a force soft reset to work around this scenario.
2845 	 */
2846 	if (max_wait_time == 0) {
2847 		/* Issue Force Soft Reset */
2848 		spin_lock_irqsave(&ha->hardware_lock, flags);
2849 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2850 		readl(&ha->reg->ctrl_status);
2851 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2852 		/* Wait until the firmware tells us the Soft Reset is done */
2853 		max_wait_time = SOFT_RESET_TOV;
2854 		do {
2855 			spin_lock_irqsave(&ha->hardware_lock, flags);
2856 			ctrl_status = readw(&ha->reg->ctrl_status);
2857 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
2858 
2859 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2860 				status = QLA_SUCCESS;
2861 				break;
2862 			}
2863 
2864 			msleep(1000);
2865 		} while ((--max_wait_time));
2866 	}
2867 
2868 	return status;
2869 }
2870 
2871 /**
2872  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2873  * @ha: Pointer to host adapter structure.
2874  * @res: returned scsi status
2875  *
2876  * This routine is called just prior to a HARD RESET to return all
2877  * outstanding commands back to the Operating System.
2878  * Caller should make sure that the following locks are released
2879  * before this calling routine: Hardware lock, and io_request_lock.
2880  **/
2881 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2882 {
2883 	struct srb *srb;
2884 	int i;
2885 	unsigned long flags;
2886 
2887 	spin_lock_irqsave(&ha->hardware_lock, flags);
2888 	for (i = 0; i < ha->host->can_queue; i++) {
2889 		srb = qla4xxx_del_from_active_array(ha, i);
2890 		if (srb != NULL) {
2891 			srb->cmd->result = res;
2892 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2893 		}
2894 	}
2895 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2896 }
2897 
2898 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2899 {
2900 	clear_bit(AF_ONLINE, &ha->flags);
2901 
2902 	/* Disable the board */
2903 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2904 
2905 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2906 	qla4xxx_mark_all_devices_missing(ha);
2907 	clear_bit(AF_INIT_DONE, &ha->flags);
2908 }
2909 
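/**
 * qla4xxx_fail_session - mark a session as failed
 * @cls_session: Pointer to the iSCSI class session
 *
 * Marks the DDB state as failed, then blocks the session for flash DDBs
 * or reports a connection failure to libiscsi for user-space sessions.
 **/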
2910 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2911 {
2912 	struct iscsi_session *sess;
2913 	struct ddb_entry *ddb_entry;
2914 
2915 	sess = cls_session->dd_data;
2916 	ddb_entry = sess->dd_data;
2917 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2918 
2919 	if (ddb_entry->ddb_type == FLASH_DDB)
2920 		iscsi_block_session(ddb_entry->sess);
2921 	else
2922 		iscsi_session_failure(cls_session->dd_data,
2923 				      ISCSI_ERR_CONN_FAILED);
2924 }
2925 
2926 /**
2927  * qla4xxx_recover_adapter - recovers adapter after a fatal error
2928  * @ha: Pointer to host adapter structure.
2929  **/
2930 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2931 {
2932 	int status = QLA_ERROR;
2933 	uint8_t reset_chip = 0;
2934 	uint32_t dev_state;
2935 	unsigned long wait;
2936 
2937 	/* Stall incoming I/O until we are done */
2938 	scsi_block_requests(ha->host);
2939 	clear_bit(AF_ONLINE, &ha->flags);
2940 	clear_bit(AF_LINK_UP, &ha->flags);
2941 
2942 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2943 
2944 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2945 
2946 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2947 
2948 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2949 		reset_chip = 1;
2950 
2951 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2952 	 * do not reset adapter, jump to initialize_adapter */
2953 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2954 		status = QLA_SUCCESS;
2955 		goto recover_ha_init_adapter;
2956 	}
2957 
2958 	/* For the ISP-82xx adapter, issue a stop_firmware if invoked
2959 	 * from eh_host_reset or ioctl module */
2960 	if (is_qla8022(ha) && !reset_chip &&
2961 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2962 
2963 		DEBUG2(ql4_printk(KERN_INFO, ha,
2964 		    "scsi%ld: %s - Performing stop_firmware...\n",
2965 		    ha->host_no, __func__));
2966 		status = ha->isp_ops->reset_firmware(ha);
2967 		if (status == QLA_SUCCESS) {
2968 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2969 				qla4xxx_cmd_wait(ha);
2970 			ha->isp_ops->disable_intrs(ha);
2971 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2972 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2973 		} else {
2974 			/* If the stop_firmware fails then
2975 			 * reset the entire chip */
2976 			reset_chip = 1;
2977 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2978 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
2979 		}
2980 	}
2981 
2982 	/* Issue full chip reset if recovering from a catastrophic error,
2983 	 * or if stop_firmware fails for ISP-82xx.
2984 	 * This is the default case for ISP-4xxx */
2985 	if (is_qla40XX(ha) || reset_chip) {
2986 		if (is_qla40XX(ha))
2987 			goto chip_reset;
2988 
2989 		/* Check if 82XX firmware is alive or not
2990 		 * We may have arrived here from NEED_RESET
2991 		 * detection only */
2992 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
2993 			goto chip_reset;
2994 
2995 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2996 		while (time_before(jiffies, wait)) {
2997 			if (qla4_8xxx_check_fw_alive(ha)) {
2998 				qla4xxx_mailbox_premature_completion(ha);
2999 				break;
3000 			}
3001 
3002 			set_current_state(TASK_UNINTERRUPTIBLE);
3003 			schedule_timeout(HZ);
3004 		}
3005 chip_reset:
3006 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3007 			qla4xxx_cmd_wait(ha);
3008 
3009 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3010 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3011 		DEBUG2(ql4_printk(KERN_INFO, ha,
3012 		    "scsi%ld: %s - Performing chip reset..\n",
3013 		    ha->host_no, __func__));
3014 		status = ha->isp_ops->reset_chip(ha);
3015 	}
3016 
3017 	/* Flush any pending ddb changed AENs */
3018 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3019 
3020 recover_ha_init_adapter:
3021 	/* Upon successful firmware/chip reset, re-initialize the adapter */
3022 	if (status == QLA_SUCCESS) {
3023 		/* For ISP-4xxx, force function 1 to always initialize
3024 		 * before function 3 to prevent both functions from
3025 		 * stepping on top of each other */
3026 		if (is_qla40XX(ha) && (ha->mac_index == 3))
3027 			ssleep(6);
3028 
3029 		/* NOTE: AF_ONLINE flag set upon successful completion of
3030 		 *       qla4xxx_initialize_adapter */
3031 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3032 	}
3033 
3034 	/* Retry failed adapter initialization, if necessary
3035 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3036 	 * case to prevent ping-pong resets between functions */
3037 	if (!test_bit(AF_ONLINE, &ha->flags) &&
3038 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3039 		/* Adapter initialization failed, see if we can retry
3040 		 * resetting the ha.
3041 		 * Since we don't want to block the DPC for too long
3042 		 * with multiple resets in the same thread,
3043 		 * utilize DPC to retry */
3044 		if (is_qla8022(ha)) {
3045 			ha->isp_ops->idc_lock(ha);
3046 			dev_state = qla4_8xxx_rd_direct(ha,
3047 							QLA8XXX_CRB_DEV_STATE);
3048 			ha->isp_ops->idc_unlock(ha);
3049 			if (dev_state == QLA8XXX_DEV_FAILED) {
3050 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
3051 					   "recover adapter. H/W is in Failed "
3052 					   "state\n", __func__);
3053 				qla4xxx_dead_adapter_cleanup(ha);
3054 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3055 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3056 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3057 						&ha->dpc_flags);
3058 				status = QLA_ERROR;
3059 
3060 				goto exit_recover;
3061 			}
3062 		}
3063 
3064 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3065 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3066 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
3067 				      "(%d) more times\n", ha->host_no,
3068 				      ha->retry_reset_ha_cnt));
3069 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3070 			status = QLA_ERROR;
3071 		} else {
3072 			if (ha->retry_reset_ha_cnt > 0) {
3073 				/* Schedule another Reset HA--DPC will retry */
3074 				ha->retry_reset_ha_cnt--;
3075 				DEBUG2(printk("scsi%ld: recover adapter - "
3076 					      "retry remaining %d\n",
3077 					      ha->host_no,
3078 					      ha->retry_reset_ha_cnt));
3079 				status = QLA_ERROR;
3080 			}
3081 
3082 			if (ha->retry_reset_ha_cnt == 0) {
3083 				/* Recover adapter retries have been exhausted.
3084 				 * Adapter DEAD */
3085 				DEBUG2(printk("scsi%ld: recover adapter "
3086 					      "failed - board disabled\n",
3087 					      ha->host_no));
3088 				qla4xxx_dead_adapter_cleanup(ha);
3089 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3090 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3091 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3092 					  &ha->dpc_flags);
3093 				status = QLA_ERROR;
3094 			}
3095 		}
3096 	} else {
3097 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3098 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3099 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3100 	}
3101 
3102 exit_recover:
3103 	ha->adapter_error_count++;
3104 
3105 	if (test_bit(AF_ONLINE, &ha->flags))
3106 		ha->isp_ops->enable_intrs(ha);
3107 
3108 	scsi_unblock_requests(ha->host);
3109 
3110 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3111 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3112 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3113 
3114 	return status;
3115 }
3116 
3117 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3118 {
3119 	struct iscsi_session *sess;
3120 	struct ddb_entry *ddb_entry;
3121 	struct scsi_qla_host *ha;
3122 
3123 	sess = cls_session->dd_data;
3124 	ddb_entry = sess->dd_data;
3125 	ha = ddb_entry->ha;
3126 	if (!iscsi_is_session_online(cls_session)) {
3127 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3128 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3129 				   " unblock session\n", ha->host_no, __func__,
3130 				   ddb_entry->fw_ddb_index);
3131 			iscsi_unblock_session(ddb_entry->sess);
3132 		} else {
3133 			/* Trigger relogin */
3134 			if (ddb_entry->ddb_type == FLASH_DDB) {
3135 				if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3136 					qla4xxx_arm_relogin_timer(ddb_entry);
3137 			} else
3138 				iscsi_session_failure(cls_session->dd_data,
3139 						      ISCSI_ERR_CONN_FAILED);
3140 		}
3141 	}
3142 }
3143 
3144 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3145 {
3146 	struct iscsi_session *sess;
3147 	struct ddb_entry *ddb_entry;
3148 	struct scsi_qla_host *ha;
3149 
3150 	sess = cls_session->dd_data;
3151 	ddb_entry = sess->dd_data;
3152 	ha = ddb_entry->ha;
3153 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3154 		   " unblock session\n", ha->host_no, __func__,
3155 		   ddb_entry->fw_ddb_index);
3156 
3157 	iscsi_unblock_session(ddb_entry->sess);
3158 
3159 	/* Start scan target */
3160 	if (test_bit(AF_ONLINE, &ha->flags)) {
3161 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3162 			   " start scan\n", ha->host_no, __func__,
3163 			   ddb_entry->fw_ddb_index);
3164 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3165 	}
3166 	return QLA_SUCCESS;
3167 }
3168 
3169 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3170 {
3171 	struct iscsi_session *sess;
3172 	struct ddb_entry *ddb_entry;
3173 	struct scsi_qla_host *ha;
3174 	int status = QLA_SUCCESS;
3175 
3176 	sess = cls_session->dd_data;
3177 	ddb_entry = sess->dd_data;
3178 	ha = ddb_entry->ha;
3179 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3180 		   " unblock user space session\n", ha->host_no, __func__,
3181 		   ddb_entry->fw_ddb_index);
3182 
3183 	if (!iscsi_is_session_online(cls_session)) {
3184 		iscsi_conn_start(ddb_entry->conn);
3185 		iscsi_conn_login_event(ddb_entry->conn,
3186 				       ISCSI_CONN_STATE_LOGGED_IN);
3187 	} else {
3188 		ql4_printk(KERN_INFO, ha,
3189 			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3190 			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
3191 			   cls_session->sid);
3192 		status = QLA_ERROR;
3193 	}
3194 
3195 	return status;
3196 }
3197 
3198 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3199 {
3200 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3201 }
3202 
3203 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3204 {
3205 	uint16_t relogin_timer;
3206 	struct iscsi_session *sess;
3207 	struct ddb_entry *ddb_entry;
3208 	struct scsi_qla_host *ha;
3209 
3210 	sess = cls_sess->dd_data;
3211 	ddb_entry = sess->dd_data;
3212 	ha = ddb_entry->ha;
3213 
3214 	relogin_timer = max(ddb_entry->default_relogin_timeout,
3215 			    (uint16_t)RELOGIN_TOV);
3216 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3217 
3218 	DEBUG2(ql4_printk(KERN_INFO, ha,
3219 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3220 			  ddb_entry->fw_ddb_index, relogin_timer));
3221 
3222 	qla4xxx_login_flash_ddb(cls_sess);
3223 }
3224 
3225 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3226 {
3227 	struct iscsi_session *sess;
3228 	struct ddb_entry *ddb_entry;
3229 	struct scsi_qla_host *ha;
3230 
3231 	sess = cls_sess->dd_data;
3232 	ddb_entry = sess->dd_data;
3233 	ha = ddb_entry->ha;
3234 
3235 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3236 		return;
3237 
3238 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3239 	    !iscsi_is_session_online(cls_sess)) {
3240 		DEBUG2(ql4_printk(KERN_INFO, ha,
3241 				  "relogin issued\n"));
3242 		qla4xxx_relogin_flash_ddb(cls_sess);
3243 	}
3244 }
3245 
3246 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3247 {
3248 	if (ha->dpc_thread)
3249 		queue_work(ha->dpc_thread, &ha->dpc_work);
3250 }
3251 
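/**
 * qla4xxx_alloc_work - allocate a deferred work event
 * @ha: Pointer to host adapter structure
 * @data_size: Size of the event payload in bytes
 * @type: Type of the work event
 *
 * Returns a zeroed qla4_work_evt with room for @data_size bytes of
 * payload, or NULL if the atomic allocation fails.
 **/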
3252 static struct qla4_work_evt *
3253 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3254 		   enum qla4_work_type type)
3255 {
3256 	struct qla4_work_evt *e;
3257 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3258 
3259 	e = kzalloc(size, GFP_ATOMIC);
3260 	if (!e)
3261 		return NULL;
3262 
3263 	INIT_LIST_HEAD(&e->list);
3264 	e->type = type;
3265 	return e;
3266 }
3267 
3268 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3269 			     struct qla4_work_evt *e)
3270 {
3271 	unsigned long flags;
3272 
3273 	spin_lock_irqsave(&ha->work_lock, flags);
3274 	list_add_tail(&e->list, &ha->work_list);
3275 	spin_unlock_irqrestore(&ha->work_lock, flags);
3276 	qla4xxx_wake_dpc(ha);
3277 }
3278 
3279 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3280 			  enum iscsi_host_event_code aen_code,
3281 			  uint32_t data_size, uint8_t *data)
3282 {
3283 	struct qla4_work_evt *e;
3284 
3285 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3286 	if (!e)
3287 		return QLA_ERROR;
3288 
3289 	e->u.aen.code = aen_code;
3290 	e->u.aen.data_size = data_size;
3291 	memcpy(e->u.aen.data, data, data_size);
3292 
3293 	qla4xxx_post_work(ha, e);
3294 
3295 	return QLA_SUCCESS;
3296 }
3297 
3298 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3299 			       uint32_t status, uint32_t pid,
3300 			       uint32_t data_size, uint8_t *data)
3301 {
3302 	struct qla4_work_evt *e;
3303 
3304 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3305 	if (!e)
3306 		return QLA_ERROR;
3307 
3308 	e->u.ping.status = status;
3309 	e->u.ping.pid = pid;
3310 	e->u.ping.data_size = data_size;
3311 	memcpy(e->u.ping.data, data, data_size);
3312 
3313 	qla4xxx_post_work(ha, e);
3314 
3315 	return QLA_SUCCESS;
3316 }
3317 
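/**
 * qla4xxx_do_work - process the adapter's deferred work list
 * @ha: Pointer to host adapter structure
 *
 * Splices off the pending work events and posts them (AEN or ping
 * completion) to the iSCSI transport class, freeing each event.
 **/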
3318 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3319 {
3320 	struct qla4_work_evt *e, *tmp;
3321 	unsigned long flags;
3322 	LIST_HEAD(work);
3323 
3324 	spin_lock_irqsave(&ha->work_lock, flags);
3325 	list_splice_init(&ha->work_list, &work);
3326 	spin_unlock_irqrestore(&ha->work_lock, flags);
3327 
3328 	list_for_each_entry_safe(e, tmp, &work, list) {
3329 		list_del_init(&e->list);
3330 
3331 		switch (e->type) {
3332 		case QLA4_EVENT_AEN:
3333 			iscsi_post_host_event(ha->host_no,
3334 					      &qla4xxx_iscsi_transport,
3335 					      e->u.aen.code,
3336 					      e->u.aen.data_size,
3337 					      e->u.aen.data);
3338 			break;
3339 		case QLA4_EVENT_PING_STATUS:
3340 			iscsi_ping_comp_event(ha->host_no,
3341 					      &qla4xxx_iscsi_transport,
3342 					      e->u.ping.status,
3343 					      e->u.ping.pid,
3344 					      e->u.ping.data_size,
3345 					      e->u.ping.data);
3346 			break;
3347 		default:
3348 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3349 				   "supported", e->type);
3350 		}
3351 		kfree(e);
3352 	}
3353 }
3354 
3355 /**
3356  * qla4xxx_do_dpc - dpc routine
3357  * @work: pointer to the work_struct embedded in the adapter structure
3358  *
3359  * This routine is a task that is scheduled by the interrupt handler
3360  * to perform the background processing for interrupts.  We put it
3361  * on a task queue that is consumed whenever the scheduler runs; that's
3362  * so you can do anything (e.g. put the process to sleep, etc.).  In fact,
3363  * the mid-level tries to sleep when it reaches the driver threshold
3364  * "host->can_queue". This can cause a panic if we were in our interrupt code.
3365  **/
3366 static void qla4xxx_do_dpc(struct work_struct *work)
3367 {
3368 	struct scsi_qla_host *ha =
3369 		container_of(work, struct scsi_qla_host, dpc_work);
3370 	int status = QLA_ERROR;
3371 
3372 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
3373 	    " flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3374 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
3375 
3376 	/* Initialization not yet finished. Don't do anything yet. */
3377 	if (!test_bit(AF_INIT_DONE, &ha->flags))
3378 		return;
3379 
3380 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3381 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3382 		    ha->host_no, __func__, ha->flags));
3383 		return;
3384 	}
3385 
3386 	/* post events to application */
3387 	qla4xxx_do_work(ha);
3388 
3389 	if (is_qla8022(ha)) {
3390 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3391 			ha->isp_ops->idc_lock(ha);
3392 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3393 					    QLA8XXX_DEV_FAILED);
3394 			ha->isp_ops->idc_unlock(ha);
3395 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3396 			qla4_8xxx_device_state_handler(ha);
3397 		}
3398 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3399 			qla4_8xxx_need_qsnt_handler(ha);
3400 		}
3401 	}
3402 
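	/* Handle pending adapter reset requests (full HA reset, reset
	 * requested from the interrupt path, or a firmware-context reset)
	 * unless a reset is already active or ql4xdontresethba is set.
	 */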
3403 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3404 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3405 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3406 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3407 		if (ql4xdontresethba) {
3408 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3409 			    ha->host_no, __func__));
3410 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3411 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3412 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3413 			goto dpc_post_reset_ha;
3414 		}
3415 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3416 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
3417 			qla4xxx_recover_adapter(ha);
3418 
3419 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3420 			uint8_t wait_time = RESET_INTR_TOV;
3421 
3422 			while ((readw(&ha->reg->ctrl_status) &
3423 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3424 				if (--wait_time == 0)
3425 					break;
3426 				msleep(1000);
3427 			}
3428 			if (wait_time == 0)
3429 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
3430 					      "bit not cleared-- resetting\n",
3431 					      ha->host_no, __func__));
3432 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3433 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3434 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3435 				status = qla4xxx_recover_adapter(ha);
3436 			}
3437 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3438 			if (status == QLA_SUCCESS)
3439 				ha->isp_ops->enable_intrs(ha);
3440 		}
3441 	}
3442 
3443 dpc_post_reset_ha:
3444 	/* ---- process AEN? --- */
3445 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3446 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3447 
3448 	/* ---- Get DHCP IP Address? --- */
3449 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3450 		qla4xxx_get_dhcp_ip_address(ha);
3451 
3452 	/* ---- relogin device? --- */
3453 	if (adapter_up(ha) &&
3454 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3455 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3456 	}
3457 
3458 	/* ---- link change? --- */
3459 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3460 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
3461 			/* ---- link down? --- */
3462 			qla4xxx_mark_all_devices_missing(ha);
3463 		} else {
3464 			/* ---- link up? --- *
3465 			 * F/W will auto login to all devices ONLY ONCE after
3466 			 * link up during driver initialization and runtime
3467 			 * fatal error recovery.  Therefore, the driver must
3468 			 * manually relogin to devices when recovering from
3469 			 * connection failures, logouts, expired KATO, etc. */
3470 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3471 				qla4xxx_build_ddb_list(ha, ha->is_reset);
3472 				iscsi_host_for_each_session(ha->host,
3473 						qla4xxx_login_flash_ddb);
3474 			} else
3475 				qla4xxx_relogin_all_devices(ha);
3476 		}
3477 	}
3478 }
3479 
3480 /**
3481  * qla4xxx_free_adapter - release the adapter
3482  * @ha: pointer to adapter structure
3483  **/
3484 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3485 {
3486 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3487 
3488 	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3489 		/* Turn-off interrupts on the card. */
3490 		ha->isp_ops->disable_intrs(ha);
3491 	}
3492 
3493 	if (is_qla40XX(ha)) {
3494 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3495 		       &ha->reg->ctrl_status);
3496 		readl(&ha->reg->ctrl_status);
3497 	} else if (is_qla8022(ha)) {
3498 		writel(0, &ha->qla4_82xx_reg->host_int);
3499 		readl(&ha->qla4_82xx_reg->host_int);
3500 	}
3501 
3502 	/* Remove timer thread, if present */
3503 	if (ha->timer_active)
3504 		qla4xxx_stop_timer(ha);
3505 
3506 	/* Kill the kernel thread for this host */
3507 	if (ha->dpc_thread)
3508 		destroy_workqueue(ha->dpc_thread);
3509 
3510 	/* Kill the kernel thread for this host */
3511 	if (ha->task_wq)
3512 		destroy_workqueue(ha->task_wq);
3513 
3514 	/* Put firmware in known state */
3515 	ha->isp_ops->reset_firmware(ha);
3516 
3517 	if (is_qla8022(ha)) {
3518 		ha->isp_ops->idc_lock(ha);
3519 		qla4_8xxx_clear_drv_active(ha);
3520 		ha->isp_ops->idc_unlock(ha);
3521 	}
3522 
3523 	/* Detach interrupts */
3524 	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
3525 		qla4xxx_free_irqs(ha);
3526 
3527 	/* free extra memory */
3528 	qla4xxx_mem_free(ha);
3529 }
3530 
3531 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3532 {
3533 	int status = 0;
3534 	unsigned long mem_base, mem_len, db_base, db_len;
3535 	struct pci_dev *pdev = ha->pdev;
3536 
3537 	status = pci_request_regions(pdev, DRIVER_NAME);
3538 	if (status) {
3539 		printk(KERN_WARNING
3540 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
3541 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
3542 		goto iospace_error_exit;
3543 	}
3544 
3545 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
3546 	    __func__, pdev->revision));
3547 	ha->revision_id = pdev->revision;
3548 
3549 	/* remap phys address */
3550 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3551 	mem_len = pci_resource_len(pdev, 0);
3552 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3553 	    __func__, mem_base, mem_len));
3554 
3555 	/* mapping of pcibase pointer */
3556 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3557 	if (!ha->nx_pcibase) {
3558 		printk(KERN_ERR
3559 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3560 		pci_release_regions(ha->pdev);
3561 		goto iospace_error_exit;
3562 	}
3563 
3564 	/* Mapping of IO base pointer, door bell read and write pointer */
3565 
3566 	/* mapping of IO base pointer (BAR 0 + 0xbc000 + devfn * 2KB) */
3567 	ha->qla4_82xx_reg =
3568 	    (struct device_reg_82xx  __iomem *)((uint8_t *)ha->nx_pcibase +
3569 	    0xbc000 + (ha->pdev->devfn << 11));
3570 
3571 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
3572 	db_len = pci_resource_len(pdev, 4);
3573 
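	/* Pick the CAM RAM doorbell register for this function: devfn 4
	 * uses doorbell 1, all other functions use doorbell 2.
	 */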
3574 	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3575 	    QLA82XX_CAM_RAM_DB2);
3576 
3577 	return 0;
3578 iospace_error_exit:
3579 	return -ENOMEM;
3580 }
3581 
3582 /**
3583  * qla4xxx_iospace_config - maps registers
3584  * @ha: pointer to adapter structure
3585  *
3586  * This routine maps the HBA's registers from the PCI address space
3587  * into the kernel virtual address space for memory-mapped I/O.
3588  **/
3589 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
3590 {
3591 	unsigned long pio, pio_len, pio_flags;
3592 	unsigned long mmio, mmio_len, mmio_flags;
3593 
3594 	pio = pci_resource_start(ha->pdev, 0);
3595 	pio_len = pci_resource_len(ha->pdev, 0);
3596 	pio_flags = pci_resource_flags(ha->pdev, 0);
3597 	if (pio_flags & IORESOURCE_IO) {
3598 		if (pio_len < MIN_IOBASE_LEN) {
3599 			ql4_printk(KERN_WARNING, ha,
3600 				"Invalid PCI I/O region size\n");
3601 			pio = 0;
3602 		}
3603 	} else {
3604 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
3605 		pio = 0;
3606 	}
3607 
3608 	/* Use MMIO operations for all accesses. */
3609 	mmio = pci_resource_start(ha->pdev, 1);
3610 	mmio_len = pci_resource_len(ha->pdev, 1);
3611 	mmio_flags = pci_resource_flags(ha->pdev, 1);
3612 
3613 	if (!(mmio_flags & IORESOURCE_MEM)) {
3614 		ql4_printk(KERN_ERR, ha,
3615 		    "region #0 not an MMIO resource, aborting\n");
3616 
3617 		goto iospace_error_exit;
3618 	}
3619 
3620 	if (mmio_len < MIN_IOBASE_LEN) {
3621 		ql4_printk(KERN_ERR, ha,
3622 		    "Invalid PCI mem region size, aborting\n");
3623 		goto iospace_error_exit;
3624 	}
3625 
3626 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
3627 		ql4_printk(KERN_WARNING, ha,
3628 		    "Failed to reserve PIO/MMIO regions\n");
3629 
3630 		goto iospace_error_exit;
3631 	}
3632 
3633 	ha->pio_address = pio;
3634 	ha->pio_length = pio_len;
3635 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3636 	if (!ha->reg) {
3637 		ql4_printk(KERN_ERR, ha,
3638 		    "cannot remap MMIO, aborting\n");
3639 
3640 		goto iospace_error_exit;
3641 	}
3642 
3643 	return 0;
3644 
3645 iospace_error_exit:
3646 	return -ENOMEM;
3647 }
3648 
3649 static struct isp_operations qla4xxx_isp_ops = {
3650 	.iospace_config         = qla4xxx_iospace_config,
3651 	.pci_config             = qla4xxx_pci_config,
3652 	.disable_intrs          = qla4xxx_disable_intrs,
3653 	.enable_intrs           = qla4xxx_enable_intrs,
3654 	.start_firmware         = qla4xxx_start_firmware,
3655 	.intr_handler           = qla4xxx_intr_handler,
3656 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
3657 	.reset_chip             = qla4xxx_soft_reset,
3658 	.reset_firmware         = qla4xxx_hw_reset,
3659 	.queue_iocb             = qla4xxx_queue_iocb,
3660 	.complete_iocb          = qla4xxx_complete_iocb,
3661 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
3662 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
3663 	.get_sys_info           = qla4xxx_get_sys_info,
3664 	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
3665 	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
3666 };
3667 
3668 static struct isp_operations qla4_82xx_isp_ops = {
3669 	.iospace_config         = qla4_8xxx_iospace_config,
3670 	.pci_config             = qla4_8xxx_pci_config,
3671 	.disable_intrs          = qla4_82xx_disable_intrs,
3672 	.enable_intrs           = qla4_82xx_enable_intrs,
3673 	.start_firmware         = qla4_8xxx_load_risc,
3674 	.restart_firmware	= qla4_82xx_try_start_fw,
3675 	.intr_handler           = qla4_82xx_intr_handler,
3676 	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
3677 	.need_reset		= qla4_8xxx_need_reset,
3678 	.reset_chip             = qla4_82xx_isp_reset,
3679 	.reset_firmware         = qla4_8xxx_stop_firmware,
3680 	.queue_iocb             = qla4_82xx_queue_iocb,
3681 	.complete_iocb          = qla4_82xx_complete_iocb,
3682 	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
3683 	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
3684 	.get_sys_info           = qla4_8xxx_get_sys_info,
3685 	.rd_reg_direct		= qla4_82xx_rd_32,
3686 	.wr_reg_direct		= qla4_82xx_wr_32,
3687 	.rd_reg_indirect	= qla4_82xx_md_rd_32,
3688 	.wr_reg_indirect	= qla4_82xx_md_wr_32,
3689 	.idc_lock		= qla4_82xx_idc_lock,
3690 	.idc_unlock		= qla4_82xx_idc_unlock,
3691 	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
3692 	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
3693 	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
3694 };
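/*
 * All chip-specific behaviour is reached through ha->isp_ops, so common
 * code stays identical for ISP4xxx and ISP82xx.  For example (illustrative
 * only):
 *
 *	ha->isp_ops->disable_intrs(ha);
 *	status = ha->isp_ops->reset_chip(ha);
 */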
3695 
3696 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3697 {
3698 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3699 }
3700 
3701 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3702 {
3703 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
3704 }
3705 
3706 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3707 {
3708 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3709 }
3710 
3711 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3712 {
3713 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
3714 }
3715 
3716 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3717 {
3718 	struct scsi_qla_host *ha = data;
3719 	char *str = buf;
3720 	int rc;
3721 
3722 	switch (type) {
3723 	case ISCSI_BOOT_ETH_FLAGS:
3724 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3725 		break;
3726 	case ISCSI_BOOT_ETH_INDEX:
3727 		rc = sprintf(str, "0\n");
3728 		break;
3729 	case ISCSI_BOOT_ETH_MAC:
3730 		rc = sysfs_format_mac(str, ha->my_mac,
3731 				      MAC_ADDR_LEN);
3732 		break;
3733 	default:
3734 		rc = -ENOSYS;
3735 		break;
3736 	}
3737 	return rc;
3738 }
3739 
3740 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
3741 {
3742 	int rc;
3743 
3744 	switch (type) {
3745 	case ISCSI_BOOT_ETH_FLAGS:
3746 	case ISCSI_BOOT_ETH_MAC:
3747 	case ISCSI_BOOT_ETH_INDEX:
3748 		rc = S_IRUGO;
3749 		break;
3750 	default:
3751 		rc = 0;
3752 		break;
3753 	}
3754 	return rc;
3755 }
3756 
3757 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3758 {
3759 	struct scsi_qla_host *ha = data;
3760 	char *str = buf;
3761 	int rc;
3762 
3763 	switch (type) {
3764 	case ISCSI_BOOT_INI_INITIATOR_NAME:
3765 		rc = sprintf(str, "%s\n", ha->name_string);
3766 		break;
3767 	default:
3768 		rc = -ENOSYS;
3769 		break;
3770 	}
3771 	return rc;
3772 }
3773 
3774 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
3775 {
3776 	int rc;
3777 
3778 	switch (type) {
3779 	case ISCSI_BOOT_INI_INITIATOR_NAME:
3780 		rc = S_IRUGO;
3781 		break;
3782 	default:
3783 		rc = 0;
3784 		break;
3785 	}
3786 	return rc;
3787 }
3788 
3789 static ssize_t
3790 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3791 			   char *buf)
3792 {
3793 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3794 	char *str = buf;
3795 	int rc;
3796 
3797 	switch (type) {
3798 	case ISCSI_BOOT_TGT_NAME:
3799 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3800 		break;
3801 	case ISCSI_BOOT_TGT_IP_ADDR:
3802 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3803 			rc = sprintf(buf, "%pI4\n",
3804 				     &boot_conn->dest_ipaddr.ip_address);
3805 		else
3806 			rc = sprintf(str, "%pI6\n",
3807 				     &boot_conn->dest_ipaddr.ip_address);
3808 		break;
3809 	case ISCSI_BOOT_TGT_PORT:
3810 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
3811 		break;
3812 	case ISCSI_BOOT_TGT_CHAP_NAME:
3813 		rc = sprintf(str,  "%.*s\n",
3814 			     boot_conn->chap.target_chap_name_length,
3815 			     (char *)&boot_conn->chap.target_chap_name);
3816 		break;
3817 	case ISCSI_BOOT_TGT_CHAP_SECRET:
3818 		rc = sprintf(str,  "%.*s\n",
3819 			     boot_conn->chap.target_secret_length,
3820 			     (char *)&boot_conn->chap.target_secret);
3821 		break;
3822 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3823 		rc = sprintf(str,  "%.*s\n",
3824 			     boot_conn->chap.intr_chap_name_length,
3825 			     (char *)&boot_conn->chap.intr_chap_name);
3826 		break;
3827 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3828 		rc = sprintf(str,  "%.*s\n",
3829 			     boot_conn->chap.intr_secret_length,
3830 			     (char *)&boot_conn->chap.intr_secret);
3831 		break;
3832 	case ISCSI_BOOT_TGT_FLAGS:
3833 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3834 		break;
3835 	case ISCSI_BOOT_TGT_NIC_ASSOC:
3836 		rc = sprintf(str, "0\n");
3837 		break;
3838 	default:
3839 		rc = -ENOSYS;
3840 		break;
3841 	}
3842 	return rc;
3843 }
3844 
3845 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3846 {
3847 	struct scsi_qla_host *ha = data;
3848 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3849 
3850 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3851 }
3852 
3853 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3854 {
3855 	struct scsi_qla_host *ha = data;
3856 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3857 
3858 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3859 }
3860 
3861 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
3862 {
3863 	int rc;
3864 
3865 	switch (type) {
3866 	case ISCSI_BOOT_TGT_NAME:
3867 	case ISCSI_BOOT_TGT_IP_ADDR:
3868 	case ISCSI_BOOT_TGT_PORT:
3869 	case ISCSI_BOOT_TGT_CHAP_NAME:
3870 	case ISCSI_BOOT_TGT_CHAP_SECRET:
3871 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3872 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3873 	case ISCSI_BOOT_TGT_NIC_ASSOC:
3874 	case ISCSI_BOOT_TGT_FLAGS:
3875 		rc = S_IRUGO;
3876 		break;
3877 	default:
3878 		rc = 0;
3879 		break;
3880 	}
3881 	return rc;
3882 }
3883 
3884 static void qla4xxx_boot_release(void *data)
3885 {
3886 	struct scsi_qla_host *ha = data;
3887 
3888 	scsi_host_put(ha->host);
3889 }
3890 
3891 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3892 {
3893 	dma_addr_t buf_dma;
3894 	uint32_t addr, pri_addr, sec_addr;
3895 	uint32_t offset;
3896 	uint16_t func_num;
3897 	uint8_t val;
3898 	uint8_t *buf = NULL;
3899 	size_t size = 13 * sizeof(uint8_t);
3900 	int ret = QLA_SUCCESS;
3901 
3902 	func_num = PCI_FUNC(ha->pdev->devfn);
3903 
3904 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3905 		   __func__, ha->pdev->device, func_num);
3906 
3907 	if (is_qla40XX(ha)) {
3908 		if (func_num == 1) {
3909 			addr = NVRAM_PORT0_BOOT_MODE;
3910 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3911 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3912 		} else if (func_num == 3) {
3913 			addr = NVRAM_PORT1_BOOT_MODE;
3914 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3915 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3916 		} else {
3917 			ret = QLA_ERROR;
3918 			goto exit_boot_info;
3919 		}
3920 
3921 		/* Check Boot Mode */
3922 		val = rd_nvram_byte(ha, addr);
3923 		if (!(val & 0x07)) {
3924 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3925 					  "options : 0x%x\n", __func__, val));
3926 			ret = QLA_ERROR;
3927 			goto exit_boot_info;
3928 		}
3929 
3930 		/* get primary valid target index */
3931 		val = rd_nvram_byte(ha, pri_addr);
3932 		if (val & BIT_7)
3933 			ddb_index[0] = (val & 0x7f);
3934 
3935 		/* get secondary valid target index */
3936 		val = rd_nvram_byte(ha, sec_addr);
3937 		if (val & BIT_7)
3938 			ddb_index[1] = (val & 0x7f);
3939 
3940 	} else if (is_qla8022(ha)) {
3941 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
3942 					 &buf_dma, GFP_KERNEL);
3943 		if (!buf) {
3944 			DEBUG2(ql4_printk(KERN_ERR, ha,
3945 					  "%s: Unable to allocate dma buffer\n",
3946 					   __func__));
3947 			ret = QLA_ERROR;
3948 			goto exit_boot_info;
3949 		}
3950 
3951 		if (ha->port_num == 0)
3952 			offset = BOOT_PARAM_OFFSET_PORT0;
3953 		else if (ha->port_num == 1)
3954 			offset = BOOT_PARAM_OFFSET_PORT1;
3955 		else {
3956 			ret = QLA_ERROR;
3957 			goto exit_boot_info_free;
3958 		}
3959 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3960 		       offset;
3961 		if (qla4xxx_get_flash(ha, buf_dma, addr,
3962 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3963 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
3964 					  " failed\n", ha->host_no, __func__));
3965 			ret = QLA_ERROR;
3966 			goto exit_boot_info_free;
3967 		}
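		/* Layout of the 13-byte boot-param block read from flash:
		 * buf[1]  - firmware boot options (low 3 bits non-zero => boot enabled)
		 * buf[2]  - primary boot target DDB index (BIT_7 = valid)
		 * buf[11] - secondary boot target DDB index (BIT_7 = valid)
		 */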
3968 		/* Check Boot Mode */
3969 		if (!(buf[1] & 0x07)) {
3970 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3971 					  " : 0x%x\n", buf[1]));
3972 			ret = QLA_ERROR;
3973 			goto exit_boot_info_free;
3974 		}
3975 
3976 		/* get primary valid target index */
3977 		if (buf[2] & BIT_7)
3978 			ddb_index[0] = buf[2] & 0x7f;
3979 
3980 		/* get secondary valid target index */
3981 		if (buf[11] & BIT_7)
3982 			ddb_index[1] = buf[11] & 0x7f;
3983 	} else {
3984 		ret = QLA_ERROR;
3985 		goto exit_boot_info;
3986 	}
3987 
3988 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3989 			  " target ID %d\n", __func__, ddb_index[0],
3990 			  ddb_index[1]));
3991 
3992 exit_boot_info_free:
3993 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3994 exit_boot_info:
3995 	ha->pri_ddb_idx = ddb_index[0];
3996 	ha->sec_ddb_idx = ddb_index[1];
3997 	return ret;
3998 }
3999 
4000 /**
4001  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4002  * @ha: pointer to adapter structure
4003  * @username: CHAP username to be returned
4004  * @password: CHAP password to be returned
4005  *
4006  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4007  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4008  * So from the CHAP cache find the first BIDI CHAP entry and set it
4009  * to the boot record in sysfs.
4010  **/
4011 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4012 			    char *password)
4013 {
4014 	int i, ret = -EINVAL;
4015 	int max_chap_entries = 0;
4016 	struct ql4_chap_table *chap_table;
4017 
4018 	if (is_qla8022(ha))
4019 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
4020 						sizeof(struct ql4_chap_table);
4021 	else
4022 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4023 
4024 	if (!ha->chap_list) {
4025 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4026 		return ret;
4027 	}
4028 
4029 	mutex_lock(&ha->chap_sem);
4030 	for (i = 0; i < max_chap_entries; i++) {
4031 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4032 		if (chap_table->cookie !=
4033 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4034 			continue;
4035 		}
4036 
4037 		if (chap_table->flags & BIT_7) /* local */
4038 			continue;
4039 
4040 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4041 			continue;
4042 
4043 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4044 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4045 		ret = 0;
4046 		break;
4047 	}
4048 	mutex_unlock(&ha->chap_sem);
4049 
4050 	return ret;
4051 }
4052 
4053 
4054 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4055 				   struct ql4_boot_session_info *boot_sess,
4056 				   uint16_t ddb_index)
4057 {
4058 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4059 	struct dev_db_entry *fw_ddb_entry;
4060 	dma_addr_t fw_ddb_entry_dma;
4061 	uint16_t idx;
4062 	uint16_t options;
4063 	int ret = QLA_SUCCESS;
4064 
4065 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4066 					  &fw_ddb_entry_dma, GFP_KERNEL);
4067 	if (!fw_ddb_entry) {
4068 		DEBUG2(ql4_printk(KERN_ERR, ha,
4069 				  "%s: Unable to allocate dma buffer.\n",
4070 				  __func__));
4071 		ret = QLA_ERROR;
4072 		return ret;
4073 	}
4074 
4075 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4076 				   fw_ddb_entry_dma, ddb_index)) {
4077 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4078 				  "index [%d]\n", __func__, ddb_index));
4079 		ret = QLA_ERROR;
4080 		goto exit_boot_target;
4081 	}
4082 
4083 	/* Update target name and IP from DDB */
4084 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4085 	       min(sizeof(boot_sess->target_name),
4086 		   sizeof(fw_ddb_entry->iscsi_name)));
4087 
4088 	options = le16_to_cpu(fw_ddb_entry->options);
4089 	if (options & DDB_OPT_IPV6_DEVICE) {
4090 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4091 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4092 	} else {
4093 		boot_conn->dest_ipaddr.ip_type = 0x1;
4094 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4095 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4096 	}
4097 
4098 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4099 
4100 	/* update chap information */
4101 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4102 
4103 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
4104 
4105 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4106 
4107 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4108 				       target_chap_name,
4109 				       (char *)&boot_conn->chap.target_secret,
4110 				       idx);
4111 		if (ret) {
4112 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4113 			ret = QLA_ERROR;
4114 			goto exit_boot_target;
4115 		}
4116 
4117 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4118 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4119 	}
4120 
4121 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4122 
4123 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4124 
4125 		ret = qla4xxx_get_bidi_chap(ha,
4126 				    (char *)&boot_conn->chap.intr_chap_name,
4127 				    (char *)&boot_conn->chap.intr_secret);
4128 
4129 		if (ret) {
4130 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4131 			ret = QLA_ERROR;
4132 			goto exit_boot_target;
4133 		}
4134 
4135 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4136 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4137 	}
4138 
4139 exit_boot_target:
4140 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4141 			  fw_ddb_entry, fw_ddb_entry_dma);
4142 	return ret;
4143 }
4144 
4145 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4146 {
4147 	uint16_t ddb_index[2];
4148 	int ret = QLA_ERROR;
4149 	int rval;
4150 
4151 	memset(ddb_index, 0, sizeof(ddb_index));
4152 	ddb_index[0] = 0xffff;
4153 	ddb_index[1] = 0xffff;
4154 	ret = get_fw_boot_info(ha, ddb_index);
4155 	if (ret != QLA_SUCCESS) {
4156 		DEBUG2(ql4_printk(KERN_INFO, ha,
4157 				"%s: No boot target configured.\n", __func__));
4158 		return ret;
4159 	}
4160 
4161 	if (ql4xdisablesysfsboot)
4162 		return QLA_SUCCESS;
4163 
4164 	if (ddb_index[0] == 0xffff)
4165 		goto sec_target;
4166 
4167 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4168 				      ddb_index[0]);
4169 	if (rval != QLA_SUCCESS) {
4170 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4171 				  "configured\n", __func__));
4172 	} else
4173 		ret = QLA_SUCCESS;
4174 
4175 sec_target:
4176 	if (ddb_index[1] == 0xffff)
4177 		goto exit_get_boot_info;
4178 
4179 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4180 				      ddb_index[1]);
4181 	if (rval != QLA_SUCCESS) {
4182 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4183 				  " configured\n", __func__));
4184 	} else
4185 		ret = QLA_SUCCESS;
4186 
4187 exit_get_boot_info:
4188 	return ret;
4189 }
4190 
4191 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4192 {
4193 	struct iscsi_boot_kobj *boot_kobj;
4194 
4195 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4196 		return QLA_ERROR;
4197 
4198 	if (ql4xdisablesysfsboot) {
4199 		ql4_printk(KERN_INFO, ha,
4200 			   "%s: syfsboot disabled - driver will trigger login "
4201 			   "and publish session for discovery .\n", __func__);
4202 		return QLA_SUCCESS;
4203 	}
4204 
4205 
4206 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4207 	if (!ha->boot_kset)
4208 		goto kset_free;
4209 
4210 	if (!scsi_host_get(ha->host))
4211 		goto kset_free;
4212 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4213 					     qla4xxx_show_boot_tgt_pri_info,
4214 					     qla4xxx_tgt_get_attr_visibility,
4215 					     qla4xxx_boot_release);
4216 	if (!boot_kobj)
4217 		goto put_host;
4218 
4219 	if (!scsi_host_get(ha->host))
4220 		goto kset_free;
4221 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4222 					     qla4xxx_show_boot_tgt_sec_info,
4223 					     qla4xxx_tgt_get_attr_visibility,
4224 					     qla4xxx_boot_release);
4225 	if (!boot_kobj)
4226 		goto put_host;
4227 
4228 	if (!scsi_host_get(ha->host))
4229 		goto kset_free;
4230 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4231 					       qla4xxx_show_boot_ini_info,
4232 					       qla4xxx_ini_get_attr_visibility,
4233 					       qla4xxx_boot_release);
4234 	if (!boot_kobj)
4235 		goto put_host;
4236 
4237 	if (!scsi_host_get(ha->host))
4238 		goto kset_free;
4239 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4240 					       qla4xxx_show_boot_eth_info,
4241 					       qla4xxx_eth_get_attr_visibility,
4242 					       qla4xxx_boot_release);
4243 	if (!boot_kobj)
4244 		goto put_host;
4245 
4246 	return QLA_SUCCESS;
4247 
4248 put_host:
4249 	scsi_host_put(ha->host);
4250 kset_free:
4251 	iscsi_boot_destroy_kset(ha->boot_kset);
4252 	return -ENOMEM;
4253 }
4254 
4255 
4256 /**
4257  * qla4xxx_create_chap_list - Create CHAP list from FLASH
4258  * @ha: pointer to adapter structure
4259  *
4260  * Read flash and build a list of CHAP entries.  During login, when a CHAP
4261  * entry is received it is looked up in this list.  If the entry exists,
4262  * its index is set in the DDB.  If it does not exist, a new entry is
4263  * added to the CHAP table in FLASH and the index obtained is used in
4264  * the DDB.
4265  **/
4266 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4267 {
4268 	int rval = 0;
4269 	uint8_t *chap_flash_data = NULL;
4270 	uint32_t offset;
4271 	dma_addr_t chap_dma;
4272 	uint32_t chap_size = 0;
4273 
4274 	if (is_qla40XX(ha))
4275 		chap_size = MAX_CHAP_ENTRIES_40XX  *
4276 					sizeof(struct ql4_chap_table);
4277 	else	/* Single region contains CHAP info for both
4278 		 * ports which is divided into half for each port.
4279 		 */
4280 		chap_size = ha->hw.flt_chap_size / 2;
4281 
4282 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4283 					  &chap_dma, GFP_KERNEL);
4284 	if (!chap_flash_data) {
4285 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4286 		return;
4287 	}
4288 	if (is_qla40XX(ha))
4289 		offset = FLASH_CHAP_OFFSET;
4290 	else {
4291 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4292 		if (ha->port_num == 1)
4293 			offset += chap_size;
4294 	}
4295 
4296 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4297 	if (rval != QLA_SUCCESS)
4298 		goto exit_chap_list;
4299 
4300 	if (ha->chap_list == NULL)
4301 		ha->chap_list = vmalloc(chap_size);
4302 	if (ha->chap_list == NULL) {
4303 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4304 		goto exit_chap_list;
4305 	}
4306 
4307 	memcpy(ha->chap_list, chap_flash_data, chap_size);
4308 
4309 exit_chap_list:
4310 	dma_free_coherent(&ha->pdev->dev, chap_size,
4311 			chap_flash_data, chap_dma);
4312 }
4313 
4314 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4315 				  struct ql4_tuple_ddb *tddb)
4316 {
4317 	struct scsi_qla_host *ha;
4318 	struct iscsi_cls_session *cls_sess;
4319 	struct iscsi_cls_conn *cls_conn;
4320 	struct iscsi_session *sess;
4321 	struct iscsi_conn *conn;
4322 
4323 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4324 	ha = ddb_entry->ha;
4325 	cls_sess = ddb_entry->sess;
4326 	sess = cls_sess->dd_data;
4327 	cls_conn = ddb_entry->conn;
4328 	conn = cls_conn->dd_data;
4329 
4330 	tddb->tpgt = sess->tpgt;
4331 	tddb->port = conn->persistent_port;
4332 	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4333 	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4334 }
4335 
4336 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4337 				      struct ql4_tuple_ddb *tddb,
4338 				      uint8_t *flash_isid)
4339 {
4340 	uint16_t options = 0;
4341 
4342 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4343 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4344 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4345 
4346 	options = le16_to_cpu(fw_ddb_entry->options);
4347 	if (options & DDB_OPT_IPV6_DEVICE)
4348 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4349 	else
4350 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4351 
4352 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
4353 
4354 	if (flash_isid == NULL)
4355 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4356 		       sizeof(tddb->isid));
4357 	else
4358 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4359 }
4360 
4361 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4362 				     struct ql4_tuple_ddb *old_tddb,
4363 				     struct ql4_tuple_ddb *new_tddb,
4364 				     uint8_t is_isid_compare)
4365 {
4366 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4367 		return QLA_ERROR;
4368 
4369 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4370 		return QLA_ERROR;
4371 
4372 	if (old_tddb->port != new_tddb->port)
4373 		return QLA_ERROR;
4374 
4375 	/* For multi-session targets, the driver generates the ISID, so do not
4376 	 * compare ISIDs in the reset path since that would be a comparison
4377 	 * between a driver-generated ISID and a firmware-generated ISID.
4378 	 * This could lead to adding duplicate DDBs to the list because the
4379 	 * driver-generated ISID would not match the firmware-generated ISID.
4380 	 */
4381 	if (is_isid_compare) {
4382 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4383 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4384 			__func__, old_tddb->isid[5], old_tddb->isid[4],
4385 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4386 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4387 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4388 			new_tddb->isid[0]));
4389 
4390 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4391 			   sizeof(old_tddb->isid)))
4392 			return QLA_ERROR;
4393 	}
4394 
4395 	DEBUG2(ql4_printk(KERN_INFO, ha,
4396 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4397 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4398 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4399 			  new_tddb->ip_addr, new_tddb->iscsi_name));
4400 
4401 	return QLA_SUCCESS;
4402 }
4403 
4404 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4405 				     struct dev_db_entry *fw_ddb_entry)
4406 {
4407 	struct ddb_entry *ddb_entry;
4408 	struct ql4_tuple_ddb *fw_tddb = NULL;
4409 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4410 	int idx;
4411 	int ret = QLA_ERROR;
4412 
4413 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4414 	if (!fw_tddb) {
4415 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4416 				  "Memory Allocation failed.\n"));
4417 		ret = QLA_SUCCESS;
4418 		goto exit_check;
4419 	}
4420 
4421 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4422 	if (!tmp_tddb) {
4423 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4424 				  "Memory Allocation failed.\n"));
4425 		ret = QLA_SUCCESS;
4426 		goto exit_check;
4427 	}
4428 
4429 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4430 
4431 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4432 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4433 		if (ddb_entry == NULL)
4434 			continue;
4435 
4436 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4437 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4438 			ret = QLA_SUCCESS; /* found */
4439 			goto exit_check;
4440 		}
4441 	}
4442 
4443 exit_check:
4444 	if (fw_tddb)
4445 		vfree(fw_tddb);
4446 	if (tmp_tddb)
4447 		vfree(tmp_tddb);
4448 	return ret;
4449 }
4450 
4451 /**
4452  * qla4xxx_check_existing_isid - check if a target with the same isid
4453  *				 exists in the target list
4454  * @list_nt: list of targets
4455  * @isid: isid to check
4456  *
4457  * This routine returns QLA_SUCCESS if a target with the same isid exists
4458  **/
4459 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4460 {
4461 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4462 	struct dev_db_entry *fw_ddb_entry;
4463 
4464 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4465 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4466 
4467 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4468 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4469 			return QLA_SUCCESS;
4470 		}
4471 	}
4472 	return QLA_ERROR;
4473 }
4474 
4475 /**
4476  * qla4xxx_update_isid - compare ddbs and update the isid
4477  * @ha: Pointer to host adapter structure.
4478  * @list_nt: list of nt targets
4479  * @fw_ddb_entry: firmware ddb entry
4480  *
4481  * This routine updates the isid if ddbs have the same iqn, the same isid
4482  * and different IP addresses.
4483  * Returns QLA_SUCCESS if the isid is updated.
4484  **/
4485 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4486 			       struct list_head *list_nt,
4487 			       struct dev_db_entry *fw_ddb_entry)
4488 {
4489 	uint8_t base_value, i;
4490 
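	/* Keep the low five bits of isid[1] and try each of the eight
	 * possible values for its top three bits until an ISID is found
	 * that is not already present in list_nt.
	 */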
4491 	base_value = fw_ddb_entry->isid[1] & 0x1f;
4492 	for (i = 0; i < 8; i++) {
4493 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
4494 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4495 			break;
4496 	}
4497 
4498 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4499 		return QLA_ERROR;
4500 
4501 	return QLA_SUCCESS;
4502 }
4503 
4504 /**
4505  * qla4xxx_should_update_isid - check if the isid needs to be updated
4506  * @ha: Pointer to host adapter structure.
4507  * @old_tddb: ddb tuple
4508  * @new_tddb: ddb tuple
4509  *
4510  * Returns QLA_SUCCESS if the IP address or port differs while the iqn
4511  * and isid are the same
4512  **/
4513 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4514 				      struct ql4_tuple_ddb *old_tddb,
4515 				      struct ql4_tuple_ddb *new_tddb)
4516 {
4517 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4518 		/* Same ip */
4519 		if (old_tddb->port == new_tddb->port)
4520 			return QLA_ERROR;
4521 	}
4522 
4523 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4524 		/* different iqn */
4525 		return QLA_ERROR;
4526 
4527 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4528 		   sizeof(old_tddb->isid)))
4529 		/* different isid */
4530 		return QLA_ERROR;
4531 
4532 	return QLA_SUCCESS;
4533 }
4534 
4535 /**
4536  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4537  * @ha: Pointer to host adapter structure.
4538  * @list_nt: list of nt targets.
4539  * @fw_ddb_entry: firmware ddb entry.
4540  *
4541  * This routine checks if fw_ddb_entry already exists in list_nt to avoid
4542  * adding a duplicate ddb to list_nt.
4543  * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
4544  * Note: This function also updates the isid of the DDB if required.
4545  **/
4546 
4547 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4548 				       struct list_head *list_nt,
4549 				       struct dev_db_entry *fw_ddb_entry)
4550 {
4551 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
4552 	struct ql4_tuple_ddb *fw_tddb = NULL;
4553 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4554 	int rval, ret = QLA_ERROR;
4555 
4556 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4557 	if (!fw_tddb) {
4558 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4559 				  "Memory Allocation failed.\n"));
4560 		ret = QLA_SUCCESS;
4561 		goto exit_check;
4562 	}
4563 
4564 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4565 	if (!tmp_tddb) {
4566 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4567 				  "Memory Allocation failed.\n"));
4568 		ret = QLA_SUCCESS;
4569 		goto exit_check;
4570 	}
4571 
4572 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4573 
4574 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4575 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4576 					  nt_ddb_idx->flash_isid);
4577 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4578 		/* found duplicate ddb */
4579 		if (ret == QLA_SUCCESS)
4580 			goto exit_check;
4581 	}
4582 
4583 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4584 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4585 
4586 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4587 		if (ret == QLA_SUCCESS) {
4588 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4589 			if (rval == QLA_SUCCESS)
4590 				ret = QLA_ERROR;
4591 			else
4592 				ret = QLA_SUCCESS;
4593 
4594 			goto exit_check;
4595 		}
4596 	}
4597 
4598 exit_check:
4599 	if (fw_tddb)
4600 		vfree(fw_tddb);
4601 	if (tmp_tddb)
4602 		vfree(tmp_tddb);
4603 	return ret;
4604 }
4605 
4606 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
4607 {
4608 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
4609 
4610 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4611 		list_del_init(&ddb_idx->list);
4612 		vfree(ddb_idx);
4613 	}
4614 }
4615 
4616 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4617 					struct dev_db_entry *fw_ddb_entry)
4618 {
4619 	struct iscsi_endpoint *ep;
4620 	struct sockaddr_in *addr;
4621 	struct sockaddr_in6 *addr6;
4622 	struct sockaddr *dst_addr;
4623 	char *ip;
4624 
4625 	/* TODO: need to destroy on unload iscsi_endpoint*/
4626 	dst_addr = vmalloc(sizeof(*dst_addr));
4627 	if (!dst_addr)
4628 		return NULL;
4629 
4630 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4631 		dst_addr->sa_family = AF_INET6;
4632 		addr6 = (struct sockaddr_in6 *)dst_addr;
4633 		ip = (char *)&addr6->sin6_addr;
4634 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4635 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4636 
4637 	} else {
4638 		dst_addr->sa_family = AF_INET;
4639 		addr = (struct sockaddr_in *)dst_addr;
4640 		ip = (char *)&addr->sin_addr;
4641 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4642 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4643 	}
4644 
4645 	ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
4646 	vfree(dst_addr);
4647 	return ep;
4648 }
4649 
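/*
 * When boot targets are exported through sysfs (ql4xdisablesysfsboot == 0),
 * return QLA_ERROR for the primary/secondary boot DDB indexes so they are
 * skipped while building the normal-target list; with sysfs boot disabled,
 * every index is accepted.
 */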
4650 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4651 {
4652 	if (ql4xdisablesysfsboot)
4653 		return QLA_SUCCESS;
4654 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
4655 		return QLA_ERROR;
4656 	return QLA_SUCCESS;
4657 }
4658 
4659 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4660 					  struct ddb_entry *ddb_entry)
4661 {
4662 	uint16_t def_timeout;
4663 
4664 	ddb_entry->ddb_type = FLASH_DDB;
4665 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
4666 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4667 	ddb_entry->ha = ha;
4668 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4669 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4670 
4671 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4672 	atomic_set(&ddb_entry->relogin_timer, 0);
4673 	atomic_set(&ddb_entry->relogin_retry_count, 0);
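	/* Use the DDB's default timeout for relogin only if it falls in a
	 * sane range (greater than LOGIN_TOV and less than ten times it);
	 * otherwise fall back to LOGIN_TOV.
	 */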
4674 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
4675 	ddb_entry->default_relogin_timeout =
4676 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4677 		def_timeout : LOGIN_TOV;
4678 	ddb_entry->default_time2wait =
4679 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4680 }
4681 
4682 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4683 {
4684 	uint32_t idx = 0;
4685 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4686 	uint32_t sts[MBOX_REG_COUNT];
4687 	uint32_t ip_state;
4688 	unsigned long wtime;
4689 	int ret;
4690 
4691 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
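	/* Poll the state of each of the four IP interfaces until all of
	 * them have reached a settled address state (or returned an error),
	 * or until IP_CONFIG_TOV seconds have elapsed.
	 */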
4692 	do {
4693 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4694 			if (ip_idx[idx] == -1)
4695 				continue;
4696 
4697 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4698 
4699 			if (ret == QLA_ERROR) {
4700 				ip_idx[idx] = -1;
4701 				continue;
4702 			}
4703 
4704 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4705 
4706 			DEBUG2(ql4_printk(KERN_INFO, ha,
4707 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
4708 					  ip_idx[idx], ip_state));
4709 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4710 			    ip_state == IP_ADDRSTATE_INVALID ||
4711 			    ip_state == IP_ADDRSTATE_PREFERRED ||
4712 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
4713 			    ip_state == IP_ADDRSTATE_DISABLING)
4714 				ip_idx[idx] = -1;
4715 		}
4716 
4717 		/* Break if all IP states checked */
4718 		if ((ip_idx[0] == -1) &&
4719 		    (ip_idx[1] == -1) &&
4720 		    (ip_idx[2] == -1) &&
4721 		    (ip_idx[3] == -1))
4722 			break;
4723 		schedule_timeout_uninterruptible(HZ);
4724 	} while (time_after(wtime, jiffies));
4725 }
4726 
4727 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4728 				  struct list_head *list_st)
4729 {
4730 	struct qla_ddb_index  *st_ddb_idx;
4731 	int max_ddbs;
4732 	int fw_idx_size;
4733 	struct dev_db_entry *fw_ddb_entry;
4734 	dma_addr_t fw_ddb_dma;
4735 	int ret;
4736 	uint32_t idx = 0, next_idx = 0;
4737 	uint32_t state = 0, conn_err = 0;
4738 	uint16_t conn_id = 0;
4739 
4740 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4741 				      &fw_ddb_dma);
4742 	if (fw_ddb_entry == NULL) {
4743 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4744 		goto exit_st_list;
4745 	}
4746 
4747 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4748 				     MAX_DEV_DB_ENTRIES;
4749 	fw_idx_size = sizeof(struct qla_ddb_index);
4750 
4751 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
4752 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4753 					      NULL, &next_idx, &state,
4754 					      &conn_err, NULL, &conn_id);
4755 		if (ret == QLA_ERROR)
4756 			break;
4757 
4758 		/* Ignore DDB if invalid state (unassigned) */
4759 		if (state == DDB_DS_UNASSIGNED)
4760 			goto continue_next_st;
4761 
4762 		/* Check if ST, add to the list_st */
4763 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4764 			goto continue_next_st;
4765 
4766 		st_ddb_idx = vzalloc(fw_idx_size);
4767 		if (!st_ddb_idx)
4768 			break;
4769 
4770 		st_ddb_idx->fw_ddb_idx = idx;
4771 
4772 		list_add_tail(&st_ddb_idx->list, list_st);
4773 continue_next_st:
4774 		if (next_idx == 0)
4775 			break;
4776 	}
4777 
4778 exit_st_list:
4779 	if (fw_ddb_entry)
4780 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4781 }
4782 
4783 /**
4784  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4785  * @ha: pointer to adapter structure
4786  * @list_ddb: List from which failed ddb to be removed
4787  *
4788  * Iterate over the list of DDBs and remove those that are in either the
4789  * "no connection active" or the "failed" state
4790  **/
4791 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4792 				      struct list_head *list_ddb)
4793 {
4794 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
4795 	uint32_t next_idx = 0;
4796 	uint32_t state = 0, conn_err = 0;
4797 	int ret;
4798 
4799 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4800 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4801 					      NULL, 0, NULL, &next_idx, &state,
4802 					      &conn_err, NULL, NULL);
4803 		if (ret == QLA_ERROR)
4804 			continue;
4805 
4806 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4807 		    state == DDB_DS_SESSION_FAILED) {
4808 			list_del_init(&ddb_idx->list);
4809 			vfree(ddb_idx);
4810 		}
4811 	}
4812 }
4813 
4814 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4815 				   struct dev_db_entry *fw_ddb_entry,
4816 				   int is_reset)
4817 {
4818 	struct iscsi_cls_session *cls_sess;
4819 	struct iscsi_session *sess;
4820 	struct iscsi_cls_conn *cls_conn;
4821 	struct iscsi_endpoint *ep;
4822 	uint16_t cmds_max = 32;
4823 	uint16_t conn_id = 0;
4824 	uint32_t initial_cmdsn = 0;
4825 	int ret = QLA_SUCCESS;
4826 
4827 	struct ddb_entry *ddb_entry = NULL;
4828 
4829 	/* Create session object, with INVALID_ENTRY,
4830 	 * the target_id would get set when we issue the login
4831 	 */
4832 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4833 				       cmds_max, sizeof(struct ddb_entry),
4834 				       sizeof(struct ql4_task_data),
4835 				       initial_cmdsn, INVALID_ENTRY);
4836 	if (!cls_sess) {
4837 		ret = QLA_ERROR;
4838 		goto exit_setup;
4839 	}
4840 
4841 	/*
4842 	 * iscsi_session_setup increments the driver reference count, which would
4843 	 * prevent unloading the driver, so call module_put to decrement it again.
4844 	 **/
4845 	module_put(qla4xxx_iscsi_transport.owner);
4846 	sess = cls_sess->dd_data;
4847 	ddb_entry = sess->dd_data;
4848 	ddb_entry->sess = cls_sess;
4849 
4850 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4851 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4852 	       sizeof(struct dev_db_entry));
4853 
4854 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4855 
4856 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4857 
4858 	if (!cls_conn) {
4859 		ret = QLA_ERROR;
4860 		goto exit_setup;
4861 	}
4862 
4863 	ddb_entry->conn = cls_conn;
4864 
4865 	/* Setup ep, for displaying attributes in sysfs */
4866 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4867 	if (ep) {
4868 		ep->conn = cls_conn;
4869 		cls_conn->ep = ep;
4870 	} else {
4871 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4872 		ret = QLA_ERROR;
4873 		goto exit_setup;
4874 	}
4875 
4876 	/* Update sess/conn params */
4877 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4878 
4879 	if (is_reset == RESET_ADAPTER) {
4880 		iscsi_block_session(cls_sess);
4881 		/* Use the relogin path to discover new devices
4882 		 *  by short-circuiting the logic of setting
4883 		 *  a relogin timer - instead set the flags
4884 		 *  to initiate login right away.
4885 		 */
4886 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4887 		set_bit(DF_RELOGIN, &ddb_entry->flags);
4888 	}
4889 
4890 exit_setup:
4891 	return ret;
4892 }
4893 
4894 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4895 				  struct list_head *list_nt, int is_reset)
4896 {
4897 	struct dev_db_entry *fw_ddb_entry;
4898 	dma_addr_t fw_ddb_dma;
4899 	int max_ddbs;
4900 	int fw_idx_size;
4901 	int ret;
4902 	uint32_t idx = 0, next_idx = 0;
4903 	uint32_t state = 0, conn_err = 0;
4904 	uint16_t conn_id = 0;
4905 	struct qla_ddb_index  *nt_ddb_idx;
4906 
4907 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4908 				      &fw_ddb_dma);
4909 	if (fw_ddb_entry == NULL) {
4910 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4911 		goto exit_nt_list;
4912 	}
4913 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4914 				     MAX_DEV_DB_ENTRIES;
4915 	fw_idx_size = sizeof(struct qla_ddb_index);
4916 
4917 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
4918 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4919 					      NULL, &next_idx, &state,
4920 					      &conn_err, NULL, &conn_id);
4921 		if (ret == QLA_ERROR)
4922 			break;
4923 
4924 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4925 			goto continue_next_nt;
4926 
4927 		/* Check if NT (non-empty iscsi_name), then add it to the list */
4928 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4929 			goto continue_next_nt;
4930 
4931 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4932 		    state == DDB_DS_SESSION_FAILED))
4933 			goto continue_next_nt;
4934 
4935 		DEBUG2(ql4_printk(KERN_INFO, ha,
4936 				  "Adding  DDB to session = 0x%x\n", idx));
4937 		if (is_reset == INIT_ADAPTER) {
4938 			nt_ddb_idx = vmalloc(fw_idx_size);
4939 			if (!nt_ddb_idx)
4940 				break;
4941 
4942 			nt_ddb_idx->fw_ddb_idx = idx;
4943 
4944 			/* Copy original isid as it may get updated in function
4945 			 * qla4xxx_update_isid(). We need original isid in
4946 			 * function qla4xxx_compare_tuple_ddb to find duplicate
4947 			 * target */
4948 			memcpy(&nt_ddb_idx->flash_isid[0],
4949 			       &fw_ddb_entry->isid[0],
4950 			       sizeof(nt_ddb_idx->flash_isid));
4951 
4952 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
4953 							  fw_ddb_entry);
4954 			if (ret == QLA_SUCCESS) {
4955 				/* free nt_ddb_idx and do not add to list_nt */
4956 				vfree(nt_ddb_idx);
4957 				goto continue_next_nt;
4958 			}
4959 
4960 			/* Copy updated isid */
4961 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4962 			       sizeof(struct dev_db_entry));
4963 
4964 			list_add_tail(&nt_ddb_idx->list, list_nt);
4965 		} else if (is_reset == RESET_ADAPTER) {
4966 			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4967 								QLA_SUCCESS)
4968 				goto continue_next_nt;
4969 		}
4970 
4971 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4972 		if (ret == QLA_ERROR)
4973 			goto exit_nt_list;
4974 
4975 continue_next_nt:
4976 		if (next_idx == 0)
4977 			break;
4978 	}
4979 
4980 exit_nt_list:
4981 	if (fw_ddb_entry)
4982 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4983 }
4984 
4985 /**
4986  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4987  * @ha: pointer to adapter structure
4988  * @is_reset: Is this init path or reset path
4989  *
4990  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4991  * using connection open, then create the list of normal targets (nt)
4992  * from firmware DDBs. Based on the nt list, set up session and
4993  * connection objects.
4994  **/
4995 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4996 {
4997 	uint16_t tmo = 0;
4998 	struct list_head list_st, list_nt;
4999 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
5000 	unsigned long wtime;
5001 
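	/* If the link is not up yet, just remember that a build was requested;
	 * the DPC link-change handler will call back into this function once
	 * AF_LINK_UP is set.
	 */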
5002 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
5003 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
5004 		ha->is_reset = is_reset;
5005 		return;
5006 	}
5007 
5008 	INIT_LIST_HEAD(&list_st);
5009 	INIT_LIST_HEAD(&list_nt);
5010 
5011 	qla4xxx_build_st_list(ha, &list_st);
5012 
5013 	/* Before issuing conn open mbox, ensure all IP states are configured.
5014 	 * Note, conn open fails if IPs are not configured
5015 	 */
5016 	qla4xxx_wait_for_ip_configuration(ha);
5017 
5018 	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
5019 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
5020 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
5021 	}
5022 
5023 	/* Wait for all sendtargets to complete, for a minimum of 12 sec */
5024 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
5025 	       (ha->def_timeout < LOGIN_TOV * 10) ?
5026 	       ha->def_timeout : LOGIN_TOV);
5027 
5028 	DEBUG2(ql4_printk(KERN_INFO, ha,
5029 			  "Default time to wait for build ddb %d\n", tmo));
5030 
5031 	wtime = jiffies + (HZ * tmo);
5032 	do {
5033 		if (list_empty(&list_st))
5034 			break;
5035 
5036 		qla4xxx_remove_failed_ddb(ha, &list_st);
5037 		schedule_timeout_uninterruptible(HZ / 10);
5038 	} while (time_after(wtime, jiffies));
5039 
5040 	/* Free up the sendtargets list */
5041 	qla4xxx_free_ddb_list(&list_st);
5042 
5043 	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
5044 
5045 	qla4xxx_free_ddb_list(&list_nt);
5046 
5047 	qla4xxx_free_ddb_index(ha);
5048 }
5049 
5050 /**
5051  * qla4xxx_probe_adapter - callback function to probe HBA
5052  * @pdev: pointer to pci_dev structure
5053  * @ent: pointer to the pci_device_id entry
5054  *
5055  * This routine will probe for QLogic 4xxx iSCSI host adapters.
5056  * It returns zero if successful. It also initializes all data necessary for
5057  * the driver.
5058  **/
5059 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5060 					   const struct pci_device_id *ent)
5061 {
5062 	int ret = -ENODEV, status;
5063 	struct Scsi_Host *host;
5064 	struct scsi_qla_host *ha;
5065 	uint8_t init_retry_count = 0;
5066 	char buf[34];
5067 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
5068 	uint32_t dev_state;
5069 
5070 	if (pci_enable_device(pdev))
5071 		return -1;
5072 
5073 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
5074 	if (host == NULL) {
5075 		printk(KERN_WARNING
5076 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
5077 		goto probe_disable_device;
5078 	}
5079 
5080 	/* Clear our data area */
5081 	ha = to_qla_host(host);
5082 	memset(ha, 0, sizeof(*ha));
5083 
5084 	/* Save the information from PCI BIOS.	*/
5085 	ha->pdev = pdev;
5086 	ha->host = host;
5087 	ha->host_no = host->host_no;
5088 
5089 	pci_enable_pcie_error_reporting(pdev);
5090 
5091 	/* Setup Runtime configurable options */
5092 	if (is_qla8022(ha)) {
5093 		ha->isp_ops = &qla4_82xx_isp_ops;
5094 		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
5095 		rwlock_init(&ha->hw_lock);
5096 		ha->qdr_sn_window = -1;
5097 		ha->ddr_mn_window = -1;
5098 		ha->curr_window = 255;
5099 		ha->func_num = PCI_FUNC(ha->pdev->devfn);
5100 		nx_legacy_intr = &legacy_intr[ha->func_num];
5101 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5102 		ha->nx_legacy_intr.tgt_status_reg =
5103 			nx_legacy_intr->tgt_status_reg;
5104 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5105 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5106 	} else {
5107 		ha->isp_ops = &qla4xxx_isp_ops;
5108 	}
5109 
5110 	/* Set EEH reset type to fundamental if required by hba */
5111 	if (is_qla8022(ha))
5112 		pdev->needs_freset = 1;
5113 
5114 	/* Configure PCI I/O space. */
5115 	ret = ha->isp_ops->iospace_config(ha);
5116 	if (ret)
5117 		goto probe_failed_ioconfig;
5118 
5119 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
5120 		   pdev->device, pdev->irq, ha->reg);
5121 
5122 	qla4xxx_config_dma_addressing(ha);
5123 
5124 	/* Initialize lists and spinlocks. */
5125 	INIT_LIST_HEAD(&ha->free_srb_q);
5126 
5127 	mutex_init(&ha->mbox_sem);
5128 	mutex_init(&ha->chap_sem);
5129 	init_completion(&ha->mbx_intr_comp);
5130 	init_completion(&ha->disable_acb_comp);
5131 
5132 	spin_lock_init(&ha->hardware_lock);
5133 	spin_lock_init(&ha->work_lock);
5134 
5135 	/* Initialize work list */
5136 	INIT_LIST_HEAD(&ha->work_list);
5137 
5138 	/* Allocate dma buffers */
5139 	if (qla4xxx_mem_alloc(ha)) {
5140 		ql4_printk(KERN_WARNING, ha,
5141 		    "[ERROR] Failed to allocate memory for adapter\n");
5142 
5143 		ret = -ENOMEM;
5144 		goto probe_failed;
5145 	}
5146 
5147 	host->cmd_per_lun = 3;
5148 	host->max_channel = 0;
5149 	host->max_lun = MAX_LUNS - 1;
5150 	host->max_id = MAX_TARGETS;
5151 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
5152 	host->can_queue = MAX_SRBS;
5153 	host->transportt = qla4xxx_scsi_transport;
5154 
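	/*
	 * A shared tag map sized to MAX_SRBS allows outstanding commands to
	 * be looked up by tag (see the scsi_host_find_tag() usage in
	 * qla4xxx_del_from_active_array() and qla4xxx_eh_wait_for_commands()).
	 */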
5155 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
5156 	if (ret) {
5157 		ql4_printk(KERN_WARNING, ha,
5158 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
5159 		goto probe_failed;
5160 	}
5161 
5162 	pci_set_drvdata(pdev, ha);
5163 
5164 	ret = scsi_add_host(host, &pdev->dev);
5165 	if (ret)
5166 		goto probe_failed;
5167 
5168 	if (is_qla8022(ha))
5169 		(void) qla4_8xxx_get_flash_info(ha);
5170 
5171 	/*
5172 	 * Initialize the host adapter request/response queues and
5173 	 * firmware.
5174 	 * NOTE: interrupts are enabled upon successful completion.
5175 	 */
5176 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
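	/*
	 * Retry initialization up to MAX_INIT_RETRIES times, resetting the
	 * chip between attempts. For ISP82xx, give up early if the hardware
	 * has entered the FAILED state.
	 */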
5177 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5178 	    init_retry_count++ < MAX_INIT_RETRIES) {
5179 
5180 		if (is_qla8022(ha)) {
5181 			ha->isp_ops->idc_lock(ha);
5182 			dev_state = qla4_8xxx_rd_direct(ha,
5183 							QLA82XX_CRB_DEV_STATE);
5184 			ha->isp_ops->idc_unlock(ha);
5185 			if (dev_state == QLA8XXX_DEV_FAILED) {
5186 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5187 				    "initialize adapter. H/W is in failed state\n",
5188 				    __func__);
5189 				break;
5190 			}
5191 		}
5192 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
5193 			      "(%d)\n", __func__, init_retry_count));
5194 
5195 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
5196 			continue;
5197 
5198 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5199 	}
5200 
5201 	if (!test_bit(AF_ONLINE, &ha->flags)) {
5202 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
5203 
5204 		if (is_qla8022(ha) && ql4xdontresethba) {
5205 			/* Put the device in failed state. */
5206 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5207 			ha->isp_ops->idc_lock(ha);
5208 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5209 					    QLA8XXX_DEV_FAILED);
5210 			ha->isp_ops->idc_unlock(ha);
5211 		}
5212 		ret = -ENODEV;
5213 		goto remove_host;
5214 	}
5215 
5216 	/* Startup the kernel thread for this host adapter. */
5217 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
5218 		      "qla4xxx_dpc\n", __func__));
5219 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
5220 	ha->dpc_thread = create_singlethread_workqueue(buf);
5221 	if (!ha->dpc_thread) {
5222 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
5223 		ret = -ENODEV;
5224 		goto remove_host;
5225 	}
5226 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
5227 
5228 	sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
5229 	ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
5230 	if (!ha->task_wq) {
5231 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5232 		ret = -ENODEV;
5233 		goto remove_host;
5234 	}
5235 
5236 	/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5237 	 * (which is called indirectly by qla4xxx_initialize_adapter),
5238 	 * so that irqs will be registered after crbinit but before
5239 	 * mbx_intr_enable.
5240 	 */
5241 	if (is_qla40XX(ha)) {
5242 		ret = qla4xxx_request_irqs(ha);
5243 		if (ret) {
5244 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5245 			    "interrupt %d already in use.\n", pdev->irq);
5246 			goto remove_host;
5247 		}
5248 	}
5249 
5250 	pci_save_state(ha->pdev);
5251 	ha->isp_ops->enable_intrs(ha);
5252 
5253 	/* Start timer thread. */
5254 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5255 
5256 	set_bit(AF_INIT_DONE, &ha->flags);
5257 
5258 	qla4_8xxx_alloc_sysfs_attr(ha);
5259 
5260 	printk(KERN_INFO
5261 	       " QLogic iSCSI HBA Driver version: %s\n"
5262 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5263 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5264 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5265 	       ha->patch_number, ha->build_number);
5266 
5267 	if (qla4xxx_setup_boot_info(ha))
5268 		ql4_printk(KERN_ERR, ha,
5269 			   "%s: No iSCSI boot target configured\n", __func__);
5270 
5271 	/* Build the ddb list and log in to each entry */
5272 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5273 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5274 
5275 	qla4xxx_create_chap_list(ha);
5276 
5277 	qla4xxx_create_ifaces(ha);
5278 	return 0;
5279 
5280 remove_host:
5281 	scsi_remove_host(ha->host);
5282 
5283 probe_failed:
5284 	qla4xxx_free_adapter(ha);
5285 
5286 probe_failed_ioconfig:
5287 	pci_disable_pcie_error_reporting(pdev);
5288 	scsi_host_put(ha->host);
5289 
5290 probe_disable_device:
5291 	pci_disable_device(pdev);
5292 
5293 	return ret;
5294 }
5295 
5296 /**
5297  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initializing
5298  * @ha: pointer to adapter structure
5299  *
5300  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5301  * so that the other port will not re-initialize while in the process of
5302  * removing the ha due to driver unload or hba hotplug.
5303  **/
5304 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5305 {
5306 	struct scsi_qla_host *other_ha = NULL;
5307 	struct pci_dev *other_pdev = NULL;
5308 	int fn = ISP4XXX_PCI_FN_2;
5309 
5310 	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
5311 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5312 		fn = ISP4XXX_PCI_FN_1;
5313 
5314 	other_pdev =
5315 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5316 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5317 		fn));
5318 
5319 	/* Get other_ha if other_pdev is valid and its state is enabled */
5320 	if (other_pdev) {
5321 		if (atomic_read(&other_pdev->enable_cnt)) {
5322 			other_ha = pci_get_drvdata(other_pdev);
5323 			if (other_ha) {
5324 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
5325 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5326 				    "Prevent %s reinit\n", __func__,
5327 				    dev_name(&other_ha->pdev->dev)));
5328 			}
5329 		}
5330 		pci_dev_put(other_pdev);
5331 	}
5332 }
5333 
5334 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5335 {
5336 	struct ddb_entry *ddb_entry;
5337 	int options;
5338 	int idx;
5339 
5340 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5341 
5342 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5343 		if ((ddb_entry != NULL) &&
5344 		    (ddb_entry->ddb_type == FLASH_DDB)) {
5345 
5346 			options = LOGOUT_OPTION_CLOSE_SESSION;
5347 			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5348 			    == QLA_ERROR)
5349 				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5350 					   __func__);
5351 
5352 			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5353 			/*
5354 			 * The driver module's reference count was decremented
5355 			 * when the session was set up so that module unload is
5356 			 * seamless without actually destroying the session;
5357 			 * take that reference back before tearing it down.
5358 			 */
5359 			try_module_get(qla4xxx_iscsi_transport.owner);
5360 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
5361 			qla4xxx_free_ddb(ha, ddb_entry);
5362 			iscsi_session_teardown(ddb_entry->sess);
5363 		}
5364 	}
5365 }
5366 /**
5367  * qla4xxx_remove_adapter - callback function to remove adapter.
5368  * @pdev: PCI device pointer
5369  **/
5370 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5371 {
5372 	struct scsi_qla_host *ha;
5373 
5374 	ha = pci_get_drvdata(pdev);
5375 
5376 	if (is_qla40XX(ha))
5377 		qla4xxx_prevent_other_port_reinit(ha);
5378 
5379 	/* destroy iface from sysfs */
5380 	qla4xxx_destroy_ifaces(ha);
5381 
5382 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
5383 		iscsi_boot_destroy_kset(ha->boot_kset);
5384 
5385 	qla4xxx_destroy_fw_ddb_session(ha);
5386 	qla4_8xxx_free_sysfs_attr(ha);
5387 
5388 	scsi_remove_host(ha->host);
5389 
5390 	qla4xxx_free_adapter(ha);
5391 
5392 	scsi_host_put(ha->host);
5393 
5394 	pci_disable_pcie_error_reporting(pdev);
5395 	pci_disable_device(pdev);
5396 	pci_set_drvdata(pdev, NULL);
5397 }
5398 
5399 /**
5400  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5401  * @ha: HA context
5402  *
5403  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
5404  * supported addressing method.
5405  */
5406 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
5407 {
5408 	int retval;
5409 
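	/*
	 * Prefer 64-bit DMA masks; fall back to 32-bit masks when the
	 * 64-bit ones cannot be set.
	 */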
5410 	/* Update our PCI device dma_mask for full 64 bit mask */
5411 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
5412 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
5413 			dev_dbg(&ha->pdev->dev,
5414 				  "Failed to set 64 bit PCI consistent mask; "
5415 				   "using 32 bit.\n");
5416 			retval = pci_set_consistent_dma_mask(ha->pdev,
5417 							     DMA_BIT_MASK(32));
5418 		}
5419 	} else
5420 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
5421 }
5422 
5423 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5424 {
5425 	struct iscsi_cls_session *cls_sess;
5426 	struct iscsi_session *sess;
5427 	struct ddb_entry *ddb;
5428 	int queue_depth = QL4_DEF_QDEPTH;
5429 
5430 	cls_sess = starget_to_session(sdev->sdev_target);
5431 	sess = cls_sess->dd_data;
5432 	ddb = sess->dd_data;
5433 
5434 	sdev->hostdata = ddb;
5435 	sdev->tagged_supported = 1;
5436 
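	/* Honor ql4xmaxqdepth only when it is within the valid range. */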
5437 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5438 		queue_depth = ql4xmaxqdepth;
5439 
5440 	scsi_activate_tcq(sdev, queue_depth);
5441 	return 0;
5442 }
5443 
5444 static int qla4xxx_slave_configure(struct scsi_device *sdev)
5445 {
5446 	sdev->tagged_supported = 1;
5447 	return 0;
5448 }
5449 
5450 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5451 {
5452 	scsi_deactivate_tcq(sdev, 1);
5453 }
5454 
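/*
 * Dynamic queue-depth adjustment is honored only when the ql4xqfulltracking
 * module parameter is enabled.
 */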
5455 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5456 				      int reason)
5457 {
5458 	if (!ql4xqfulltracking)
5459 		return -EOPNOTSUPP;
5460 
5461 	return iscsi_change_queue_depth(sdev, qdepth, reason);
5462 }
5463 
5464 /**
5465  * qla4xxx_del_from_active_array - returns an active srb
5466  * @ha: Pointer to host adapter structure.
5467  * @index: index into the active_array
5468  *
5469  * This routine removes and returns the srb at the specified index
5470  **/
5471 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5472     uint32_t index)
5473 {
5474 	struct srb *srb = NULL;
5475 	struct scsi_cmnd *cmd = NULL;
5476 
5477 	cmd = scsi_host_find_tag(ha->host, index);
5478 	if (!cmd)
5479 		return srb;
5480 
5481 	srb = (struct srb *)CMD_SP(cmd);
5482 	if (!srb)
5483 		return srb;
5484 
5485 	/* update counters */
5486 	if (srb->flags & SRB_DMA_VALID) {
5487 		ha->req_q_count += srb->iocb_cnt;
5488 		ha->iocb_cnt -= srb->iocb_cnt;
5489 		if (srb->cmd)
5490 			srb->cmd->host_scribble =
5491 				(unsigned char *)(unsigned long) MAX_SRBS;
5492 	}
5493 	return srb;
5494 }
5495 
5496 /**
5497  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
5498  * @ha: Pointer to host adapter structure.
5499  * @cmd: Scsi Command to wait on.
5500  *
5501  * This routine waits for the command to be returned by the firmware
5502  * for a bounded amount of time.
5503  **/
5504 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5505 				      struct scsi_cmnd *cmd)
5506 {
5507 	int done = 0;
5508 	struct srb *rp;
5509 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
5510 	int ret = SUCCESS;
5511 
5512 	/* Don't wait on the command if a PCI error is being handled
5513 	 * by the PCI AER driver
5514 	 */
5515 	if (unlikely(pci_channel_offline(ha->pdev)) ||
5516 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
5517 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5518 		    ha->host_no, __func__);
5519 		return ret;
5520 	}
5521 
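	/*
	 * Poll every two seconds, up to EH_WAIT_CMD_TOV passes, for the
	 * firmware to return the command (the srb pointer in CMD_SP is
	 * cleared once the command has been returned).
	 */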
5522 	do {
5523 		/* Check to see if it's been returned to the OS */
5524 		rp = (struct srb *) CMD_SP(cmd);
5525 		if (rp == NULL) {
5526 			done++;
5527 			break;
5528 		}
5529 
5530 		msleep(2000);
5531 	} while (max_wait_time--);
5532 
5533 	return done;
5534 }
5535 
5536 /**
5537  * qla4xxx_wait_for_hba_online - waits for HBA to come online
5538  * @ha: Pointer to host adapter structure
5539  **/
5540 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5541 {
5542 	unsigned long wait_online;
5543 
5544 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
5545 	while (time_before(jiffies, wait_online)) {
5546 
5547 		if (adapter_up(ha))
5548 			return QLA_SUCCESS;
5549 
5550 		msleep(2000);
5551 	}
5552 
5553 	return QLA_ERROR;
5554 }
5555 
5556 /**
5557  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
5558  * @ha: pointer to HBA
5559  * @stgt: pointer to the SCSI target
5560  * @sdev: pointer to the SCSI device, or NULL for all devices on the target
5561  *
5562  * This function waits for all outstanding commands to the target or lun to
5563  * complete. It returns 0 if all pending commands are returned and 1 otherwise.
5564  **/
5565 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5566 					struct scsi_target *stgt,
5567 					struct scsi_device *sdev)
5568 {
5569 	int cnt;
5570 	int status = 0;
5571 	struct scsi_cmnd *cmd;
5572 
5573 	/*
5574 	 * Waiting for all commands for the designated target or dev
5575 	 * in the active array
5576 	 */
5577 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5578 		cmd = scsi_host_find_tag(ha->host, cnt);
5579 		if (cmd && stgt == scsi_target(cmd->device) &&
5580 		    (!sdev || sdev == cmd->device)) {
5581 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5582 				status++;
5583 				break;
5584 			}
5585 		}
5586 	}
5587 	return status;
5588 }
5589 
5590 /**
5591  * qla4xxx_eh_abort - callback for abort task.
5592  * @cmd: Pointer to Linux's SCSI command structure
5593  *
5594  * This routine is called by the Linux OS to abort the specified
5595  * command.
5596  **/
5597 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5598 {
5599 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5600 	unsigned int id = cmd->device->id;
5601 	unsigned int lun = cmd->device->lun;
5602 	unsigned long flags;
5603 	struct srb *srb = NULL;
5604 	int ret = SUCCESS;
5605 	int wait = 0;
5606 
5607 	ql4_printk(KERN_INFO, ha,
5608 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5609 	    ha->host_no, id, lun, cmd);
5610 
5611 	spin_lock_irqsave(&ha->hardware_lock, flags);
5612 	srb = (struct srb *) CMD_SP(cmd);
5613 	if (!srb) {
5614 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
5615 		return SUCCESS;
5616 	}
5617 	kref_get(&srb->srb_ref);
5618 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5619 
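	/*
	 * The srb reference taken above keeps the srb valid while the abort
	 * is issued; it is dropped again once the mailbox command completes.
	 */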
5620 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5621 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5622 		    ha->host_no, id, lun));
5623 		ret = FAILED;
5624 	} else {
5625 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5626 		    ha->host_no, id, lun));
5627 		wait = 1;
5628 	}
5629 
5630 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5631 
5632 	/* Wait for command to complete */
5633 	if (wait) {
5634 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5635 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5636 			    ha->host_no, id, lun));
5637 			ret = FAILED;
5638 		}
5639 	}
5640 
5641 	ql4_printk(KERN_INFO, ha,
5642 	    "scsi%ld:%d:%d: Abort command - %s\n",
5643 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
5644 
5645 	return ret;
5646 }
5647 
5648 /**
5649  * qla4xxx_eh_device_reset - callback for device (lun) reset.
5650  * @cmd: Pointer to Linux's SCSI command structure
5651  *
5652  * This routine is called by the Linux OS to issue a lun reset on the
5653  * specified device.
5654  **/
5655 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5656 {
5657 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5658 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
5659 	int ret = FAILED, stat;
5660 
5661 	if (!ddb_entry)
5662 		return ret;
5663 
5664 	ret = iscsi_block_scsi_eh(cmd);
5665 	if (ret)
5666 		return ret;
5667 	ret = FAILED;
5668 
5669 	ql4_printk(KERN_INFO, ha,
5670 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5671 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
5672 
5673 	DEBUG2(printk(KERN_INFO
5674 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5675 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
5676 		      cmd, jiffies, cmd->request->timeout / HZ,
5677 		      ha->dpc_flags, cmd->result, cmd->allowed));
5678 
5679 	/* FIXME: wait for hba to go online */
5680 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5681 	if (stat != QLA_SUCCESS) {
5682 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
5683 		goto eh_dev_reset_done;
5684 	}
5685 
5686 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5687 					 cmd->device)) {
5688 		ql4_printk(KERN_INFO, ha,
5689 			   "DEVICE RESET FAILED - waiting for "
5690 			   "commands.\n");
5691 		goto eh_dev_reset_done;
5692 	}
5693 
5694 	/* Send marker. */
5695 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5696 		MM_LUN_RESET) != QLA_SUCCESS)
5697 		goto eh_dev_reset_done;
5698 
5699 	ql4_printk(KERN_INFO, ha,
5700 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5701 		   ha->host_no, cmd->device->channel, cmd->device->id,
5702 		   cmd->device->lun);
5703 
5704 	ret = SUCCESS;
5705 
5706 eh_dev_reset_done:
5707 
5708 	return ret;
5709 }
5710 
5711 /**
5712  * qla4xxx_eh_target_reset - callback for target reset.
5713  * @cmd: Pointer to Linux's SCSI command structure
5714  *
5715  * This routine is called by the Linux OS to reset the target.
5716  **/
5717 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5718 {
5719 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5720 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
5721 	int stat, ret;
5722 
5723 	if (!ddb_entry)
5724 		return FAILED;
5725 
5726 	ret = iscsi_block_scsi_eh(cmd);
5727 	if (ret)
5728 		return ret;
5729 
5730 	starget_printk(KERN_INFO, scsi_target(cmd->device),
5731 		       "WARM TARGET RESET ISSUED.\n");
5732 
5733 	DEBUG2(printk(KERN_INFO
5734 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5735 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
5736 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
5737 		      ha->dpc_flags, cmd->result, cmd->allowed));
5738 
5739 	stat = qla4xxx_reset_target(ha, ddb_entry);
5740 	if (stat != QLA_SUCCESS) {
5741 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5742 			       "WARM TARGET RESET FAILED.\n");
5743 		return FAILED;
5744 	}
5745 
5746 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5747 					 NULL)) {
5748 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5749 			       "WARM TARGET DEVICE RESET FAILED - "
5750 			       "waiting for commands.\n");
5751 		return FAILED;
5752 	}
5753 
5754 	/* Send marker. */
5755 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5756 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5757 		starget_printk(KERN_INFO, scsi_target(cmd->device),
5758 			       "WARM TARGET DEVICE RESET FAILED - "
5759 			       "marker iocb failed.\n");
5760 		return FAILED;
5761 	}
5762 
5763 	starget_printk(KERN_INFO, scsi_target(cmd->device),
5764 		       "WARM TARGET RESET SUCCEEDED.\n");
5765 	return SUCCESS;
5766 }
5767 
5768 /**
5769  * qla4xxx_is_eh_active - check if error handler is running
5770  * @shost: Pointer to SCSI Host struct
5771  *
5772  * This routine determines whether the host reset was invoked from the
5773  * SCSI error-handler (EH) path or from an application such as sg_reset.
5774  **/
5775 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5776 {
5777 	if (shost->shost_state == SHOST_RECOVERY)
5778 		return 1;
5779 	return 0;
5780 }
5781 
5782 /**
5783  * qla4xxx_eh_host_reset - kernel callback
5784  * @cmd: Pointer to Linux's SCSI command structure
5785  *
5786  * This routine is invoked by the Linux kernel to perform fatal error
5787  * recovery on the specified adapter.
5788  **/
5789 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5790 {
5791 	int return_status = FAILED;
5792 	struct scsi_qla_host *ha;
5793 
5794 	ha = to_qla_host(cmd->device->host);
5795 
5796 	if (ql4xdontresethba) {
5797 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5798 		     ha->host_no, __func__));
5799 
5800 		/* Clear outstanding srb in queues */
5801 		if (qla4xxx_is_eh_active(cmd->device->host))
5802 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5803 
5804 		return FAILED;
5805 	}
5806 
5807 	ql4_printk(KERN_INFO, ha,
5808 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
5809 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
5810 
5811 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5812 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
5813 			      "DEAD.\n", ha->host_no, cmd->device->channel,
5814 			      __func__));
5815 
5816 		return FAILED;
5817 	}
5818 
5819 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5820 		if (is_qla8022(ha))
5821 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5822 		else
5823 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
5824 	}
5825 
5826 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
5827 		return_status = SUCCESS;
5828 
5829 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
5830 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
5831 
5832 	return return_status;
5833 }
5834 
5835 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5836 {
5837 	uint32_t mbox_cmd[MBOX_REG_COUNT];
5838 	uint32_t mbox_sts[MBOX_REG_COUNT];
5839 	struct addr_ctrl_blk_def *acb = NULL;
5840 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5841 	int rval = QLA_SUCCESS;
5842 	dma_addr_t acb_dma;
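	/*
	 * A context reset re-applies the primary ACB: read the current ACB,
	 * disable it, wait for the disable-ACB completion, then set it again.
	 */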
5843 
5844 	acb = dma_alloc_coherent(&ha->pdev->dev,
5845 				 sizeof(struct addr_ctrl_blk_def),
5846 				 &acb_dma, GFP_KERNEL);
5847 	if (!acb) {
5848 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5849 			   __func__);
5850 		rval = -ENOMEM;
5851 		goto exit_port_reset;
5852 	}
5853 
5854 	memset(acb, 0, acb_len);
5855 
5856 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5857 	if (rval != QLA_SUCCESS) {
5858 		rval = -EIO;
5859 		goto exit_free_acb;
5860 	}
5861 
5862 	rval = qla4xxx_disable_acb(ha);
5863 	if (rval != QLA_SUCCESS) {
5864 		rval = -EIO;
5865 		goto exit_free_acb;
5866 	}
5867 
5868 	wait_for_completion_timeout(&ha->disable_acb_comp,
5869 				    DISABLE_ACB_TOV * HZ);
5870 
5871 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5872 	if (rval != QLA_SUCCESS) {
5873 		rval = -EIO;
5874 		goto exit_free_acb;
5875 	}
5876 
5877 exit_free_acb:
5878 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5879 			  acb, acb_dma);
5880 exit_port_reset:
5881 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5882 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
5883 	return rval;
5884 }
5885 
5886 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5887 {
5888 	struct scsi_qla_host *ha = to_qla_host(shost);
5889 	int rval = QLA_SUCCESS;
5890 
5891 	if (ql4xdontresethba) {
5892 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5893 				  __func__));
5894 		rval = -EPERM;
5895 		goto exit_host_reset;
5896 	}
5897 
5898 	rval = qla4xxx_wait_for_hba_online(ha);
5899 	if (rval != QLA_SUCCESS) {
5900 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5901 				  "adapter\n", __func__));
5902 		rval = -EIO;
5903 		goto exit_host_reset;
5904 	}
5905 
5906 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5907 		goto recover_adapter;
5908 
5909 	switch (reset_type) {
5910 	case SCSI_ADAPTER_RESET:
5911 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
5912 		break;
5913 	case SCSI_FIRMWARE_RESET:
5914 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5915 			if (is_qla8022(ha))
5916 				/* set firmware context reset */
5917 				set_bit(DPC_RESET_HA_FW_CONTEXT,
5918 					&ha->dpc_flags);
5919 			else {
5920 				rval = qla4xxx_context_reset(ha);
5921 				goto exit_host_reset;
5922 			}
5923 		}
5924 		break;
5925 	}
5926 
5927 recover_adapter:
5928 	rval = qla4xxx_recover_adapter(ha);
5929 	if (rval != QLA_SUCCESS) {
5930 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5931 				  __func__));
5932 		rval = -EIO;
5933 	}
5934 
5935 exit_host_reset:
5936 	return rval;
5937 }
5938 
5939 /* The PCI AER driver recovers from all correctable errors without
5940  * driver intervention. For uncorrectable errors the PCI AER
5941  * driver calls the following device driver callbacks:
5942  *
5943  * - Fatal Errors - link_reset
5944  * - Non-Fatal Errors - driver's pci_error_detected(), which
5945  *   returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5946  *
5947  * The PCI AER driver then calls:
5948  * CAN_RECOVER - driver's pci_mmio_enabled(); mmio_enabled
5949  *               returns RECOVERED, or NEED_RESET if the firmware is hung
5950  * NEED_RESET - driver's slot_reset()
5951  * DISCONNECT - device is dead and cannot recover
5952  * RECOVERED - driver's pci_resume()
5953  */
5954 static pci_ers_result_t
5955 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5956 {
5957 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5958 
5959 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5960 	    ha->host_no, __func__, state);
5961 
5962 	if (!is_aer_supported(ha))
5963 		return PCI_ERS_RESULT_NONE;
5964 
5965 	switch (state) {
5966 	case pci_channel_io_normal:
5967 		clear_bit(AF_EEH_BUSY, &ha->flags);
5968 		return PCI_ERS_RESULT_CAN_RECOVER;
5969 	case pci_channel_io_frozen:
5970 		set_bit(AF_EEH_BUSY, &ha->flags);
5971 		qla4xxx_mailbox_premature_completion(ha);
5972 		qla4xxx_free_irqs(ha);
5973 		pci_disable_device(pdev);
5974 		/* Return back all IOs */
5975 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5976 		return PCI_ERS_RESULT_NEED_RESET;
5977 	case pci_channel_io_perm_failure:
5978 		set_bit(AF_EEH_BUSY, &ha->flags);
5979 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5980 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5981 		return PCI_ERS_RESULT_DISCONNECT;
5982 	}
5983 	return PCI_ERS_RESULT_NEED_RESET;
5984 }
5985 
5986 /**
5987  * qla4xxx_pci_mmio_enabled() gets called if
5988  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5989  * and read/write to the device still works.
5990  **/
5991 static pci_ers_result_t
5992 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5993 {
5994 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5995 
5996 	if (!is_aer_supported(ha))
5997 		return PCI_ERS_RESULT_NONE;
5998 
5999 	return PCI_ERS_RESULT_RECOVERED;
6000 }
6001 
6002 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6003 {
6004 	uint32_t rval = QLA_ERROR;
6005 	uint32_t ret = 0;
6006 	int fn;
6007 	struct pci_dev *other_pdev = NULL;
6008 
6009 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
6010 
6011 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6012 
6013 	if (test_bit(AF_ONLINE, &ha->flags)) {
6014 		clear_bit(AF_ONLINE, &ha->flags);
6015 		clear_bit(AF_LINK_UP, &ha->flags);
6016 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
6017 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
6018 	}
6019 
6020 	fn = PCI_FUNC(ha->pdev->devfn);
6021 	while (fn > 0) {
6022 		fn--;
6023 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
6024 		    "func %x\n", ha->host_no, __func__, fn);
6025 		/* Get the pci device given the domain, bus,
6026 		 * slot/function number */
6027 		other_pdev =
6028 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
6029 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
6030 		    fn));
6031 
6032 		if (!other_pdev)
6033 			continue;
6034 
6035 		if (atomic_read(&other_pdev->enable_cnt)) {
6036 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
6037 			    "func in enabled state%x\n", ha->host_no,
6038 			    __func__, fn);
6039 			pci_dev_put(other_pdev);
6040 			break;
6041 		}
6042 		pci_dev_put(other_pdev);
6043 	}
6044 
6045 	/* The first function on the card (the reset owner) will
6046 	 * start and initialize the firmware. The other functions
6047 	 * on the card will reset their firmware context.
6048 	 */
6049 	if (!fn) {
6050 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
6051 		    "0x%x is the owner\n", ha->host_no, __func__,
6052 		    ha->pdev->devfn);
6053 
6054 		ha->isp_ops->idc_lock(ha);
6055 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6056 				    QLA8XXX_DEV_COLD);
6057 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
6058 				    QLA82XX_IDC_VERSION);
6059 		ha->isp_ops->idc_unlock(ha);
6060 		clear_bit(AF_FW_RECOVERY, &ha->flags);
6061 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6062 		ha->isp_ops->idc_lock(ha);
6063 
6064 		if (rval != QLA_SUCCESS) {
6065 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6066 			    "FAILED\n", ha->host_no, __func__);
6067 			qla4_8xxx_clear_drv_active(ha);
6068 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6069 					    QLA8XXX_DEV_FAILED);
6070 		} else {
6071 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6072 			    "READY\n", ha->host_no, __func__);
6073 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6074 					    QLA8XXX_DEV_READY);
6075 			/* Clear driver state register */
6076 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
6077 			qla4_8xxx_set_drv_active(ha);
6078 			ret = qla4xxx_request_irqs(ha);
6079 			if (ret) {
6080 				ql4_printk(KERN_WARNING, ha, "Failed to "
6081 				    "reserve interrupt %d already in use.\n",
6082 				    ha->pdev->irq);
6083 				rval = QLA_ERROR;
6084 			} else {
6085 				ha->isp_ops->enable_intrs(ha);
6086 				rval = QLA_SUCCESS;
6087 			}
6088 		}
6089 		ha->isp_ops->idc_unlock(ha);
6090 	} else {
6091 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6092 		    "the reset owner\n", ha->host_no, __func__,
6093 		    ha->pdev->devfn);
6094 		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
6095 		     QLA8XXX_DEV_READY)) {
6096 			clear_bit(AF_FW_RECOVERY, &ha->flags);
6097 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6098 			if (rval == QLA_SUCCESS) {
6099 				ret = qla4xxx_request_irqs(ha);
6100 				if (ret) {
6101 					ql4_printk(KERN_WARNING, ha, "Failed to"
6102 					    " reserve interrupt %d already in"
6103 					    " use.\n", ha->pdev->irq);
6104 					rval = QLA_ERROR;
6105 				} else {
6106 					ha->isp_ops->enable_intrs(ha);
6107 					rval = QLA_SUCCESS;
6108 				}
6109 			}
6110 			ha->isp_ops->idc_lock(ha);
6111 			qla4_8xxx_set_drv_active(ha);
6112 			ha->isp_ops->idc_unlock(ha);
6113 		}
6114 	}
6115 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6116 	return rval;
6117 }
6118 
6119 static pci_ers_result_t
6120 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6121 {
6122 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
6123 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6124 	int rc;
6125 
6126 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
6127 	    ha->host_no, __func__);
6128 
6129 	if (!is_aer_supported(ha))
6130 		return PCI_ERS_RESULT_NONE;
6131 
6132 	/* Restore the saved state of PCIe device -
6133 	 * BAR registers, PCI Config space, PCIX, MSI,
6134 	 * IOV states
6135 	 */
6136 	pci_restore_state(pdev);
6137 
6138 	/* pci_restore_state() clears the device's saved_state flag, so
6139 	 * save the state again to allow a subsequent restore.
6140 	 */
6141 	pci_save_state(pdev);
6142 
6143 	/* Initialize device or resume if in suspended state */
6144 	rc = pci_enable_device(pdev);
6145 	if (rc) {
6146 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
6147 		    "device after reset\n", ha->host_no, __func__);
6148 		goto exit_slot_reset;
6149 	}
6150 
6151 	ha->isp_ops->disable_intrs(ha);
6152 
6153 	if (is_qla8022(ha)) {
6154 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6155 			ret = PCI_ERS_RESULT_RECOVERED;
6156 			goto exit_slot_reset;
6157 		} else
6158 			goto exit_slot_reset;
6159 	}
6160 
6161 exit_slot_reset:
6162 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
6163 	    ha->host_no, __func__, ret);
6164 	return ret;
6165 }
6166 
6167 static void
6168 qla4xxx_pci_resume(struct pci_dev *pdev)
6169 {
6170 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6171 	int ret;
6172 
6173 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
6174 	    ha->host_no, __func__);
6175 
6176 	ret = qla4xxx_wait_for_hba_online(ha);
6177 	if (ret != QLA_SUCCESS) {
6178 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
6179 		    "resume I/O from slot/link_reset\n", ha->host_no,
6180 		     __func__);
6181 	}
6182 
6183 	pci_cleanup_aer_uncorrect_error_status(pdev);
6184 	clear_bit(AF_EEH_BUSY, &ha->flags);
6185 }
6186 
6187 static struct pci_error_handlers qla4xxx_err_handler = {
6188 	.error_detected = qla4xxx_pci_error_detected,
6189 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
6190 	.slot_reset = qla4xxx_pci_slot_reset,
6191 	.resume = qla4xxx_pci_resume,
6192 };
6193 
6194 static struct pci_device_id qla4xxx_pci_tbl[] = {
6195 	{
6196 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6197 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
6198 		.subvendor	= PCI_ANY_ID,
6199 		.subdevice	= PCI_ANY_ID,
6200 	},
6201 	{
6202 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6203 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
6204 		.subvendor	= PCI_ANY_ID,
6205 		.subdevice	= PCI_ANY_ID,
6206 	},
6207 	{
6208 		.vendor		= PCI_VENDOR_ID_QLOGIC,
6209 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
6210 		.subvendor	= PCI_ANY_ID,
6211 		.subdevice	= PCI_ANY_ID,
6212 	},
6213 	{
6214 		.vendor         = PCI_VENDOR_ID_QLOGIC,
6215 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
6216 		.subvendor      = PCI_ANY_ID,
6217 		.subdevice      = PCI_ANY_ID,
6218 	},
6219 	{0, 0},
6220 };
6221 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
6222 
6223 static struct pci_driver qla4xxx_pci_driver = {
6224 	.name		= DRIVER_NAME,
6225 	.id_table	= qla4xxx_pci_tbl,
6226 	.probe		= qla4xxx_probe_adapter,
6227 	.remove		= qla4xxx_remove_adapter,
6228 	.err_handler = &qla4xxx_err_handler,
6229 };
6230 
6231 static int __init qla4xxx_module_init(void)
6232 {
6233 	int ret;
6234 
6235 	/* Allocate cache for SRBs. */
6236 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
6237 				       SLAB_HWCACHE_ALIGN, NULL);
6238 	if (srb_cachep == NULL) {
6239 		printk(KERN_ERR
6240 		       "%s: Unable to allocate SRB cache..."
6241 		       "Failing load!\n", DRIVER_NAME);
6242 		ret = -ENOMEM;
6243 		goto no_srp_cache;
6244 	}
6245 
6246 	/* Derive version string. */
6247 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
6248 	if (ql4xextended_error_logging)
6249 		strcat(qla4xxx_version_str, "-debug");
6250 
6251 	qla4xxx_scsi_transport =
6252 		iscsi_register_transport(&qla4xxx_iscsi_transport);
6253 	if (!qla4xxx_scsi_transport){
6254 		ret = -ENODEV;
6255 		goto release_srb_cache;
6256 	}
6257 
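	/*
	 * Register the PCI driver last so that probe callbacks only run once
	 * the iSCSI transport has been registered.
	 */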
6258 	ret = pci_register_driver(&qla4xxx_pci_driver);
6259 	if (ret)
6260 		goto unregister_transport;
6261 
6262 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6263 	return 0;
6264 
6265 unregister_transport:
6266 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6267 release_srb_cache:
6268 	kmem_cache_destroy(srb_cachep);
6269 no_srp_cache:
6270 	return ret;
6271 }
6272 
6273 static void __exit qla4xxx_module_exit(void)
6274 {
6275 	pci_unregister_driver(&qla4xxx_pci_driver);
6276 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6277 	kmem_cache_destroy(srb_cachep);
6278 }
6279 
6280 module_init(qla4xxx_module_init);
6281 module_exit(qla4xxx_module_exit);
6282 
6283 MODULE_AUTHOR("QLogic Corporation");
6284 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
6285 MODULE_LICENSE("GPL");
6286 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
6287