xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision 98270ab4)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsicam.h>
14 
15 #include "ql4_def.h"
16 #include "ql4_version.h"
17 #include "ql4_glbl.h"
18 #include "ql4_dbg.h"
19 #include "ql4_inline.h"
20 
21 /*
22  * Driver version
23  */
24 static char qla4xxx_version_str[40];
25 
26 /*
27  * SRB allocation cache
28  */
29 static struct kmem_cache *srb_cachep;
30 
31 /*
32  * Module parameter information and variables
33  */
34 int ql4xdontresethba = 0;
35 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
36 MODULE_PARM_DESC(ql4xdontresethba,
37 		"Don't reset the HBA for driver recovery\n"
38 		" 0 - It will reset HBA (Default)\n"
39 		" 1 - It will NOT reset HBA");
40 
41 int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
42 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
43 MODULE_PARM_DESC(ql4xextended_error_logging,
44 		 "Option to enable extended error logging.\n"
45 		 " Default is 0 - no logging, 1 - debug logging");
46 
47 int ql4xenablemsix = 1;
48 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49 MODULE_PARM_DESC(ql4xenablemsix,
50 		"Set to enable MSI or MSI-X interrupt mechanism.\n"
51 		" 0 = enable INTx interrupt mechanism.\n"
52 		" 1 = enable MSI-X interrupt mechanism (Default).\n"
53 		" 2 = enable MSI interrupt mechanism.");
54 
55 #define QL4_DEF_QDEPTH 32
56 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58 MODULE_PARM_DESC(ql4xmaxqdepth,
59 		"Maximum queue depth to report for target devices.\n"
60 		" Default: 32.");
61 
62 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 		"Target Session Recovery Timeout.\n"
66 		" Default: 30 sec.");
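
/*
 * Usage note (illustrative values): these parameters may be supplied at
 * load time, e.g. "modprobe qla4xxx ql4xenablemsix=2 ql4xmaxqdepth=64",
 * and the ones declared with S_IWUSR can also be changed at runtime
 * through /sys/module/qla4xxx/parameters/.
 */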
67 
68 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69 /*
70  * Driver helper function prototypes
71  */
72 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
73 
74 /*
75  * iSCSI template entry points
76  */
77 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 				  enum iscsi_param param, char *buf);
79 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 				  enum iscsi_host_param param, char *buf);
81 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
82 				   int count);
83 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 				   enum iscsi_param_type param_type,
85 				   int param, char *buf);
86 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 						 struct sockaddr *dst_addr,
89 						 int non_blocking);
90 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 				enum iscsi_param param, char *buf);
94 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95 static struct iscsi_cls_conn *
96 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 			     struct iscsi_cls_conn *cls_conn,
99 			     uint64_t transport_fd, int is_leading);
100 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101 static struct iscsi_cls_session *
102 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 			uint16_t qdepth, uint32_t initial_cmdsn);
104 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105 static void qla4xxx_task_work(struct work_struct *wdata);
106 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107 static int qla4xxx_task_xmit(struct iscsi_task *);
108 static void qla4xxx_task_cleanup(struct iscsi_task *);
109 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 				   struct iscsi_stats *stats);
112 /*
113  * SCSI host template entry points
114  */
115 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
116 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
117 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
118 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120 static int qla4xxx_slave_alloc(struct scsi_device *device);
121 static int qla4xxx_slave_configure(struct scsi_device *device);
122 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
123 static mode_t ql4_attr_is_visible(int param_type, int param);
124 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
125 
126 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
127     QLA82XX_LEGACY_INTR_CONFIG;
128 
129 static struct scsi_host_template qla4xxx_driver_template = {
130 	.module			= THIS_MODULE,
131 	.name			= DRIVER_NAME,
132 	.proc_name		= DRIVER_NAME,
133 	.queuecommand		= qla4xxx_queuecommand,
134 
135 	.eh_abort_handler	= qla4xxx_eh_abort,
136 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
137 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
138 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
139 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
140 
141 	.slave_configure	= qla4xxx_slave_configure,
142 	.slave_alloc		= qla4xxx_slave_alloc,
143 	.slave_destroy		= qla4xxx_slave_destroy,
144 
145 	.this_id		= -1,
146 	.cmd_per_lun		= 3,
147 	.use_clustering		= ENABLE_CLUSTERING,
148 	.sg_tablesize		= SG_ALL,
149 
150 	.max_sectors		= 0xFFFF,
151 	.shost_attrs		= qla4xxx_host_attrs,
152 	.host_reset		= qla4xxx_host_reset,
153 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
154 };
155 
156 static struct iscsi_transport qla4xxx_iscsi_transport = {
157 	.owner			= THIS_MODULE,
158 	.name			= DRIVER_NAME,
159 	.caps			= CAP_TEXT_NEGO |
160 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
161 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
162 				  CAP_MULTI_R2T,
163 	.attr_is_visible	= ql4_attr_is_visible,
164 	.create_session         = qla4xxx_session_create,
165 	.destroy_session        = qla4xxx_session_destroy,
166 	.start_conn             = qla4xxx_conn_start,
167 	.create_conn            = qla4xxx_conn_create,
168 	.bind_conn              = qla4xxx_conn_bind,
169 	.stop_conn              = iscsi_conn_stop,
170 	.destroy_conn           = qla4xxx_conn_destroy,
171 	.set_param              = iscsi_set_param,
172 	.get_conn_param		= qla4xxx_conn_get_param,
173 	.get_session_param	= iscsi_session_get_param,
174 	.get_ep_param           = qla4xxx_get_ep_param,
175 	.ep_connect		= qla4xxx_ep_connect,
176 	.ep_poll		= qla4xxx_ep_poll,
177 	.ep_disconnect		= qla4xxx_ep_disconnect,
178 	.get_stats		= qla4xxx_conn_get_stats,
179 	.send_pdu		= iscsi_conn_send_pdu,
180 	.xmit_task		= qla4xxx_task_xmit,
181 	.cleanup_task		= qla4xxx_task_cleanup,
182 	.alloc_pdu		= qla4xxx_alloc_pdu,
183 
184 	.get_host_param		= qla4xxx_host_get_param,
185 	.set_iface_param	= qla4xxx_iface_set_param,
186 	.get_iface_param	= qla4xxx_get_iface_param,
187 	.bsg_request		= qla4xxx_bsg_request,
188 };
189 
190 static struct scsi_transport_template *qla4xxx_scsi_transport;
191 
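/*
 * ql4_attr_is_visible - report which iSCSI transport attributes are exposed
 *
 * Returns S_IRUGO for the host, session/connection and network parameters
 * this driver supports, and 0 so all other attributes stay hidden in sysfs.
 */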
192 static mode_t ql4_attr_is_visible(int param_type, int param)
193 {
194 	switch (param_type) {
195 	case ISCSI_HOST_PARAM:
196 		switch (param) {
197 		case ISCSI_HOST_PARAM_HWADDRESS:
198 		case ISCSI_HOST_PARAM_IPADDRESS:
199 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 			return S_IRUGO;
201 		default:
202 			return 0;
203 		}
204 	case ISCSI_PARAM:
205 		switch (param) {
206 		case ISCSI_PARAM_CONN_ADDRESS:
207 		case ISCSI_PARAM_CONN_PORT:
208 		case ISCSI_PARAM_TARGET_NAME:
209 		case ISCSI_PARAM_TPGT:
210 		case ISCSI_PARAM_TARGET_ALIAS:
211 		case ISCSI_PARAM_MAX_BURST:
212 		case ISCSI_PARAM_MAX_R2T:
213 		case ISCSI_PARAM_FIRST_BURST:
214 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
215 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
216 		case ISCSI_PARAM_IFACE_NAME:
217 			return S_IRUGO;
218 		default:
219 			return 0;
220 		}
221 	case ISCSI_NET_PARAM:
222 		switch (param) {
223 		case ISCSI_NET_PARAM_IPV4_ADDR:
224 		case ISCSI_NET_PARAM_IPV4_SUBNET:
225 		case ISCSI_NET_PARAM_IPV4_GW:
226 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
227 		case ISCSI_NET_PARAM_IFACE_ENABLE:
228 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
229 		case ISCSI_NET_PARAM_IPV6_ADDR:
230 		case ISCSI_NET_PARAM_IPV6_ROUTER:
231 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
232 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
233 		case ISCSI_NET_PARAM_VLAN_ID:
234 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
235 		case ISCSI_NET_PARAM_VLAN_ENABLED:
236 		case ISCSI_NET_PARAM_MTU:
237 		case ISCSI_NET_PARAM_PORT:
238 			return S_IRUGO;
239 		default:
240 			return 0;
241 		}
242 	}
243 
244 	return 0;
245 }
246 
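/*
 * qla4xxx_get_iface_param - read one network parameter of the given iface
 *
 * Only ISCSI_NET_PARAM requests are handled here; the value is formatted
 * into buf from the cached ha->ip_config state and the formatted length
 * (or a negative errno) is returned.
 */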
247 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
248 				   enum iscsi_param_type param_type,
249 				   int param, char *buf)
250 {
251 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
252 	struct scsi_qla_host *ha = to_qla_host(shost);
253 	int len = -ENOSYS;
254 
255 	if (param_type != ISCSI_NET_PARAM)
256 		return -ENOSYS;
257 
258 	switch (param) {
259 	case ISCSI_NET_PARAM_IPV4_ADDR:
260 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
261 		break;
262 	case ISCSI_NET_PARAM_IPV4_SUBNET:
263 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
264 		break;
265 	case ISCSI_NET_PARAM_IPV4_GW:
266 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
267 		break;
268 	case ISCSI_NET_PARAM_IFACE_ENABLE:
269 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
270 			len = sprintf(buf, "%s\n",
271 				      (ha->ip_config.ipv4_options &
272 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
273 				      "enabled" : "disabled");
274 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
275 			len = sprintf(buf, "%s\n",
276 				      (ha->ip_config.ipv6_options &
277 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
278 				       "enabled" : "disabled");
279 		break;
280 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
281 		len = sprintf(buf, "%s\n",
282 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
283 			      "dhcp" : "static");
284 		break;
285 	case ISCSI_NET_PARAM_IPV6_ADDR:
286 		if (iface->iface_num == 0)
287 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
288 		if (iface->iface_num == 1)
289 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
290 		break;
291 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
292 		len = sprintf(buf, "%pI6\n",
293 			      &ha->ip_config.ipv6_link_local_addr);
294 		break;
295 	case ISCSI_NET_PARAM_IPV6_ROUTER:
296 		len = sprintf(buf, "%pI6\n",
297 			      &ha->ip_config.ipv6_default_router_addr);
298 		break;
299 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
300 		len = sprintf(buf, "%s\n",
301 			      (ha->ip_config.ipv6_addl_options &
302 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
303 			       "nd" : "static");
304 		break;
305 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
306 		len = sprintf(buf, "%s\n",
307 			      (ha->ip_config.ipv6_addl_options &
308 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
309 			       "auto" : "static");
310 		break;
311 	case ISCSI_NET_PARAM_VLAN_ID:
312 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
313 			len = sprintf(buf, "%d\n",
314 				      (ha->ip_config.ipv4_vlan_tag &
315 				       ISCSI_MAX_VLAN_ID));
316 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
317 			len = sprintf(buf, "%d\n",
318 				      (ha->ip_config.ipv6_vlan_tag &
319 				       ISCSI_MAX_VLAN_ID));
320 		break;
321 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
322 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
323 			len = sprintf(buf, "%d\n",
324 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
325 					ISCSI_MAX_VLAN_PRIORITY));
326 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
327 			len = sprintf(buf, "%d\n",
328 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
329 					ISCSI_MAX_VLAN_PRIORITY));
330 		break;
331 	case ISCSI_NET_PARAM_VLAN_ENABLED:
332 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
333 			len = sprintf(buf, "%s\n",
334 				      (ha->ip_config.ipv4_options &
335 				       IPOPT_VLAN_TAGGING_ENABLE) ?
336 				       "enabled" : "disabled");
337 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
338 			len = sprintf(buf, "%s\n",
339 				      (ha->ip_config.ipv6_options &
340 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
341 				       "enabled" : "disabled");
342 		break;
343 	case ISCSI_NET_PARAM_MTU:
344 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
345 		break;
346 	case ISCSI_NET_PARAM_PORT:
347 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
348 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
349 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
350 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
351 		break;
352 	default:
353 		len = -ENOSYS;
354 	}
355 
356 	return len;
357 }
358 
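/*
 * qla4xxx_ep_connect - allocate an endpoint for the given destination
 *
 * No connection is attempted at this point; the IPv4/IPv6 destination
 * address is simply copied into the new qla_endpoint along with the owning
 * host, since login is offloaded to the firmware (CAP_LOGIN_OFFLOAD) and
 * happens when the connection is started.
 */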
359 static struct iscsi_endpoint *
360 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
361 		   int non_blocking)
362 {
363 	int ret;
364 	struct iscsi_endpoint *ep;
365 	struct qla_endpoint *qla_ep;
366 	struct scsi_qla_host *ha;
367 	struct sockaddr_in *addr;
368 	struct sockaddr_in6 *addr6;
369 
370 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
371 	if (!shost) {
372 		ret = -ENXIO;
373 		printk(KERN_ERR "%s: shost is NULL\n",
374 		       __func__);
375 		return ERR_PTR(ret);
376 	}
377 
378 	ha = iscsi_host_priv(shost);
379 
380 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
381 	if (!ep) {
382 		ret = -ENOMEM;
383 		return ERR_PTR(ret);
384 	}
385 
386 	qla_ep = ep->dd_data;
387 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
388 	if (dst_addr->sa_family == AF_INET) {
389 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
390 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
391 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
392 				  (char *)&addr->sin_addr));
393 	} else if (dst_addr->sa_family == AF_INET6) {
394 		memcpy(&qla_ep->dst_addr, dst_addr,
395 		       sizeof(struct sockaddr_in6));
396 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
397 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
398 				  (char *)&addr6->sin6_addr));
399 	}
400 
401 	qla_ep->host = shost;
402 
403 	return ep;
404 }
405 
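/*
 * qla4xxx_ep_poll - endpoint poll used by the iSCSI transport
 *
 * Reports 1 (usable) whenever the adapter is up; no per-endpoint
 * connection state is tracked at this level.
 */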
406 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
407 {
408 	struct qla_endpoint *qla_ep;
409 	struct scsi_qla_host *ha;
410 	int ret = 0;
411 
412 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
413 	qla_ep = ep->dd_data;
414 	ha = to_qla_host(qla_ep->host);
415 
416 	if (adapter_up(ha))
417 		ret = 1;
418 
419 	return ret;
420 }
421 
422 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
423 {
424 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
425 	iscsi_destroy_endpoint(ep);
426 }
427 
428 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
429 				enum iscsi_param param,
430 				char *buf)
431 {
432 	struct qla_endpoint *qla_ep = ep->dd_data;
433 	struct sockaddr *dst_addr;
434 
435 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
436 
437 	switch (param) {
438 	case ISCSI_PARAM_CONN_PORT:
439 	case ISCSI_PARAM_CONN_ADDRESS:
440 		if (!qla_ep)
441 			return -ENOTCONN;
442 
443 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
444 		if (!dst_addr)
445 			return -ENOTCONN;
446 
447 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
448 						 &qla_ep->dst_addr, param, buf);
449 	default:
450 		return -ENOSYS;
451 	}
452 }
453 
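/*
 * qla4xxx_conn_get_stats - fill struct iscsi_stats for a connection
 *
 * Fetches the firmware's per-DDB iSCSI statistics into a DMA-coherent
 * buffer and translates the little-endian counters into the transport's
 * iscsi_stats layout.
 */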
454 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
455 				   struct iscsi_stats *stats)
456 {
457 	struct iscsi_session *sess;
458 	struct iscsi_cls_session *cls_sess;
459 	struct ddb_entry *ddb_entry;
460 	struct scsi_qla_host *ha;
461 	struct ql_iscsi_stats *ql_iscsi_stats;
462 	int stats_size;
463 	int ret;
464 	dma_addr_t iscsi_stats_dma;
465 
466 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
467 
468 	cls_sess = iscsi_conn_to_session(cls_conn);
469 	sess = cls_sess->dd_data;
470 	ddb_entry = sess->dd_data;
471 	ha = ddb_entry->ha;
472 
473 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
474 	/* Allocate memory */
475 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
476 					    &iscsi_stats_dma, GFP_KERNEL);
477 	if (!ql_iscsi_stats) {
478 		ql4_printk(KERN_ERR, ha,
479 			   "Unable to allocate memory for iscsi stats\n");
480 		goto exit_get_stats;
481 	}
482 
483 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
484 				     iscsi_stats_dma);
485 	if (ret != QLA_SUCCESS) {
486 		ql4_printk(KERN_ERR, ha,
487 			   "Unable to retrieve iscsi stats\n");
488 		goto free_stats;
489 	}
490 
491 	/* octets */
492 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
493 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
494 	/* xmit pdus */
495 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
496 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
497 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
498 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
499 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
500 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
501 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
502 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
503 	/* recv pdus */
504 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
505 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
506 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
507 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
508 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
509 	stats->logoutrsp_pdus =
510 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
511 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
512 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
513 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
514 
515 free_stats:
516 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
517 			  iscsi_stats_dma);
518 exit_get_stats:
519 	return;
520 }
521 
522 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
523 {
524 	struct iscsi_cls_session *session;
525 	struct iscsi_session *sess;
526 	unsigned long flags;
527 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
528 
529 	session = starget_to_session(scsi_target(sc->device));
530 	sess = session->dd_data;
531 
532 	spin_lock_irqsave(&session->lock, flags);
533 	if (session->state == ISCSI_SESSION_FAILED)
534 		ret = BLK_EH_RESET_TIMER;
535 	spin_unlock_irqrestore(&session->lock, flags);
536 
537 	return ret;
538 }
539 
540 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
541 				  enum iscsi_host_param param, char *buf)
542 {
543 	struct scsi_qla_host *ha = to_qla_host(shost);
544 	int len;
545 
546 	switch (param) {
547 	case ISCSI_HOST_PARAM_HWADDRESS:
548 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
549 		break;
550 	case ISCSI_HOST_PARAM_IPADDRESS:
551 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
552 		break;
553 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
554 		len = sprintf(buf, "%s\n", ha->name_string);
555 		break;
556 	default:
557 		return -ENOSYS;
558 	}
559 
560 	return len;
561 }
562 
563 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
564 {
565 	if (ha->iface_ipv4)
566 		return;
567 
568 	/* IPv4 */
569 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
570 					    &qla4xxx_iscsi_transport,
571 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
572 	if (!ha->iface_ipv4)
573 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
574 			   "iface0.\n");
575 }
576 
577 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
578 {
579 	if (!ha->iface_ipv6_0)
580 		/* IPv6 iface-0 */
581 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
582 						      &qla4xxx_iscsi_transport,
583 						      ISCSI_IFACE_TYPE_IPV6, 0,
584 						      0);
585 	if (!ha->iface_ipv6_0)
586 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 			   "iface0.\n");
588 
589 	if (!ha->iface_ipv6_1)
590 		/* IPv6 iface-1 */
591 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
592 						      &qla4xxx_iscsi_transport,
593 						      ISCSI_IFACE_TYPE_IPV6, 1,
594 						      0);
595 	if (!ha->iface_ipv6_1)
596 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
597 			   "iface1.\n");
598 }
599 
600 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
601 {
602 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
603 		qla4xxx_create_ipv4_iface(ha);
604 
605 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
606 		qla4xxx_create_ipv6_iface(ha);
607 }
608 
609 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
610 {
611 	if (ha->iface_ipv4) {
612 		iscsi_destroy_iface(ha->iface_ipv4);
613 		ha->iface_ipv4 = NULL;
614 	}
615 }
616 
617 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
618 {
619 	if (ha->iface_ipv6_0) {
620 		iscsi_destroy_iface(ha->iface_ipv6_0);
621 		ha->iface_ipv6_0 = NULL;
622 	}
623 	if (ha->iface_ipv6_1) {
624 		iscsi_destroy_iface(ha->iface_ipv6_1);
625 		ha->iface_ipv6_1 = NULL;
626 	}
627 }
628 
629 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
630 {
631 	qla4xxx_destroy_ipv4_iface(ha);
632 	qla4xxx_destroy_ipv6_iface(ha);
633 }
634 
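/*
 * qla4xxx_set_ipv6 - apply one IPv6 iface parameter to the address control
 * block that will later be committed to flash and the firmware.
 */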
635 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
636 			     struct iscsi_iface_param_info *iface_param,
637 			     struct addr_ctrl_blk *init_fw_cb)
638 {
639 	/*
640 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
641 	 * iface_num 1 is valid only for IPv6 Addr.
642 	 */
643 	switch (iface_param->param) {
644 	case ISCSI_NET_PARAM_IPV6_ADDR:
645 		if (iface_param->iface_num & 0x1)
646 			/* IPv6 Addr 1 */
647 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
648 			       sizeof(init_fw_cb->ipv6_addr1));
649 		else
650 			/* IPv6 Addr 0 */
651 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
652 			       sizeof(init_fw_cb->ipv6_addr0));
653 		break;
654 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
655 		if (iface_param->iface_num & 0x1)
656 			break;
657 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
658 		       sizeof(init_fw_cb->ipv6_if_id));
659 		break;
660 	case ISCSI_NET_PARAM_IPV6_ROUTER:
661 		if (iface_param->iface_num & 0x1)
662 			break;
663 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
664 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
665 		break;
666 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
667 		/* Autocfg applies to even interface */
668 		if (iface_param->iface_num & 0x1)
669 			break;
670 
671 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
672 			init_fw_cb->ipv6_addtl_opts &=
673 				cpu_to_le16(
674 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
675 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
676 			init_fw_cb->ipv6_addtl_opts |=
677 				cpu_to_le16(
678 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
679 		else
680 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
681 				   "IPv6 addr\n");
682 		break;
683 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
684 		/* Autocfg applies to even interface */
685 		if (iface_param->iface_num & 0x1)
686 			break;
687 
688 		if (iface_param->value[0] ==
689 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
690 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
691 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
692 		else if (iface_param->value[0] ==
693 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
694 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
695 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
696 		else
697 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
698 				   "IPv6 linklocal addr\n");
699 		break;
700 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
701 		/* Autocfg applies to even interface */
702 		if (iface_param->iface_num & 0x1)
703 			break;
704 
705 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
706 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
707 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
708 		break;
709 	case ISCSI_NET_PARAM_IFACE_ENABLE:
710 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
711 			init_fw_cb->ipv6_opts |=
712 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
713 			qla4xxx_create_ipv6_iface(ha);
714 		} else {
715 			init_fw_cb->ipv6_opts &=
716 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
717 					    0xFFFF);
718 			qla4xxx_destroy_ipv6_iface(ha);
719 		}
720 		break;
721 	case ISCSI_NET_PARAM_VLAN_ID:
722 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
723 			break;
724 		init_fw_cb->ipv6_vlan_tag =
725 				cpu_to_be16(*(uint16_t *)iface_param->value);
726 		break;
727 	case ISCSI_NET_PARAM_VLAN_ENABLED:
728 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
729 			init_fw_cb->ipv6_opts |=
730 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
731 		else
732 			init_fw_cb->ipv6_opts &=
733 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
734 		break;
735 	case ISCSI_NET_PARAM_MTU:
736 		init_fw_cb->eth_mtu_size =
737 				cpu_to_le16(*(uint16_t *)iface_param->value);
738 		break;
739 	case ISCSI_NET_PARAM_PORT:
740 		/* Autocfg applies to even interface */
741 		if (iface_param->iface_num & 0x1)
742 			break;
743 
744 		init_fw_cb->ipv6_port =
745 				cpu_to_le16(*(uint16_t *)iface_param->value);
746 		break;
747 	default:
748 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
749 			   iface_param->param);
750 		break;
751 	}
752 }
753 
754 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
755 			     struct iscsi_iface_param_info *iface_param,
756 			     struct addr_ctrl_blk *init_fw_cb)
757 {
758 	switch (iface_param->param) {
759 	case ISCSI_NET_PARAM_IPV4_ADDR:
760 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
761 		       sizeof(init_fw_cb->ipv4_addr));
762 		break;
763 	case ISCSI_NET_PARAM_IPV4_SUBNET:
764 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
765 		       sizeof(init_fw_cb->ipv4_subnet));
766 		break;
767 	case ISCSI_NET_PARAM_IPV4_GW:
768 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
769 		       sizeof(init_fw_cb->ipv4_gw_addr));
770 		break;
771 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
772 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
773 			init_fw_cb->ipv4_tcp_opts |=
774 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
775 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
776 			init_fw_cb->ipv4_tcp_opts &=
777 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
778 		else
779 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
780 		break;
781 	case ISCSI_NET_PARAM_IFACE_ENABLE:
782 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
783 			init_fw_cb->ipv4_ip_opts |=
784 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
785 			qla4xxx_create_ipv4_iface(ha);
786 		} else {
787 			init_fw_cb->ipv4_ip_opts &=
788 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
789 					    0xFFFF);
790 			qla4xxx_destroy_ipv4_iface(ha);
791 		}
792 		break;
793 	case ISCSI_NET_PARAM_VLAN_ID:
794 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
795 			break;
796 		init_fw_cb->ipv4_vlan_tag =
797 				cpu_to_be16(*(uint16_t *)iface_param->value);
798 		break;
799 	case ISCSI_NET_PARAM_VLAN_ENABLED:
800 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
801 			init_fw_cb->ipv4_ip_opts |=
802 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
803 		else
804 			init_fw_cb->ipv4_ip_opts &=
805 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
806 		break;
807 	case ISCSI_NET_PARAM_MTU:
808 		init_fw_cb->eth_mtu_size =
809 				cpu_to_le16(*(uint16_t *)iface_param->value);
810 		break;
811 	case ISCSI_NET_PARAM_PORT:
812 		init_fw_cb->ipv4_port =
813 				cpu_to_le16(*(uint16_t *)iface_param->value);
814 		break;
815 	default:
816 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
817 			   iface_param->param);
818 		break;
819 	}
820 }
821 
822 static void
823 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
824 {
825 	struct addr_ctrl_blk_def *acb;
826 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
827 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
828 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
829 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
830 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
831 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
832 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
833 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
834 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
835 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
836 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
837 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
838 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
839 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
840 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
841 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
842 }
843 
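/*
 * qla4xxx_iface_set_param - apply a batch of iface parameters from userspace
 *
 * Reads the current init firmware control block, folds in each
 * iscsi_iface_param_info record found in the data buffer, commits the
 * result to flash and then re-applies it as the active address control
 * block.
 */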
844 static int
845 qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
846 {
847 	struct scsi_qla_host *ha = to_qla_host(shost);
848 	int rval = 0;
849 	struct iscsi_iface_param_info *iface_param = NULL;
850 	struct addr_ctrl_blk *init_fw_cb = NULL;
851 	dma_addr_t init_fw_cb_dma;
852 	uint32_t mbox_cmd[MBOX_REG_COUNT];
853 	uint32_t mbox_sts[MBOX_REG_COUNT];
854 	uint32_t total_param_count;
855 	uint32_t length;
856 
857 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
858 					sizeof(struct addr_ctrl_blk),
859 					&init_fw_cb_dma, GFP_KERNEL);
860 	if (!init_fw_cb) {
861 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
862 			   __func__);
863 		return -ENOMEM;
864 	}
865 
866 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
867 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
868 	memset(&mbox_sts, 0, sizeof(mbox_sts));
869 
870 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
871 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
872 		rval = -EIO;
873 		goto exit_init_fw_cb;
874 	}
875 
876 	total_param_count = count;
877 	iface_param = (struct iscsi_iface_param_info *)data;
878 
879 	for ( ; total_param_count != 0; total_param_count--) {
880 		length = iface_param->len;
881 
882 		if (iface_param->param_type != ISCSI_NET_PARAM)
883 			continue;
884 
885 		switch (iface_param->iface_type) {
886 		case ISCSI_IFACE_TYPE_IPV4:
887 			switch (iface_param->iface_num) {
888 			case 0:
889 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
890 				break;
891 			default:
892 				/* Cannot have more than one IPv4 interface */
893 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
894 					   "number = %d\n",
895 					   iface_param->iface_num);
896 				break;
897 			}
898 			break;
899 		case ISCSI_IFACE_TYPE_IPV6:
900 			switch (iface_param->iface_num) {
901 			case 0:
902 			case 1:
903 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
904 				break;
905 			default:
906 				/* Cannot have more than two IPv6 interfaces */
907 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
908 					   "number = %d\n",
909 					   iface_param->iface_num);
910 				break;
911 			}
912 			break;
913 		default:
914 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
915 			break;
916 		}
917 
918 		iface_param = (struct iscsi_iface_param_info *)
919 						((uint8_t *)iface_param +
920 			    sizeof(struct iscsi_iface_param_info) + length);
921 	}
922 
923 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
924 
925 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
926 				 sizeof(struct addr_ctrl_blk),
927 				 FLASH_OPT_RMW_COMMIT);
928 	if (rval != QLA_SUCCESS) {
929 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
930 			   __func__);
931 		rval = -EIO;
932 		goto exit_init_fw_cb;
933 	}
934 
935 	qla4xxx_disable_acb(ha);
936 
937 	qla4xxx_initcb_to_acb(init_fw_cb);
938 
939 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
940 	if (rval != QLA_SUCCESS) {
941 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
942 			   __func__);
943 		rval = -EIO;
944 		goto exit_init_fw_cb;
945 	}
946 
947 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
948 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
949 				  init_fw_cb_dma);
950 
951 exit_init_fw_cb:
952 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
953 			  init_fw_cb, init_fw_cb_dma);
954 
955 	return rval;
956 }
957 
958 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
959 				  enum iscsi_param param, char *buf)
960 {
961 	struct iscsi_conn *conn;
962 	struct qla_conn *qla_conn;
963 	struct sockaddr *dst_addr;
964 	int len = 0;
965 
966 	conn = cls_conn->dd_data;
967 	qla_conn = conn->dd_data;
968 	dst_addr = &qla_conn->qla_ep->dst_addr;
969 
970 	switch (param) {
971 	case ISCSI_PARAM_CONN_PORT:
972 	case ISCSI_PARAM_CONN_ADDRESS:
973 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
974 						 dst_addr, param, buf);
975 	default:
976 		return iscsi_conn_get_param(cls_conn, param, buf);
977 	}
978 
979 	return len;
980 
981 }
982 
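/*
 * qla4xxx_session_create - create an iSCSI session on a free firmware DDB
 *
 * Reserves the first unused DDB index, requests it from the firmware and
 * then sets up the libiscsi session whose per-session data is this
 * driver's ddb_entry.
 */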
983 static struct iscsi_cls_session *
984 qla4xxx_session_create(struct iscsi_endpoint *ep,
985 			uint16_t cmds_max, uint16_t qdepth,
986 			uint32_t initial_cmdsn)
987 {
988 	struct iscsi_cls_session *cls_sess;
989 	struct scsi_qla_host *ha;
990 	struct qla_endpoint *qla_ep;
991 	struct ddb_entry *ddb_entry;
992 	uint32_t ddb_index;
993 	uint32_t mbx_sts = 0;
994 	struct iscsi_session *sess;
995 	struct sockaddr *dst_addr;
996 	int ret;
997 
998 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
999 	if (!ep) {
1000 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1001 		return NULL;
1002 	}
1003 
1004 	qla_ep = ep->dd_data;
1005 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1006 	ha = to_qla_host(qla_ep->host);
1007 
1008 get_ddb_index:
1009 	ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1010 
1011 	if (ddb_index >= MAX_DDB_ENTRIES) {
1012 		DEBUG2(ql4_printk(KERN_INFO, ha,
1013 				  "Free DDB index not available\n"));
1014 		return NULL;
1015 	}
1016 
1017 	if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1018 		goto get_ddb_index;
1019 
1020 	DEBUG2(ql4_printk(KERN_INFO, ha,
1021 			  "Found a free DDB index at %d\n", ddb_index));
1022 	ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1023 	if (ret == QLA_ERROR) {
1024 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1025 			ql4_printk(KERN_INFO, ha,
1026 				   "DDB index = %d not available, trying next\n",
1027 				   ddb_index);
1028 			goto get_ddb_index;
1029 		}
1030 		DEBUG2(ql4_printk(KERN_INFO, ha,
1031 				  "Free FW DDB not available\n"));
1032 		return NULL;
1033 	}
1034 
1035 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1036 				       cmds_max, sizeof(struct ddb_entry),
1037 				       sizeof(struct ql4_task_data),
1038 				       initial_cmdsn, ddb_index);
1039 	if (!cls_sess)
1040 		return NULL;
1041 
1042 	sess = cls_sess->dd_data;
1043 	ddb_entry = sess->dd_data;
1044 	ddb_entry->fw_ddb_index = ddb_index;
1045 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1046 	ddb_entry->ha = ha;
1047 	ddb_entry->sess = cls_sess;
1048 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1049 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1050 	ha->tot_ddbs++;
1051 
1052 	return cls_sess;
1053 }
1054 
1055 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1056 {
1057 	struct iscsi_session *sess;
1058 	struct ddb_entry *ddb_entry;
1059 	struct scsi_qla_host *ha;
1060 	unsigned long flags;
1061 
1062 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1063 	sess = cls_sess->dd_data;
1064 	ddb_entry = sess->dd_data;
1065 	ha = ddb_entry->ha;
1066 
1067 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1068 
1069 	spin_lock_irqsave(&ha->hardware_lock, flags);
1070 	qla4xxx_free_ddb(ha, ddb_entry);
1071 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1072 	iscsi_session_teardown(cls_sess);
1073 }
1074 
1075 static struct iscsi_cls_conn *
1076 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1077 {
1078 	struct iscsi_cls_conn *cls_conn;
1079 	struct iscsi_session *sess;
1080 	struct ddb_entry *ddb_entry;
1081 
1082 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1083 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1084 				    conn_idx);
1085 	sess = cls_sess->dd_data;
1086 	ddb_entry = sess->dd_data;
1087 	ddb_entry->conn = cls_conn;
1088 
1089 	return cls_conn;
1090 }
1091 
1092 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1093 			     struct iscsi_cls_conn *cls_conn,
1094 			     uint64_t transport_fd, int is_leading)
1095 {
1096 	struct iscsi_conn *conn;
1097 	struct qla_conn *qla_conn;
1098 	struct iscsi_endpoint *ep;
1099 
1100 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1101 
1102 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1103 		return -EINVAL;
1104 	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
1105 	conn = cls_conn->dd_data;
1106 	qla_conn = conn->dd_data;
1107 	qla_conn->qla_ep = ep->dd_data;
1108 	return 0;
1109 }
1110 
1111 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1112 {
1113 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1114 	struct iscsi_session *sess;
1115 	struct ddb_entry *ddb_entry;
1116 	struct scsi_qla_host *ha;
1117 	struct dev_db_entry *fw_ddb_entry;
1118 	dma_addr_t fw_ddb_entry_dma;
1119 	uint32_t mbx_sts = 0;
1120 	int ret = 0;
1121 	int status = QLA_SUCCESS;
1122 
1123 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1124 	sess = cls_sess->dd_data;
1125 	ddb_entry = sess->dd_data;
1126 	ha = ddb_entry->ha;
1127 
1128 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1129 					  &fw_ddb_entry_dma, GFP_KERNEL);
1130 	if (!fw_ddb_entry) {
1131 		ql4_printk(KERN_ERR, ha,
1132 			   "%s: Unable to allocate dma buffer\n", __func__);
1133 		return -ENOMEM;
1134 	}
1135 
1136 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1137 	if (ret) {
1138 		/* If iscsid is stopped and restarted then there is no need
1139 		 * to set the params again since the DDB state will already
1140 		 * be active and the FW does not allow a set ddb on an
1141 		 * active session.
1142 		 */
1143 		if (mbx_sts)
1144 			if (ddb_entry->fw_ddb_device_state ==
1145 							DDB_DS_SESSION_ACTIVE)
1146 				goto exit_set_param;
1147 
1148 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1149 			   __func__, ddb_entry->fw_ddb_index);
1150 		goto exit_conn_start;
1151 	}
1152 
1153 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1154 	if (status == QLA_ERROR) {
1155 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1156 			   sess->targetname);
1157 		ret = -EINVAL;
1158 		goto exit_conn_start;
1159 	}
1160 
1161 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1162 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1163 
1164 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1165 		      ddb_entry->fw_ddb_device_state));
1166 
1167 exit_set_param:
1168 	iscsi_conn_start(cls_conn);
1169 	ret = 0;
1170 
1171 exit_conn_start:
1172 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1173 			  fw_ddb_entry, fw_ddb_entry_dma);
1174 	return ret;
1175 }
1176 
1177 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1178 {
1179 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1180 	struct iscsi_session *sess;
1181 	struct scsi_qla_host *ha;
1182 	struct ddb_entry *ddb_entry;
1183 	int options;
1184 
1185 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1186 	sess = cls_sess->dd_data;
1187 	ddb_entry = sess->dd_data;
1188 	ha = ddb_entry->ha;
1189 
1190 	options = LOGOUT_OPTION_CLOSE_SESSION;
1191 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1192 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1193 }
1194 
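/*
 * qla4xxx_task_work - deferred completion work for passthrough iSCSI PDUs
 *
 * On PASSTHRU_STATUS_COMPLETE the original itt is restored in the response
 * header and the PDU is handed back to libiscsi via iscsi_complete_pdu().
 */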
1195 static void qla4xxx_task_work(struct work_struct *wdata)
1196 {
1197 	struct ql4_task_data *task_data;
1198 	struct scsi_qla_host *ha;
1199 	struct passthru_status *sts;
1200 	struct iscsi_task *task;
1201 	struct iscsi_hdr *hdr;
1202 	uint8_t *data;
1203 	uint32_t data_len;
1204 	struct iscsi_conn *conn;
1205 	int hdr_len;
1206 	itt_t itt;
1207 
1208 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1209 	ha = task_data->ha;
1210 	task = task_data->task;
1211 	sts = &task_data->sts;
1212 	hdr_len = sizeof(struct iscsi_hdr);
1213 
1214 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1215 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1216 	DEBUG3(printk(KERN_INFO "Response buffer\n"));
1217 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1218 
1219 	conn = task->conn;
1220 
1221 	switch (sts->completionStatus) {
1222 	case PASSTHRU_STATUS_COMPLETE:
1223 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1224 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1225 		itt = sts->handle;
1226 		hdr->itt = itt;
1227 		data = task_data->resp_buffer + hdr_len;
1228 		data_len = task_data->resp_len - hdr_len;
1229 		iscsi_complete_pdu(conn, hdr, data, data_len);
1230 		break;
1231 	default:
1232 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1233 			   sts->completionStatus);
1234 		break;
1235 	}
1236 	return;
1237 }
1238 
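/*
 * qla4xxx_alloc_pdu - allocate request/response buffers for a non-SCSI task
 *
 * Only passthrough (non-SCSI-command) tasks are supported; DMA-coherent
 * request and response buffers are allocated and any immediate data is
 * mapped for the firmware.
 */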
1239 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1240 {
1241 	struct ql4_task_data *task_data;
1242 	struct iscsi_session *sess;
1243 	struct ddb_entry *ddb_entry;
1244 	struct scsi_qla_host *ha;
1245 	int hdr_len;
1246 
1247 	sess = task->conn->session;
1248 	ddb_entry = sess->dd_data;
1249 	ha = ddb_entry->ha;
1250 	task_data = task->dd_data;
1251 	memset(task_data, 0, sizeof(struct ql4_task_data));
1252 
1253 	if (task->sc) {
1254 		ql4_printk(KERN_INFO, ha,
1255 			   "%s: SCSI Commands not implemented\n", __func__);
1256 		return -EINVAL;
1257 	}
1258 
1259 	hdr_len = sizeof(struct iscsi_hdr);
1260 	task_data->ha = ha;
1261 	task_data->task = task;
1262 
1263 	if (task->data_count) {
1264 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1265 						     task->data_count,
1266 						     PCI_DMA_TODEVICE);
1267 	}
1268 
1269 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1270 		      __func__, task->conn->max_recv_dlength, hdr_len));
1271 
1272 	task_data->resp_len = task->conn->max_recv_dlength;
1273 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1274 						    task_data->resp_len,
1275 						    &task_data->resp_dma,
1276 						    GFP_ATOMIC);
1277 	if (!task_data->resp_buffer)
1278 		goto exit_alloc_pdu;
1279 
1280 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1281 						   task->data_count + hdr_len,
1282 						   &task_data->req_dma,
1283 						   GFP_ATOMIC);
1284 	if (!task_data->req_buffer)
1285 		goto exit_alloc_pdu;
1286 
1287 	task->hdr = task_data->req_buffer;
1288 
1289 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1290 
1291 	return 0;
1292 
1293 exit_alloc_pdu:
1294 	if (task_data->resp_buffer)
1295 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1296 				  task_data->resp_buffer, task_data->resp_dma);
1297 
1298 	if (task_data->req_buffer)
1299 		dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1300 				  task_data->req_buffer, task_data->req_dma);
1301 	return -ENOMEM;
1302 }
1303 
1304 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1305 {
1306 	struct ql4_task_data *task_data;
1307 	struct iscsi_session *sess;
1308 	struct ddb_entry *ddb_entry;
1309 	struct scsi_qla_host *ha;
1310 	int hdr_len;
1311 
1312 	hdr_len = sizeof(struct iscsi_hdr);
1313 	sess = task->conn->session;
1314 	ddb_entry = sess->dd_data;
1315 	ha = ddb_entry->ha;
1316 	task_data = task->dd_data;
1317 
1318 	if (task->data_count) {
1319 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1320 				 task->data_count, PCI_DMA_TODEVICE);
1321 	}
1322 
1323 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1324 		      __func__, task->conn->max_recv_dlength, hdr_len));
1325 
1326 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1327 			  task_data->resp_buffer, task_data->resp_dma);
1328 	dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1329 			  task_data->req_buffer, task_data->req_dma);
1330 	return;
1331 }
1332 
1333 static int qla4xxx_task_xmit(struct iscsi_task *task)
1334 {
1335 	struct scsi_cmnd *sc = task->sc;
1336 	struct iscsi_session *sess = task->conn->session;
1337 	struct ddb_entry *ddb_entry = sess->dd_data;
1338 	struct scsi_qla_host *ha = ddb_entry->ha;
1339 
1340 	if (!sc)
1341 		return qla4xxx_send_passthru0(task);
1342 
1343 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1344 		   __func__);
1345 	return -ENOSYS;
1346 }
1347 
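/*
 * qla4xxx_update_session_conn_param - refresh libiscsi params from firmware
 *
 * Reads the firmware DDB entry for this session and copies the negotiated
 * values (burst lengths, R2T limits, timers, TPGT, initiator name) into
 * the libiscsi session and connection structures.
 */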
1348 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1349 				       struct ddb_entry *ddb_entry)
1350 {
1351 	struct iscsi_cls_session *cls_sess;
1352 	struct iscsi_cls_conn *cls_conn;
1353 	struct iscsi_session *sess;
1354 	struct iscsi_conn *conn;
1355 	uint32_t ddb_state;
1356 	dma_addr_t fw_ddb_entry_dma;
1357 	struct dev_db_entry *fw_ddb_entry;
1358 
1359 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1360 					  &fw_ddb_entry_dma, GFP_KERNEL);
1361 	if (!fw_ddb_entry) {
1362 		ql4_printk(KERN_ERR, ha,
1363 			   "%s: Unable to allocate dma buffer\n", __func__);
1364 		return;
1365 	}
1366 
1367 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1368 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1369 				    NULL, NULL, NULL) == QLA_ERROR) {
1370 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1371 				  "get_ddb_entry for fw_ddb_index %d\n",
1372 				  ha->host_no, __func__,
1373 				  ddb_entry->fw_ddb_index));
1374 		goto exit_session_conn_param;
1375 	}
1376 
1377 	cls_sess = ddb_entry->sess;
1378 	sess = cls_sess->dd_data;
1379 
1380 	cls_conn = ddb_entry->conn;
1381 	conn = cls_conn->dd_data;
1382 
1383 	/* Update params */
1384 	conn->max_recv_dlength = BYTE_UNITS *
1385 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1386 
1387 	conn->max_xmit_dlength = BYTE_UNITS *
1388 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1389 
1390 	sess->initial_r2t_en =
1391 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1392 
1393 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1394 
1395 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1396 
1397 	sess->first_burst = BYTE_UNITS *
1398 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1399 
1400 	sess->max_burst = BYTE_UNITS *
1401 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1402 
1403 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1404 
1405 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1406 
1407 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1408 
1409 	memcpy(sess->initiatorname, ha->name_string,
1410 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));

exit_session_conn_param:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
1411 }
1412 
1413 /*
1414  * Timer routines
1415  */
1416 
1417 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1418 				unsigned long interval)
1419 {
1420 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1421 		     __func__, ha->host->host_no));
1422 	init_timer(&ha->timer);
1423 	ha->timer.expires = jiffies + interval * HZ;
1424 	ha->timer.data = (unsigned long)ha;
1425 	ha->timer.function = (void (*)(unsigned long))func;
1426 	add_timer(&ha->timer);
1427 	ha->timer_active = 1;
1428 }
1429 
1430 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1431 {
1432 	del_timer_sync(&ha->timer);
1433 	ha->timer_active = 0;
1434 }
1435 
1436 /**
1437  * qla4xxx_mark_device_missing - blocks the session
1438  * @cls_session: Pointer to the session to be blocked
1439  *
1440  * This routine marks a device missing by blocking the corresponding
1441  * iSCSI session.
1442  **/
1443 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
1444 {
1445 	iscsi_block_session(cls_session);
1446 }
1447 
1448 /**
1449  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1450  * @ha: Pointer to host adapter structure.
1451  *
1452  * This routine blocks all iSCSI sessions, marking every device missing.
1453  **/
1454 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1455 {
1456 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1457 }
1458 
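/*
 * qla4xxx_get_new_srb - allocate an SRB from the mempool and attach it to
 * the SCSI command via CMD_SP so it can be found again at completion time.
 */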
1459 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1460 				       struct ddb_entry *ddb_entry,
1461 				       struct scsi_cmnd *cmd)
1462 {
1463 	struct srb *srb;
1464 
1465 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1466 	if (!srb)
1467 		return srb;
1468 
1469 	kref_init(&srb->srb_ref);
1470 	srb->ha = ha;
1471 	srb->ddb = ddb_entry;
1472 	srb->cmd = cmd;
1473 	srb->flags = 0;
1474 	CMD_SP(cmd) = (void *)srb;
1475 
1476 	return srb;
1477 }
1478 
1479 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1480 {
1481 	struct scsi_cmnd *cmd = srb->cmd;
1482 
1483 	if (srb->flags & SRB_DMA_VALID) {
1484 		scsi_dma_unmap(cmd);
1485 		srb->flags &= ~SRB_DMA_VALID;
1486 	}
1487 	CMD_SP(cmd) = NULL;
1488 }
1489 
1490 void qla4xxx_srb_compl(struct kref *ref)
1491 {
1492 	struct srb *srb = container_of(ref, struct srb, srb_ref);
1493 	struct scsi_cmnd *cmd = srb->cmd;
1494 	struct scsi_qla_host *ha = srb->ha;
1495 
1496 	qla4xxx_srb_free_dma(ha, srb);
1497 
1498 	mempool_free(srb, ha->srb_mempool);
1499 
1500 	cmd->scsi_done(cmd);
1501 }
1502 
1503 /**
1504  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
1505  * @host: scsi host
1506  * @cmd: Pointer to Linux's SCSI command structure
1507  *
1508  * Remarks:
1509  * This routine is invoked by Linux to send a SCSI command to the driver.
1510  * The mid-level driver tries to ensure that queuecommand never gets
1511  * invoked concurrently with itself or the interrupt handler (although
1512  * the interrupt handler may call this routine as part of request-
1513  * completion handling). Unfortunately, it sometimes calls the scheduler
1514  * in interrupt context, which is a big NO! NO!.
1515  **/
1516 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1517 {
1518 	struct scsi_qla_host *ha = to_qla_host(host);
1519 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
1520 	struct iscsi_cls_session *sess = ddb_entry->sess;
1521 	struct srb *srb;
1522 	int rval;
1523 
1524 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1525 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1526 			cmd->result = DID_NO_CONNECT << 16;
1527 		else
1528 			cmd->result = DID_REQUEUE << 16;
1529 		goto qc_fail_command;
1530 	}
1531 
1532 	if (!sess) {
1533 		cmd->result = DID_IMM_RETRY << 16;
1534 		goto qc_fail_command;
1535 	}
1536 
1537 	rval = iscsi_session_chkready(sess);
1538 	if (rval) {
1539 		cmd->result = rval;
1540 		goto qc_fail_command;
1541 	}
1542 
1543 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1544 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1545 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1546 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1547 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1548 	    !test_bit(AF_ONLINE, &ha->flags) ||
1549 	    !test_bit(AF_LINK_UP, &ha->flags) ||
1550 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1551 		goto qc_host_busy;
1552 
1553 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
1554 	if (!srb)
1555 		goto qc_host_busy;
1556 
1557 	rval = qla4xxx_send_command_to_isp(ha, srb);
1558 	if (rval != QLA_SUCCESS)
1559 		goto qc_host_busy_free_sp;
1560 
1561 	return 0;
1562 
1563 qc_host_busy_free_sp:
1564 	qla4xxx_srb_free_dma(ha, srb);
1565 	mempool_free(srb, ha->srb_mempool);
1566 
1567 qc_host_busy:
1568 	return SCSI_MLQUEUE_HOST_BUSY;
1569 
1570 qc_fail_command:
1571 	cmd->scsi_done(cmd);
1572 
1573 	return 0;
1574 }
1575 
1576 /**
1577  * qla4xxx_mem_free - frees memory allocated to adapter
1578  * @ha: Pointer to host adapter structure.
1579  *
1580  * Frees memory previously allocated by qla4xxx_mem_alloc
1581  **/
1582 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1583 {
1584 	if (ha->queues)
1585 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1586 				  ha->queues_dma);
1587 
1588 	ha->queues_len = 0;
1589 	ha->queues = NULL;
1590 	ha->queues_dma = 0;
1591 	ha->request_ring = NULL;
1592 	ha->request_dma = 0;
1593 	ha->response_ring = NULL;
1594 	ha->response_dma = 0;
1595 	ha->shadow_regs = NULL;
1596 	ha->shadow_regs_dma = 0;
1597 
1598 	/* Free srb pool. */
1599 	if (ha->srb_mempool)
1600 		mempool_destroy(ha->srb_mempool);
1601 
1602 	ha->srb_mempool = NULL;
1603 
1604 	if (ha->chap_dma_pool)
1605 		dma_pool_destroy(ha->chap_dma_pool);
1606 
1607 	if (ha->chap_list)
1608 		vfree(ha->chap_list);
1609 	ha->chap_list = NULL;
1610 
1611 	/* release io space registers  */
1612 	if (is_qla8022(ha)) {
1613 		if (ha->nx_pcibase)
1614 			iounmap(
1615 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
1616 	} else if (ha->reg)
1617 		iounmap(ha->reg);
1618 	pci_release_regions(ha->pdev);
1619 }
1620 
1621 /**
1622  * qla4xxx_mem_alloc - allocates memory for use by adapter.
1623  * @ha: Pointer to host adapter structure
1624  *
1625  * Allocates DMA memory for request and response queues. Also allocates memory
1626  * for srbs.
1627  **/
1628 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1629 {
1630 	unsigned long align;
1631 
1632 	/* Allocate contiguous block of DMA memory for queues. */
1633 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1634 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1635 			  sizeof(struct shadow_regs) +
1636 			  MEM_ALIGN_VALUE +
1637 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1638 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1639 					&ha->queues_dma, GFP_KERNEL);
1640 	if (ha->queues == NULL) {
1641 		ql4_printk(KERN_WARNING, ha,
1642 		    "Memory Allocation failed - queues.\n");
1643 
1644 		goto mem_alloc_error_exit;
1645 	}
1646 	memset(ha->queues, 0, ha->queues_len);
1647 
1648 	/*
1649 	 * As per RISC alignment requirements -- the bus-address must be a
1650 	 * multiple of the request-ring size (in bytes).
1651 	 */
1652 	align = 0;
1653 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1654 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1655 					   (MEM_ALIGN_VALUE - 1));
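	/*
	 * Illustrative example (hypothetical numbers): with a MEM_ALIGN_VALUE
	 * of 0x1000 and a queues_dma ending in 0x440, align becomes
	 * 0x1000 - 0x440 = 0xbc0, so request_dma below starts on the next
	 * 0x1000 boundary as required.
	 */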
1656 
1657 	/* Update request and response queue pointers. */
1658 	ha->request_dma = ha->queues_dma + align;
1659 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
1660 	ha->response_dma = ha->queues_dma + align +
1661 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1662 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
1663 						    (REQUEST_QUEUE_DEPTH *
1664 						     QUEUE_SIZE));
1665 	ha->shadow_regs_dma = ha->queues_dma + align +
1666 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1667 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1668 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1669 						  (REQUEST_QUEUE_DEPTH *
1670 						   QUEUE_SIZE) +
1671 						  (RESPONSE_QUEUE_DEPTH *
1672 						   QUEUE_SIZE));
1673 
1674 	/* Allocate memory for srb pool. */
1675 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1676 					 mempool_free_slab, srb_cachep);
1677 	if (ha->srb_mempool == NULL) {
1678 		ql4_printk(KERN_WARNING, ha,
1679 		    "Memory Allocation failed - SRB Pool.\n");
1680 
1681 		goto mem_alloc_error_exit;
1682 	}
1683 
1684 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1685 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
1686 
1687 	if (ha->chap_dma_pool == NULL) {
1688 		ql4_printk(KERN_WARNING, ha,
1689 		    "%s: chap_dma_pool allocation failed.\n", __func__);
1690 		goto mem_alloc_error_exit;
1691 	}
1692 
1693 	return QLA_SUCCESS;
1694 
1695 mem_alloc_error_exit:
1696 	qla4xxx_mem_free(ha);
1697 	return QLA_ERROR;
1698 }
1699 
1700 /**
1701  * qla4_8xxx_check_fw_alive  - Check firmware health
1702  * @ha: Pointer to host adapter structure.
1703  *
1704  * Context: Interrupt
1705  **/
1706 static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1707 {
1708 	uint32_t fw_heartbeat_counter, halt_status;
1709 
1710 	fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1711 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1712 	if (fw_heartbeat_counter == 0xffffffff) {
1713 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1714 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1715 		    ha->host_no, __func__));
1716 		return;
1717 	}
1718 
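	/* Heartbeat counter unchanged since the last poll -- count how many
	 * seconds the firmware has appeared stalled. */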
1719 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1720 		ha->seconds_since_last_heartbeat++;
1721 		/* FW not alive after 2 seconds */
1722 		if (ha->seconds_since_last_heartbeat == 2) {
1723 			ha->seconds_since_last_heartbeat = 0;
1724 			halt_status = qla4_8xxx_rd_32(ha,
1725 						      QLA82XX_PEG_HALT_STATUS1);
1726 
1727 			ql4_printk(KERN_INFO, ha,
1728 				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
1729 				   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1730 				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1731 				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1732 				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1733 				   ha->host_no, __func__, halt_status,
1734 				   qla4_8xxx_rd_32(ha,
1735 						   QLA82XX_PEG_HALT_STATUS2),
1736 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1737 						   0x3c),
1738 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1739 						   0x3c),
1740 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1741 						   0x3c),
1742 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1743 						   0x3c),
1744 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1745 						   0x3c));
1746 
1747 			/* Since we cannot change dev_state in interrupt
1748 			 * context, set the appropriate DPC flag and then
1749 			 * wake up the DPC */
1750 			if (halt_status & HALT_STATUS_UNRECOVERABLE)
1751 				set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1752 			else {
1753 				printk("scsi%ld: %s: detect abort needed!\n",
1754 				    ha->host_no, __func__);
1755 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1756 			}
1757 			qla4xxx_wake_dpc(ha);
1758 			qla4xxx_mailbox_premature_completion(ha);
1759 		}
1760 	} else
1761 		ha->seconds_since_last_heartbeat = 0;
1762 
1763 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
1764 }
1765 
1766 /**
1767  * qla4_8xxx_watchdog - Poll dev state
1768  * @ha: Pointer to host adapter structure.
1769  *
1770  * Context: Interrupt
1771  **/
1772 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1773 {
1774 	uint32_t dev_state;
1775 
1776 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1777 
1778 	/* don't poll if reset is going on */
1779 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1780 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1781 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
1782 		if (dev_state == QLA82XX_DEV_NEED_RESET &&
1783 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1784 			if (!ql4xdontresethba) {
1785 				ql4_printk(KERN_INFO, ha, "%s: HW State: "
1786 				    "NEED RESET!\n", __func__);
1787 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1788 				qla4xxx_wake_dpc(ha);
1789 				qla4xxx_mailbox_premature_completion(ha);
1790 			}
1791 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1792 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1793 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1794 			    __func__);
1795 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1796 			qla4xxx_wake_dpc(ha);
1797 		} else  {
1798 			/* Check firmware health */
1799 			qla4_8xxx_check_fw_alive(ha);
1800 		}
1801 	}
1802 }
1803 
1804 /**
1805  * qla4xxx_timer - checks every second for work to do.
1806  * @ha: Pointer to host adapter structure.
1807  **/
1808 static void qla4xxx_timer(struct scsi_qla_host *ha)
1809 {
1810 	int start_dpc = 0;
1811 	uint16_t w;
1812 
1813 	/* If we are in the middle of AER/EEH processing
1814 	 * skip any processing and reschedule the timer
1815 	 */
1816 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1817 		mod_timer(&ha->timer, jiffies + HZ);
1818 		return;
1819 	}
1820 
1821 	/* Hardware read to trigger an EEH error during mailbox waits. */
1822 	if (!pci_channel_offline(ha->pdev))
1823 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1824 
1825 	if (is_qla8022(ha)) {
1826 		qla4_8xxx_watchdog(ha);
1827 	}
1828 
1829 	if (!is_qla8022(ha)) {
1830 		/* Check for heartbeat interval. */
1831 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1832 		    ha->heartbeat_interval != 0) {
1833 			ha->seconds_since_last_heartbeat++;
1834 			if (ha->seconds_since_last_heartbeat >
1835 			    ha->heartbeat_interval + 2)
1836 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1837 		}
1838 	}
1839 
1840 	/* Wakeup the dpc routine for this adapter, if needed. */
1841 	if (start_dpc ||
1842 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1843 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1844 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
1845 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1846 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1847 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
1848 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
1849 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1850 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1851 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
1852 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1853 			      " - dpc flags = 0x%lx\n",
1854 			      ha->host_no, __func__, ha->dpc_flags));
1855 		qla4xxx_wake_dpc(ha);
1856 	}
1857 
1858 	/* Reschedule timer thread to call us back in one second */
1859 	mod_timer(&ha->timer, jiffies + HZ);
1860 
1861 	DEBUG2(ha->seconds_since_last_intr++);
1862 }
1863 
1864 /**
1865  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1866  * @ha: Pointer to host adapter structure.
1867  *
1868  * This routine stalls the driver until all outstanding commands are returned.
1869  * Caller must release the Hardware Lock prior to calling this routine.
1870  **/
1871 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1872 {
1873 	uint32_t index = 0;
1874 	unsigned long flags;
1875 	struct scsi_cmnd *cmd;
1876 
1877 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1878 
1879 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1880 	    "complete\n", WAIT_CMD_TOV));
1881 
1882 	while (!time_after_eq(jiffies, wtime)) {
1883 		spin_lock_irqsave(&ha->hardware_lock, flags);
1884 		/* Find a command that hasn't completed. */
1885 		for (index = 0; index < ha->host->can_queue; index++) {
1886 			cmd = scsi_host_find_tag(ha->host, index);
1887 			/*
1888 			 * We cannot just check if the index is valid,
1889 			 * because if we are run from the scsi eh, then
1890 			 * the scsi/block layer is going to prevent
1891 			 * the tag from being released.
1892 			 */
1893 			if (cmd != NULL && CMD_SP(cmd))
1894 				break;
1895 		}
1896 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1897 
1898 		/* If no commands are pending, the wait is complete */
1899 		if (index == ha->host->can_queue)
1900 			return QLA_SUCCESS;
1901 
1902 		msleep(1000);
1903 	}
1904 	/* If we timed out waiting for commands to come back,
1905 	 * return ERROR. */
1906 	return QLA_ERROR;
1907 }
1908 
1909 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
1910 {
1911 	uint32_t ctrl_status;
1912 	unsigned long flags = 0;
1913 
1914 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
1915 
1916 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1917 		return QLA_ERROR;
1918 
1919 	spin_lock_irqsave(&ha->hardware_lock, flags);
1920 
1921 	/*
1922 	 * If the SCSI Reset Interrupt bit is set, clear it.
1923 	 * Otherwise, the Soft Reset won't work.
1924 	 */
1925 	ctrl_status = readw(&ha->reg->ctrl_status);
1926 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1927 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1928 
1929 	/* Issue Soft Reset */
1930 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1931 	readl(&ha->reg->ctrl_status);
1932 
1933 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1934 	return QLA_SUCCESS;
1935 }
1936 
1937 /**
1938  * qla4xxx_soft_reset - performs soft reset.
1939  * @ha: Pointer to host adapter structure.
1940  **/
1941 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1942 {
1943 	uint32_t max_wait_time;
1944 	unsigned long flags = 0;
1945 	int status;
1946 	uint32_t ctrl_status;
1947 
1948 	status = qla4xxx_hw_reset(ha);
1949 	if (status != QLA_SUCCESS)
1950 		return status;
1951 
1952 	status = QLA_ERROR;
1953 	/* Wait until the Network Reset Intr bit is cleared */
1954 	max_wait_time = RESET_INTR_TOV;
1955 	do {
1956 		spin_lock_irqsave(&ha->hardware_lock, flags);
1957 		ctrl_status = readw(&ha->reg->ctrl_status);
1958 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1959 
1960 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1961 			break;
1962 
1963 		msleep(1000);
1964 	} while ((--max_wait_time));
1965 
1966 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1967 		DEBUG2(printk(KERN_WARNING
1968 			      "scsi%ld: Network Reset Intr not cleared by "
1969 			      "Network function, clearing it now!\n",
1970 			      ha->host_no));
1971 		spin_lock_irqsave(&ha->hardware_lock, flags);
1972 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1973 		readl(&ha->reg->ctrl_status);
1974 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1975 	}
1976 
1977 	/* Wait until the firmware tells us the Soft Reset is done */
1978 	max_wait_time = SOFT_RESET_TOV;
1979 	do {
1980 		spin_lock_irqsave(&ha->hardware_lock, flags);
1981 		ctrl_status = readw(&ha->reg->ctrl_status);
1982 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1983 
1984 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1985 			status = QLA_SUCCESS;
1986 			break;
1987 		}
1988 
1989 		msleep(1000);
1990 	} while ((--max_wait_time));
1991 
1992 	/*
1993 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1994 	 * after the soft reset has taken place.
1995 	 */
1996 	spin_lock_irqsave(&ha->hardware_lock, flags);
1997 	ctrl_status = readw(&ha->reg->ctrl_status);
1998 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
1999 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2000 		readl(&ha->reg->ctrl_status);
2001 	}
2002 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2003 
2004 	/* If the soft reset fails, then most probably the BIOS on the other
2005 	 * function is also enabled.
2006 	 * Since the initialization is sequential, the other function
2007 	 * won't be able to acknowledge the soft reset.
2008 	 * Issue a force soft reset to work around this scenario.
2009 	 */
2010 	if (max_wait_time == 0) {
2011 		/* Issue Force Soft Reset */
2012 		spin_lock_irqsave(&ha->hardware_lock, flags);
2013 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2014 		readl(&ha->reg->ctrl_status);
2015 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2016 		/* Wait until the firmware tells us the Soft Reset is done */
2017 		max_wait_time = SOFT_RESET_TOV;
2018 		do {
2019 			spin_lock_irqsave(&ha->hardware_lock, flags);
2020 			ctrl_status = readw(&ha->reg->ctrl_status);
2021 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
2022 
2023 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2024 				status = QLA_SUCCESS;
2025 				break;
2026 			}
2027 
2028 			msleep(1000);
2029 		} while ((--max_wait_time));
2030 	}
2031 
2032 	return status;
2033 }
2034 
2035 /**
2036  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2037  * @ha: Pointer to host adapter structure.
2038  * @res: returned scsi status
2039  *
2040  * This routine is called just prior to a HARD RESET to return all
2041  * outstanding commands back to the Operating System.
2042  * Caller should make sure that the following locks are released
2043  * before calling this routine: hardware_lock and io_request_lock.
2044  **/
2045 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2046 {
2047 	struct srb *srb;
2048 	int i;
2049 	unsigned long flags;
2050 
2051 	spin_lock_irqsave(&ha->hardware_lock, flags);
2052 	for (i = 0; i < ha->host->can_queue; i++) {
2053 		srb = qla4xxx_del_from_active_array(ha, i);
2054 		if (srb != NULL) {
2055 			srb->cmd->result = res;
2056 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2057 		}
2058 	}
2059 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2060 }
2061 
2062 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2063 {
2064 	clear_bit(AF_ONLINE, &ha->flags);
2065 
2066 	/* Disable the board */
2067 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2068 
2069 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2070 	qla4xxx_mark_all_devices_missing(ha);
2071 	clear_bit(AF_INIT_DONE, &ha->flags);
2072 }
2073 
2074 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2075 {
2076 	struct iscsi_session *sess;
2077 	struct ddb_entry *ddb_entry;
2078 
2079 	sess = cls_session->dd_data;
2080 	ddb_entry = sess->dd_data;
2081 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2082 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2083 }
2084 
2085 /**
2086  * qla4xxx_recover_adapter - recovers adapter after a fatal error
2087  * @ha: Pointer to host adapter structure.
2088  **/
2089 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2090 {
2091 	int status = QLA_ERROR;
2092 	uint8_t reset_chip = 0;
2093 
2094 	/* Stall incoming I/O until we are done */
2095 	scsi_block_requests(ha->host);
2096 	clear_bit(AF_ONLINE, &ha->flags);
2097 	clear_bit(AF_LINK_UP, &ha->flags);
2098 
2099 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2100 
2101 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2102 
2103 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2104 
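	/* DPC_RESET_HA means a full chip reset is required rather than just
	 * a firmware-context reset. */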
2105 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2106 		reset_chip = 1;
2107 
2108 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2109 	 * do not reset adapter, jump to initialize_adapter */
2110 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2111 		status = QLA_SUCCESS;
2112 		goto recover_ha_init_adapter;
2113 	}
2114 
2115 	/* For the ISP-82xx adapter, issue a stop_firmware if invoked
2116 	 * from eh_host_reset or ioctl module */
2117 	if (is_qla8022(ha) && !reset_chip &&
2118 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2119 
2120 		DEBUG2(ql4_printk(KERN_INFO, ha,
2121 		    "scsi%ld: %s - Performing stop_firmware...\n",
2122 		    ha->host_no, __func__));
2123 		status = ha->isp_ops->reset_firmware(ha);
2124 		if (status == QLA_SUCCESS) {
2125 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2126 				qla4xxx_cmd_wait(ha);
2127 			ha->isp_ops->disable_intrs(ha);
2128 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2129 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2130 		} else {
2131 			/* If the stop_firmware fails then
2132 			 * reset the entire chip */
2133 			reset_chip = 1;
2134 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2135 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
2136 		}
2137 	}
2138 
2139 	/* Issue full chip reset if recovering from a catastrophic error,
2140 	 * or if stop_firmware fails for ISP-82xx.
2141 	 * This is the default case for ISP-4xxx */
2142 	if (!is_qla8022(ha) || reset_chip) {
2143 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2144 			qla4xxx_cmd_wait(ha);
2145 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2146 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2147 		DEBUG2(ql4_printk(KERN_INFO, ha,
2148 		    "scsi%ld: %s - Performing chip reset..\n",
2149 		    ha->host_no, __func__));
2150 		status = ha->isp_ops->reset_chip(ha);
2151 	}
2152 
2153 	/* Flush any pending ddb changed AENs */
2154 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2155 
2156 recover_ha_init_adapter:
2157 	/* Upon successful firmware/chip reset, re-initialize the adapter */
2158 	if (status == QLA_SUCCESS) {
2159 		/* For ISP-4xxx, force function 1 to always initialize
2160 		 * before function 3 to prevent both functions from
2161 		 * stepping on top of each other */
2162 		if (!is_qla8022(ha) && (ha->mac_index == 3))
2163 			ssleep(6);
2164 
2165 		/* NOTE: AF_ONLINE flag set upon successful completion of
2166 		 *       qla4xxx_initialize_adapter */
2167 		status = qla4xxx_initialize_adapter(ha);
2168 	}
2169 
2170 	/* Retry failed adapter initialization, if necessary.
2171 	 * Do not retry initialize_adapter for the RESET_HA_INTR (ISP-4xxx
2172 	 * specific) case, to prevent ping-pong resets between functions */
2173 	if (!test_bit(AF_ONLINE, &ha->flags) &&
2174 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2175 		/* Adapter initialization failed, see if we can retry
2176 		 * resetting the ha.
2177 		 * Since we don't want to block the DPC for too long
2178 		 * with multiple resets in the same thread,
2179 		 * utilize DPC to retry */
2180 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2181 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2182 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
2183 				      "(%d) more times\n", ha->host_no,
2184 				      ha->retry_reset_ha_cnt));
2185 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2186 			status = QLA_ERROR;
2187 		} else {
2188 			if (ha->retry_reset_ha_cnt > 0) {
2189 				/* Schedule another Reset HA--DPC will retry */
2190 				ha->retry_reset_ha_cnt--;
2191 				DEBUG2(printk("scsi%ld: recover adapter - "
2192 					      "retry remaining %d\n",
2193 					      ha->host_no,
2194 					      ha->retry_reset_ha_cnt));
2195 				status = QLA_ERROR;
2196 			}
2197 
2198 			if (ha->retry_reset_ha_cnt == 0) {
2199 				/* Recover adapter retries have been exhausted.
2200 				 * Adapter DEAD */
2201 				DEBUG2(printk("scsi%ld: recover adapter "
2202 					      "failed - board disabled\n",
2203 					      ha->host_no));
2204 				qla4xxx_dead_adapter_cleanup(ha);
2205 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2206 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2207 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
2208 					  &ha->dpc_flags);
2209 				status = QLA_ERROR;
2210 			}
2211 		}
2212 	} else {
2213 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2214 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2215 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2216 	}
2217 
2218 	ha->adapter_error_count++;
2219 
2220 	if (test_bit(AF_ONLINE, &ha->flags))
2221 		ha->isp_ops->enable_intrs(ha);
2222 
2223 	scsi_unblock_requests(ha->host);
2224 
2225 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2226 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
2227 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
2228 
2229 	return status;
2230 }
2231 
2232 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2233 {
2234 	struct iscsi_session *sess;
2235 	struct ddb_entry *ddb_entry;
2236 	struct scsi_qla_host *ha;
2237 
2238 	sess = cls_session->dd_data;
2239 	ddb_entry = sess->dd_data;
2240 	ha = ddb_entry->ha;
2241 	if (!iscsi_is_session_online(cls_session)) {
2242 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2243 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2244 				   " unblock session\n", ha->host_no, __func__,
2245 				   ddb_entry->fw_ddb_index);
2246 			iscsi_unblock_session(ddb_entry->sess);
2247 		} else {
2248 			/* Trigger relogin */
2249 			iscsi_session_failure(cls_session->dd_data,
2250 					      ISCSI_ERR_CONN_FAILED);
2251 		}
2252 	}
2253 }
2254 
2255 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2256 {
2257 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2258 }
2259 
2260 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2261 {
2262 	if (ha->dpc_thread)
2263 		queue_work(ha->dpc_thread, &ha->dpc_work);
2264 }
2265 
2266 /**
2267  * qla4xxx_do_dpc - dpc routine
2268  * @data: in our case pointer to adapter structure
2269  * @work: pointer to the dpc work_struct within the adapter structure
2270  *
2271  * This routine is a task that is scheduled by the interrupt handler
2272  * on a task queue that is consumed whenever the scheduler runs; that's
2273  * so you can do anything (i.e. put the process to sleep etc).  In fact,
2274  * the mid-level tries to sleep when it reaches the driver threshold
2275  * "host->can_queue". This can cause a panic if we were in our interrupt code.
2276  **/
2277 static void qla4xxx_do_dpc(struct work_struct *work)
2278 {
2279 	struct scsi_qla_host *ha =
2280 		container_of(work, struct scsi_qla_host, dpc_work);
2281 	int status = QLA_ERROR;
2282 
2283 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
2284 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2285 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
2286 
2287 	/* Initialization not yet finished. Don't do anything yet. */
2288 	if (!test_bit(AF_INIT_DONE, &ha->flags))
2289 		return;
2290 
2291 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2292 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2293 		    ha->host_no, __func__, ha->flags));
2294 		return;
2295 	}
2296 
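	/* ISP-82xx specific DPC work: mark the device FAILED on an
	 * unrecoverable error and service any pending quiescent request. */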
2297 	if (is_qla8022(ha)) {
2298 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2299 			qla4_8xxx_idc_lock(ha);
2300 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2301 			    QLA82XX_DEV_FAILED);
2302 			qla4_8xxx_idc_unlock(ha);
2303 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2304 			qla4_8xxx_device_state_handler(ha);
2305 		}
2306 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2307 			qla4_8xxx_need_qsnt_handler(ha);
2308 		}
2309 	}
2310 
2311 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2312 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2313 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2314 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2315 		if (ql4xdontresethba) {
2316 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2317 			    ha->host_no, __func__));
2318 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2319 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2320 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2321 			goto dpc_post_reset_ha;
2322 		}
2323 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2324 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
2325 			qla4xxx_recover_adapter(ha);
2326 
2327 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2328 			uint8_t wait_time = RESET_INTR_TOV;
2329 
2330 			while ((readw(&ha->reg->ctrl_status) &
2331 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2332 				if (--wait_time == 0)
2333 					break;
2334 				msleep(1000);
2335 			}
2336 			if (wait_time == 0)
2337 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
2338 					      "bit not cleared-- resetting\n",
2339 					      ha->host_no, __func__));
2340 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2341 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2342 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2343 				status = qla4xxx_recover_adapter(ha);
2344 			}
2345 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2346 			if (status == QLA_SUCCESS)
2347 				ha->isp_ops->enable_intrs(ha);
2348 		}
2349 	}
2350 
2351 dpc_post_reset_ha:
2352 	/* ---- process AEN? --- */
2353 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2354 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2355 
2356 	/* ---- Get DHCP IP Address? --- */
2357 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2358 		qla4xxx_get_dhcp_ip_address(ha);
2359 
2360 	/* ---- link change? --- */
2361 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2362 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
2363 			/* ---- link down? --- */
2364 			qla4xxx_mark_all_devices_missing(ha);
2365 		} else {
2366 			/* ---- link up? --- *
2367 			 * F/W will auto login to all devices ONLY ONCE after
2368 			 * link up during driver initialization and runtime
2369 			 * fatal error recovery.  Therefore, the driver must
2370 			 * manually relogin to devices when recovering from
2371 			 * connection failures, logouts, expired KATO, etc. */
2372 
2373 			qla4xxx_relogin_all_devices(ha);
2374 		}
2375 	}
2376 }
2377 
2378 /**
2379  * qla4xxx_free_adapter - release the adapter
2380  * @ha: pointer to adapter structure
2381  **/
2382 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2383 {
2384 
2385 	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2386 		/* Turn-off interrupts on the card. */
2387 		ha->isp_ops->disable_intrs(ha);
2388 	}
2389 
2390 	/* Remove timer thread, if present */
2391 	if (ha->timer_active)
2392 		qla4xxx_stop_timer(ha);
2393 
2394 	/* Kill the kernel thread for this host */
2395 	if (ha->dpc_thread)
2396 		destroy_workqueue(ha->dpc_thread);
2397 
2398 	/* Kill the task work queue for this host */
2399 	if (ha->task_wq)
2400 		destroy_workqueue(ha->task_wq);
2401 
2402 	/* Put firmware in known state */
2403 	ha->isp_ops->reset_firmware(ha);
2404 
2405 	if (is_qla8022(ha)) {
2406 		qla4_8xxx_idc_lock(ha);
2407 		qla4_8xxx_clear_drv_active(ha);
2408 		qla4_8xxx_idc_unlock(ha);
2409 	}
2410 
2411 	/* Detach interrupts */
2412 	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
2413 		qla4xxx_free_irqs(ha);
2414 
2415 	/* free extra memory */
2416 	qla4xxx_mem_free(ha);
2417 }
2418 
2419 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2420 {
2421 	int status = 0;
2422 	uint8_t revision_id;
2423 	unsigned long mem_base, mem_len, db_base, db_len;
2424 	struct pci_dev *pdev = ha->pdev;
2425 
2426 	status = pci_request_regions(pdev, DRIVER_NAME);
2427 	if (status) {
2428 		printk(KERN_WARNING
2429 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
2430 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
2431 		goto iospace_error_exit;
2432 	}
2433 
2434 	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2435 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2436 	    __func__, revision_id));
2437 	ha->revision_id = revision_id;
2438 
2439 	/* remap phys address */
2440 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2441 	mem_len = pci_resource_len(pdev, 0);
2442 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2443 	    __func__, mem_base, mem_len));
2444 
2445 	/* mapping of pcibase pointer */
2446 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2447 	if (!ha->nx_pcibase) {
2448 		printk(KERN_ERR
2449 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2450 		pci_release_regions(ha->pdev);
2451 		goto iospace_error_exit;
2452 	}
2453 
2454 	/* Mapping of IO base pointer, door bell read and write pointer */
2455 
2456 	/* mapping of IO base pointer */
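	/* Registers for this function live at offset 0xbc000 plus a 2 KB
	 * (devfn << 11) stride per PCI function within the BAR 0 mapping. */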
2457 	ha->qla4_8xxx_reg =
2458 	    (struct device_reg_82xx  __iomem *)((uint8_t *)ha->nx_pcibase +
2459 	    0xbc000 + (ha->pdev->devfn << 11));
2460 
2461 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
2462 	db_len = pci_resource_len(pdev, 4);
2463 
2464 	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2465 	    QLA82XX_CAM_RAM_DB2);
2466 
2467 	return 0;
2468 iospace_error_exit:
2469 	return -ENOMEM;
2470 }
2471 
2472 /**
2473  * qla4xxx_iospace_config - maps registers
2474  * @ha: pointer to adapter structure
2475  *
2476  * This routine maps the HBA's registers from the PCI address space
2477  * into the kernel virtual address space for memory-mapped I/O.
2478  **/
2479 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
2480 {
2481 	unsigned long pio, pio_len, pio_flags;
2482 	unsigned long mmio, mmio_len, mmio_flags;
2483 
2484 	pio = pci_resource_start(ha->pdev, 0);
2485 	pio_len = pci_resource_len(ha->pdev, 0);
2486 	pio_flags = pci_resource_flags(ha->pdev, 0);
2487 	if (pio_flags & IORESOURCE_IO) {
2488 		if (pio_len < MIN_IOBASE_LEN) {
2489 			ql4_printk(KERN_WARNING, ha,
2490 				"Invalid PCI I/O region size\n");
2491 			pio = 0;
2492 		}
2493 	} else {
2494 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
2495 		pio = 0;
2496 	}
2497 
2498 	/* Use MMIO operations for all accesses. */
2499 	mmio = pci_resource_start(ha->pdev, 1);
2500 	mmio_len = pci_resource_len(ha->pdev, 1);
2501 	mmio_flags = pci_resource_flags(ha->pdev, 1);
2502 
2503 	if (!(mmio_flags & IORESOURCE_MEM)) {
2504 		ql4_printk(KERN_ERR, ha,
2505 		    "region #1 not an MMIO resource, aborting\n");
2506 
2507 		goto iospace_error_exit;
2508 	}
2509 
2510 	if (mmio_len < MIN_IOBASE_LEN) {
2511 		ql4_printk(KERN_ERR, ha,
2512 		    "Invalid PCI mem region size, aborting\n");
2513 		goto iospace_error_exit;
2514 	}
2515 
2516 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
2517 		ql4_printk(KERN_WARNING, ha,
2518 		    "Failed to reserve PIO/MMIO regions\n");
2519 
2520 		goto iospace_error_exit;
2521 	}
2522 
2523 	ha->pio_address = pio;
2524 	ha->pio_length = pio_len;
2525 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2526 	if (!ha->reg) {
2527 		ql4_printk(KERN_ERR, ha,
2528 		    "cannot remap MMIO, aborting\n");
2529 
2530 		goto iospace_error_exit;
2531 	}
2532 
2533 	return 0;
2534 
2535 iospace_error_exit:
2536 	return -ENOMEM;
2537 }
2538 
2539 static struct isp_operations qla4xxx_isp_ops = {
2540 	.iospace_config         = qla4xxx_iospace_config,
2541 	.pci_config             = qla4xxx_pci_config,
2542 	.disable_intrs          = qla4xxx_disable_intrs,
2543 	.enable_intrs           = qla4xxx_enable_intrs,
2544 	.start_firmware         = qla4xxx_start_firmware,
2545 	.intr_handler           = qla4xxx_intr_handler,
2546 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
2547 	.reset_chip             = qla4xxx_soft_reset,
2548 	.reset_firmware         = qla4xxx_hw_reset,
2549 	.queue_iocb             = qla4xxx_queue_iocb,
2550 	.complete_iocb          = qla4xxx_complete_iocb,
2551 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
2552 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
2553 	.get_sys_info           = qla4xxx_get_sys_info,
2554 };
2555 
2556 static struct isp_operations qla4_8xxx_isp_ops = {
2557 	.iospace_config         = qla4_8xxx_iospace_config,
2558 	.pci_config             = qla4_8xxx_pci_config,
2559 	.disable_intrs          = qla4_8xxx_disable_intrs,
2560 	.enable_intrs           = qla4_8xxx_enable_intrs,
2561 	.start_firmware         = qla4_8xxx_load_risc,
2562 	.intr_handler           = qla4_8xxx_intr_handler,
2563 	.interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2564 	.reset_chip             = qla4_8xxx_isp_reset,
2565 	.reset_firmware         = qla4_8xxx_stop_firmware,
2566 	.queue_iocb             = qla4_8xxx_queue_iocb,
2567 	.complete_iocb          = qla4_8xxx_complete_iocb,
2568 	.rd_shdw_req_q_out      = qla4_8xxx_rd_shdw_req_q_out,
2569 	.rd_shdw_rsp_q_in       = qla4_8xxx_rd_shdw_rsp_q_in,
2570 	.get_sys_info           = qla4_8xxx_get_sys_info,
2571 };
2572 
2573 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2574 {
2575 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2576 }
2577 
2578 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2579 {
2580 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2581 }
2582 
2583 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2584 {
2585 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2586 }
2587 
2588 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2589 {
2590 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2591 }
2592 
2593 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2594 {
2595 	struct scsi_qla_host *ha = data;
2596 	char *str = buf;
2597 	int rc;
2598 
2599 	switch (type) {
2600 	case ISCSI_BOOT_ETH_FLAGS:
2601 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2602 		break;
2603 	case ISCSI_BOOT_ETH_INDEX:
2604 		rc = sprintf(str, "0\n");
2605 		break;
2606 	case ISCSI_BOOT_ETH_MAC:
2607 		rc = sysfs_format_mac(str, ha->my_mac,
2608 				      MAC_ADDR_LEN);
2609 		break;
2610 	default:
2611 		rc = -ENOSYS;
2612 		break;
2613 	}
2614 	return rc;
2615 }
2616 
2617 static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2618 {
2619 	int rc;
2620 
2621 	switch (type) {
2622 	case ISCSI_BOOT_ETH_FLAGS:
2623 	case ISCSI_BOOT_ETH_MAC:
2624 	case ISCSI_BOOT_ETH_INDEX:
2625 		rc = S_IRUGO;
2626 		break;
2627 	default:
2628 		rc = 0;
2629 		break;
2630 	}
2631 	return rc;
2632 }
2633 
2634 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2635 {
2636 	struct scsi_qla_host *ha = data;
2637 	char *str = buf;
2638 	int rc;
2639 
2640 	switch (type) {
2641 	case ISCSI_BOOT_INI_INITIATOR_NAME:
2642 		rc = sprintf(str, "%s\n", ha->name_string);
2643 		break;
2644 	default:
2645 		rc = -ENOSYS;
2646 		break;
2647 	}
2648 	return rc;
2649 }
2650 
2651 static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2652 {
2653 	int rc;
2654 
2655 	switch (type) {
2656 	case ISCSI_BOOT_INI_INITIATOR_NAME:
2657 		rc = S_IRUGO;
2658 		break;
2659 	default:
2660 		rc = 0;
2661 		break;
2662 	}
2663 	return rc;
2664 }
2665 
2666 static ssize_t
2667 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2668 			   char *buf)
2669 {
2670 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2671 	char *str = buf;
2672 	int rc;
2673 
2674 	switch (type) {
2675 	case ISCSI_BOOT_TGT_NAME:
2676 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2677 		break;
2678 	case ISCSI_BOOT_TGT_IP_ADDR:
2679 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2680 			rc = sprintf(buf, "%pI4\n",
2681 				     &boot_conn->dest_ipaddr.ip_address);
2682 		else
2683 			rc = sprintf(str, "%pI6\n",
2684 				     &boot_conn->dest_ipaddr.ip_address);
2685 		break;
2686 	case ISCSI_BOOT_TGT_PORT:
2687 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
2688 		break;
2689 	case ISCSI_BOOT_TGT_CHAP_NAME:
2690 		rc = sprintf(str,  "%.*s\n",
2691 			     boot_conn->chap.target_chap_name_length,
2692 			     (char *)&boot_conn->chap.target_chap_name);
2693 		break;
2694 	case ISCSI_BOOT_TGT_CHAP_SECRET:
2695 		rc = sprintf(str,  "%.*s\n",
2696 			     boot_conn->chap.target_secret_length,
2697 			     (char *)&boot_conn->chap.target_secret);
2698 		break;
2699 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2700 		rc = sprintf(str,  "%.*s\n",
2701 			     boot_conn->chap.intr_chap_name_length,
2702 			     (char *)&boot_conn->chap.intr_chap_name);
2703 		break;
2704 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2705 		rc = sprintf(str,  "%.*s\n",
2706 			     boot_conn->chap.intr_secret_length,
2707 			     (char *)&boot_conn->chap.intr_secret);
2708 		break;
2709 	case ISCSI_BOOT_TGT_FLAGS:
2710 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2711 		break;
2712 	case ISCSI_BOOT_TGT_NIC_ASSOC:
2713 		rc = sprintf(str, "0\n");
2714 		break;
2715 	default:
2716 		rc = -ENOSYS;
2717 		break;
2718 	}
2719 	return rc;
2720 }
2721 
2722 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2723 {
2724 	struct scsi_qla_host *ha = data;
2725 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2726 
2727 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2728 }
2729 
2730 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2731 {
2732 	struct scsi_qla_host *ha = data;
2733 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2734 
2735 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2736 }
2737 
2738 static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2739 {
2740 	int rc;
2741 
2742 	switch (type) {
2743 	case ISCSI_BOOT_TGT_NAME:
2744 	case ISCSI_BOOT_TGT_IP_ADDR:
2745 	case ISCSI_BOOT_TGT_PORT:
2746 	case ISCSI_BOOT_TGT_CHAP_NAME:
2747 	case ISCSI_BOOT_TGT_CHAP_SECRET:
2748 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2749 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2750 	case ISCSI_BOOT_TGT_NIC_ASSOC:
2751 	case ISCSI_BOOT_TGT_FLAGS:
2752 		rc = S_IRUGO;
2753 		break;
2754 	default:
2755 		rc = 0;
2756 		break;
2757 	}
2758 	return rc;
2759 }
2760 
2761 static void qla4xxx_boot_release(void *data)
2762 {
2763 	struct scsi_qla_host *ha = data;
2764 
2765 	scsi_host_put(ha->host);
2766 }
2767 
2768 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2769 {
2770 	dma_addr_t buf_dma;
2771 	uint32_t addr, pri_addr, sec_addr;
2772 	uint32_t offset;
2773 	uint16_t func_num;
2774 	uint8_t val;
2775 	uint8_t *buf = NULL;
2776 	size_t size = 13 * sizeof(uint8_t);
2777 	int ret = QLA_SUCCESS;
2778 
2779 	func_num = PCI_FUNC(ha->pdev->devfn);
2780 
2781 	DEBUG2(ql4_printk(KERN_INFO, ha,
2782 			  "%s: Get FW boot info for 0x%x func %d\n", __func__,
2783 			  (is_qla4032(ha) ? PCI_DEVICE_ID_QLOGIC_ISP4032 :
2784 			   PCI_DEVICE_ID_QLOGIC_ISP8022), func_num));
2785 
2786 	if (is_qla4032(ha)) {
2787 		if (func_num == 1) {
2788 			addr = NVRAM_PORT0_BOOT_MODE;
2789 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2790 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2791 		} else if (func_num == 3) {
2792 			addr = NVRAM_PORT1_BOOT_MODE;
2793 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2794 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2795 		} else {
2796 			ret = QLA_ERROR;
2797 			goto exit_boot_info;
2798 		}
2799 
2800 		/* Check Boot Mode */
2801 		val = rd_nvram_byte(ha, addr);
2802 		if (!(val & 0x07)) {
2803 			DEBUG2(ql4_printk(KERN_ERR, ha,
2804 					  "%s: Failed Boot options : 0x%x\n",
2805 					  __func__, val));
2806 			ret = QLA_ERROR;
2807 			goto exit_boot_info;
2808 		}
2809 
2810 		/* get primary valid target index */
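		/* BIT_7 marks the NVRAM entry as valid; the low seven bits
		 * hold the DDB index. */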
2811 		val = rd_nvram_byte(ha, pri_addr);
2812 		if (val & BIT_7)
2813 			ddb_index[0] = (val & 0x7f);
2814 
2815 		/* get secondary valid target index */
2816 		val = rd_nvram_byte(ha, sec_addr);
2817 		if (val & BIT_7)
2818 			ddb_index[1] = (val & 0x7f);
2819 
2820 	} else if (is_qla8022(ha)) {
2821 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
2822 					 &buf_dma, GFP_KERNEL);
2823 		if (!buf) {
2824 			DEBUG2(ql4_printk(KERN_ERR, ha,
2825 					  "%s: Unable to allocate dma buffer\n",
2826 					   __func__));
2827 			ret = QLA_ERROR;
2828 			goto exit_boot_info;
2829 		}
2830 
2831 		if (ha->port_num == 0)
2832 			offset = BOOT_PARAM_OFFSET_PORT0;
2833 		else if (ha->port_num == 1)
2834 			offset = BOOT_PARAM_OFFSET_PORT1;
2835 		else {
2836 			ret = QLA_ERROR;
2837 			goto exit_boot_info_free;
2838 		}
2839 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2840 		       offset;
2841 		if (qla4xxx_get_flash(ha, buf_dma, addr,
2842 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
2843 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
2844 					  " failed\n", ha->host_no, __func__));
2845 			ret = QLA_ERROR;
2846 			goto exit_boot_info_free;
2847 		}
2848 		/* Check Boot Mode */
2849 		if (!(buf[1] & 0x07)) {
2850 			DEBUG2(ql4_printk(KERN_INFO, ha,
2851 					  "Failed: Boot options : 0x%x\n",
2852 					  buf[1]));
2853 			ret = QLA_ERROR;
2854 			goto exit_boot_info_free;
2855 		}
2856 
2857 		/* get primary valid target index */
2858 		if (buf[2] & BIT_7)
2859 			ddb_index[0] = buf[2] & 0x7f;
2860 
2861 		/* get secondary valid target index */
2862 		if (buf[11] & BIT_7)
2863 			ddb_index[1] = buf[11] & 0x7f;
2864 	} else {
2865 		ret = QLA_ERROR;
2866 		goto exit_boot_info;
2867 	}
2868 
2869 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2870 			  " target ID %d\n", __func__, ddb_index[0],
2871 			  ddb_index[1]));
2872 
2873 exit_boot_info_free:
2874 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2875 exit_boot_info:
2876 	return ret;
2877 }
2878 
2879 /**
2880  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
2881  * @ha: pointer to adapter structure
2882  * @username: CHAP username to be returned
2883  * @password: CHAP password to be returned
2884  *
2885  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
2886  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
2887  * So, from the CHAP cache, find the first BIDI CHAP entry and set it
2888  * in the boot record in sysfs.
2889  **/
2890 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
2891 			    char *password)
2892 {
2893 	int i, ret = -EINVAL;
2894 	int max_chap_entries = 0;
2895 	struct ql4_chap_table *chap_table;
2896 
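	/* On ISP-82xx the flash CHAP region is shared by both ports, so only
	 * half of it belongs to this function. */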
2897 	if (is_qla8022(ha))
2898 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
2899 						sizeof(struct ql4_chap_table);
2900 	else
2901 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
2902 
2903 	if (!ha->chap_list) {
2904 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
2905 		return ret;
2906 	}
2907 
2908 	mutex_lock(&ha->chap_sem);
2909 	for (i = 0; i < max_chap_entries; i++) {
2910 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
2911 		if (chap_table->cookie !=
2912 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
2913 			continue;
2914 		}
2915 
2916 		if (chap_table->flags & BIT_7) /* local */
2917 			continue;
2918 
2919 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
2920 			continue;
2921 
2922 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
2923 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
2924 		ret = 0;
2925 		break;
2926 	}
2927 	mutex_unlock(&ha->chap_sem);
2928 
2929 	return ret;
2930 }
2931 
2932 
2933 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2934 				   struct ql4_boot_session_info *boot_sess,
2935 				   uint16_t ddb_index)
2936 {
2937 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2938 	struct dev_db_entry *fw_ddb_entry;
2939 	dma_addr_t fw_ddb_entry_dma;
2940 	uint16_t idx;
2941 	uint16_t options;
2942 	int ret = QLA_SUCCESS;
2943 
2944 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2945 					  &fw_ddb_entry_dma, GFP_KERNEL);
2946 	if (!fw_ddb_entry) {
2947 		DEBUG2(ql4_printk(KERN_ERR, ha,
2948 				  "%s: Unable to allocate dma buffer.\n",
2949 				  __func__));
2950 		ret = QLA_ERROR;
2951 		return ret;
2952 	}
2953 
2954 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2955 				   fw_ddb_entry_dma, ddb_index)) {
2956 		DEBUG2(ql4_printk(KERN_ERR, ha,
2957 				  "%s: Flash DDB read Failed\n", __func__));
2958 		ret = QLA_ERROR;
2959 		goto exit_boot_target;
2960 	}
2961 
2962 	/* Update target name and IP from DDB */
2963 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2964 	       min(sizeof(boot_sess->target_name),
2965 		   sizeof(fw_ddb_entry->iscsi_name)));
2966 
2967 	options = le16_to_cpu(fw_ddb_entry->options);
2968 	if (options & DDB_OPT_IPV6_DEVICE) {
2969 		memcpy(&boot_conn->dest_ipaddr.ip_address,
2970 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2971 	} else {
2972 		boot_conn->dest_ipaddr.ip_type = 0x1;
2973 		memcpy(&boot_conn->dest_ipaddr.ip_address,
2974 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2975 	}
2976 
2977 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2978 
2979 	/* update chap information */
2980 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2981 
2982 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
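	/* If CHAP is enabled for this DDB (BIT_7 of iscsi_options), fetch the
	 * CHAP name and secret from the CHAP table entry at chap_tbl_idx. */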
2983 
2984 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2985 
2986 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2987 				       target_chap_name,
2988 				       (char *)&boot_conn->chap.target_secret,
2989 				       idx);
2990 		if (ret) {
2991 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2992 			ret = QLA_ERROR;
2993 			goto exit_boot_target;
2994 		}
2995 
2996 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2997 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2998 	}
2999 
3000 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
3001 
3002 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
3003 
3004 		ret = qla4xxx_get_bidi_chap(ha,
3005 				    (char *)&boot_conn->chap.intr_chap_name,
3006 				    (char *)&boot_conn->chap.intr_secret);
3007 
3008 		if (ret) {
3009 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
3010 			ret = QLA_ERROR;
3011 			goto exit_boot_target;
3012 		}
3013 
3014 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3015 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3016 	}
3017 
3018 exit_boot_target:
3019 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3020 			  fw_ddb_entry, fw_ddb_entry_dma);
3021 	return ret;
3022 }
3023 
3024 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3025 {
3026 	uint16_t ddb_index[2];
3027 	int ret = QLA_ERROR;
3028 	int rval;
3029 
3030 	memset(ddb_index, 0, sizeof(ddb_index));
3031 	ddb_index[0] = 0xffff;
3032 	ddb_index[1] = 0xffff;
3033 	ret = get_fw_boot_info(ha, ddb_index);
3034 	if (ret != QLA_SUCCESS) {
3035 		DEBUG2(ql4_printk(KERN_ERR, ha,
3036 				  "%s: Failed to get boot info.\n", __func__));
3037 		return ret;
3038 	}
3039 
3040 	if (ddb_index[0] == 0xffff)
3041 		goto sec_target;
3042 
3043 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
3044 				      ddb_index[0]);
3045 	if (rval != QLA_SUCCESS) {
3046 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3047 				  "primary target\n", __func__));
3048 	} else
3049 		ret = QLA_SUCCESS;
3050 
3051 sec_target:
3052 	if (ddb_index[1] == 0xffff)
3053 		goto exit_get_boot_info;
3054 
3055 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
3056 				      ddb_index[1]);
3057 	if (rval != QLA_SUCCESS) {
3058 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3059 				  "secondary target\n", __func__));
3060 	} else
3061 		ret = QLA_SUCCESS;
3062 
3063 exit_get_boot_info:
3064 	return ret;
3065 }
3066 
3067 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3068 {
3069 	struct iscsi_boot_kobj *boot_kobj;
3070 
3071 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3072 		return 0;
3073 
3074 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3075 	if (!ha->boot_kset)
3076 		goto kset_free;
3077 
3078 	if (!scsi_host_get(ha->host))
3079 		goto kset_free;
3080 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3081 					     qla4xxx_show_boot_tgt_pri_info,
3082 					     qla4xxx_tgt_get_attr_visibility,
3083 					     qla4xxx_boot_release);
3084 	if (!boot_kobj)
3085 		goto put_host;
3086 
3087 	if (!scsi_host_get(ha->host))
3088 		goto kset_free;
3089 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3090 					     qla4xxx_show_boot_tgt_sec_info,
3091 					     qla4xxx_tgt_get_attr_visibility,
3092 					     qla4xxx_boot_release);
3093 	if (!boot_kobj)
3094 		goto put_host;
3095 
3096 	if (!scsi_host_get(ha->host))
3097 		goto kset_free;
3098 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3099 					       qla4xxx_show_boot_ini_info,
3100 					       qla4xxx_ini_get_attr_visibility,
3101 					       qla4xxx_boot_release);
3102 	if (!boot_kobj)
3103 		goto put_host;
3104 
3105 	if (!scsi_host_get(ha->host))
3106 		goto kset_free;
3107 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3108 					       qla4xxx_show_boot_eth_info,
3109 					       qla4xxx_eth_get_attr_visibility,
3110 					       qla4xxx_boot_release);
3111 	if (!boot_kobj)
3112 		goto put_host;
3113 
3114 	return 0;
3115 
3116 put_host:
3117 	scsi_host_put(ha->host);
3118 kset_free:
3119 	iscsi_boot_destroy_kset(ha->boot_kset);
3120 	return -ENOMEM;
3121 }
3122 
3123 
3124 /**
3125  * qla4xxx_create_chap_list - Create CHAP list from FLASH
3126  * @ha: pointer to adapter structure
3127  *
3128  * Read flash and make a list of CHAP entries. During login, when a CHAP entry
3129  * is received, it is checked against this list. If the entry exists, the CHAP
3130  * entry index is set in the DDB. If the CHAP entry does not exist in this
3131  * list, a new entry is added to the CHAP table in FLASH and the index obtained
3132  * is used in the DDB.
3133  **/
3134 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3135 {
3136 	int rval = 0;
3137 	uint8_t *chap_flash_data = NULL;
3138 	uint32_t offset;
3139 	dma_addr_t chap_dma;
3140 	uint32_t chap_size = 0;
3141 
3142 	if (is_qla40XX(ha))
3143 		chap_size = MAX_CHAP_ENTRIES_40XX  *
3144 					sizeof(struct ql4_chap_table);
3145 	else	/* Single region contains CHAP info for both
3146 		 * ports which is divided into half for each port.
3147 		 */
3148 		chap_size = ha->hw.flt_chap_size / 2;
3149 
3150 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3151 					  &chap_dma, GFP_KERNEL);
3152 	if (!chap_flash_data) {
3153 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3154 		return;
3155 	}
3156 	if (is_qla40XX(ha))
3157 		offset = FLASH_CHAP_OFFSET;
3158 	else {
3159 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3160 		if (ha->port_num == 1)
3161 			offset += chap_size;
3162 	}
3163 
3164 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3165 	if (rval != QLA_SUCCESS)
3166 		goto exit_chap_list;
3167 
3168 	if (ha->chap_list == NULL)
3169 		ha->chap_list = vmalloc(chap_size);
3170 	if (ha->chap_list == NULL) {
3171 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3172 		goto exit_chap_list;
3173 	}
3174 
3175 	memcpy(ha->chap_list, chap_flash_data, chap_size);
3176 
3177 exit_chap_list:
3178 	dma_free_coherent(&ha->pdev->dev, chap_size,
3179 			chap_flash_data, chap_dma);
3180 	return;
3181 }
3182 
3183 /**
3184  * qla4xxx_probe_adapter - callback function to probe HBA
3185  * @pdev: pointer to pci_dev structure
3186  * @pci_device_id: pointer to pci_device entry
3187  * @ent: pointer to the matching pci_device_id entry
3188  *
3189  * This routine will probe for QLogic 4xxx iSCSI host adapters.
3190  * the driver.
3191  **/
3192 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3193 					   const struct pci_device_id *ent)
3194 {
3195 	int ret = -ENODEV, status;
3196 	struct Scsi_Host *host;
3197 	struct scsi_qla_host *ha;
3198 	uint8_t init_retry_count = 0;
3199 	char buf[34];
3200 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
3201 	uint32_t dev_state;
3202 
3203 	if (pci_enable_device(pdev))
3204 		return -1;
3205 
3206 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
3207 	if (host == NULL) {
3208 		printk(KERN_WARNING
3209 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
3210 		goto probe_disable_device;
3211 	}
3212 
3213 	/* Clear our data area */
3214 	ha = to_qla_host(host);
3215 	memset(ha, 0, sizeof(*ha));
3216 
3217 	/* Save the information from PCI BIOS.	*/
3218 	ha->pdev = pdev;
3219 	ha->host = host;
3220 	ha->host_no = host->host_no;
3221 
3222 	pci_enable_pcie_error_reporting(pdev);
3223 
3224 	/* Setup Runtime configurable options */
3225 	if (is_qla8022(ha)) {
3226 		ha->isp_ops = &qla4_8xxx_isp_ops;
3227 		rwlock_init(&ha->hw_lock);
3228 		ha->qdr_sn_window = -1;
3229 		ha->ddr_mn_window = -1;
3230 		ha->curr_window = 255;
3231 		ha->func_num = PCI_FUNC(ha->pdev->devfn);
3232 		nx_legacy_intr = &legacy_intr[ha->func_num];
3233 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3234 		ha->nx_legacy_intr.tgt_status_reg =
3235 			nx_legacy_intr->tgt_status_reg;
3236 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3237 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3238 	} else {
3239 		ha->isp_ops = &qla4xxx_isp_ops;
3240 	}
3241 
3242 	/* Set EEH reset type to fundamental if required by hba */
3243 	if (is_qla8022(ha))
3244 		pdev->needs_freset = 1;
3245 
3246 	/* Configure PCI I/O space. */
3247 	ret = ha->isp_ops->iospace_config(ha);
3248 	if (ret)
3249 		goto probe_failed_ioconfig;
3250 
3251 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
3252 		   pdev->device, pdev->irq, ha->reg);
3253 
3254 	qla4xxx_config_dma_addressing(ha);
3255 
3256 	/* Initialize lists and spinlocks. */
3257 	INIT_LIST_HEAD(&ha->free_srb_q);
3258 
3259 	mutex_init(&ha->mbox_sem);
3260 	mutex_init(&ha->chap_sem);
3261 	init_completion(&ha->mbx_intr_comp);
3262 	init_completion(&ha->disable_acb_comp);
3263 
3264 	spin_lock_init(&ha->hardware_lock);
3265 
3266 	/* Allocate dma buffers */
3267 	if (qla4xxx_mem_alloc(ha)) {
3268 		ql4_printk(KERN_WARNING, ha,
3269 		    "[ERROR] Failed to allocate memory for adapter\n");
3270 
3271 		ret = -ENOMEM;
3272 		goto probe_failed;
3273 	}
3274 
3275 	host->cmd_per_lun = 3;
3276 	host->max_channel = 0;
3277 	host->max_lun = MAX_LUNS - 1;
3278 	host->max_id = MAX_TARGETS;
3279 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
3280 	host->can_queue = MAX_SRBS ;
3281 	host->transportt = qla4xxx_scsi_transport;
3282 
3283 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3284 	if (ret) {
3285 		ql4_printk(KERN_WARNING, ha,
3286 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
3287 		goto probe_failed;
3288 	}
3289 
3290 	pci_set_drvdata(pdev, ha);
3291 
3292 	ret = scsi_add_host(host, &pdev->dev);
3293 	if (ret)
3294 		goto probe_failed;
3295 
3296 	if (is_qla8022(ha))
3297 		(void) qla4_8xxx_get_flash_info(ha);
3298 
3299 	/*
3300 	 * Initialize the Host adapter request/response queues and
3301 	 * firmware
3302 	 * NOTE: interrupts enabled upon successful completion
3303 	 */
3304 	status = qla4xxx_initialize_adapter(ha);
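	/* Retry initialization up to MAX_INIT_RETRIES times if the adapter
	 * does not come online, resetting the chip between attempts. */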
3305 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3306 	    init_retry_count++ < MAX_INIT_RETRIES) {
3307 
3308 		if (is_qla8022(ha)) {
3309 			qla4_8xxx_idc_lock(ha);
3310 			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3311 			qla4_8xxx_idc_unlock(ha);
3312 			if (dev_state == QLA82XX_DEV_FAILED) {
3313 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3314 				    "initialize adapter. H/W is in failed state\n",
3315 				    __func__);
3316 				break;
3317 			}
3318 		}
3319 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
3320 			      "(%d)\n", __func__, init_retry_count));
3321 
3322 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3323 			continue;
3324 
3325 		status = qla4xxx_initialize_adapter(ha);
3326 	}
3327 
3328 	if (!test_bit(AF_ONLINE, &ha->flags)) {
3329 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
3330 
3331 		if (is_qla8022(ha) && ql4xdontresethba) {
3332 			/* Put the device in failed state. */
3333 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3334 			qla4_8xxx_idc_lock(ha);
3335 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3336 			    QLA82XX_DEV_FAILED);
3337 			qla4_8xxx_idc_unlock(ha);
3338 		}
3339 		ret = -ENODEV;
3340 		goto remove_host;
3341 	}
3342 
3343 	/* Startup the kernel thread for this host adapter. */
3344 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
3345 		      "qla4xxx_dpc\n", __func__));
3346 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3347 	ha->dpc_thread = create_singlethread_workqueue(buf);
3348 	if (!ha->dpc_thread) {
3349 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
3350 		ret = -ENODEV;
3351 		goto remove_host;
3352 	}
3353 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
3354 
3355 	sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3356 	ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3357 	if (!ha->task_wq) {
3358 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3359 		ret = -ENODEV;
3360 		goto remove_host;
3361 	}
3362 
3363 	/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3364 	 * (which is called indirectly by qla4xxx_initialize_adapter),
3365 	 * so that irqs will be registered after crbinit but before
3366 	 * mbx_intr_enable.
3367 	 */
3368 	if (!is_qla8022(ha)) {
3369 		ret = qla4xxx_request_irqs(ha);
3370 		if (ret) {
3371 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3372 			    "interrupt %d already in use.\n", pdev->irq);
3373 			goto remove_host;
3374 		}
3375 	}
3376 
3377 	pci_save_state(ha->pdev);
3378 	ha->isp_ops->enable_intrs(ha);
3379 
3380 	/* Start timer thread. */
3381 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3382 
3383 	set_bit(AF_INIT_DONE, &ha->flags);
3384 
3385 	printk(KERN_INFO
3386 	       " QLogic iSCSI HBA Driver version: %s\n"
3387 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3388 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3389 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3390 	       ha->patch_number, ha->build_number);
3391 
3392 	qla4xxx_create_chap_list(ha);
3393 
3394 	if (qla4xxx_setup_boot_info(ha))
3395 		ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
3396 			   __func__);
3397 
3398 	qla4xxx_create_ifaces(ha);
3399 	return 0;
3400 
3401 remove_host:
3402 	scsi_remove_host(ha->host);
3403 
3404 probe_failed:
3405 	qla4xxx_free_adapter(ha);
3406 
3407 probe_failed_ioconfig:
3408 	pci_disable_pcie_error_reporting(pdev);
3409 	scsi_host_put(ha->host);
3410 
3411 probe_disable_device:
3412 	pci_disable_device(pdev);
3413 
3414 	return ret;
3415 }
3416 
3417 /**
3418  * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
3419  * @ha: pointer to adapter structure
3420  *
3421  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3422  * so that the other port will not re-initialize while this ha is being
3423  * removed due to driver unload or HBA hot removal.
3424  **/
3425 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3426 {
3427 	struct scsi_qla_host *other_ha = NULL;
3428 	struct pci_dev *other_pdev = NULL;
3429 	int fn = ISP4XXX_PCI_FN_2;
3430 
3431 	/* The iSCSI function numbers for ISP4xxx are 1 and 3 */
3432 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3433 		fn = ISP4XXX_PCI_FN_1;
3434 
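	/* Look up the PCI device of the sibling iSCSI function in the same
	 * domain, bus and slot as this adapter.
	 */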
3435 	other_pdev =
3436 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3437 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3438 		fn));
3439 
3440 	/* Get other_ha if other_pdev is valid and its state is enabled */
3441 	if (other_pdev) {
3442 		if (atomic_read(&other_pdev->enable_cnt)) {
3443 			other_ha = pci_get_drvdata(other_pdev);
3444 			if (other_ha) {
3445 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
3446 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3447 				    "Prevent %s reinit\n", __func__,
3448 				    dev_name(&other_ha->pdev->dev)));
3449 			}
3450 		}
3451 		pci_dev_put(other_pdev);
3452 	}
3453 }
3454 
3455 /**
3456  * qla4xxx_remove_adapter - callback function to remove adapter.
3457  * @pdev: PCI device pointer
3458  **/
3459 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3460 {
3461 	struct scsi_qla_host *ha;
3462 
3463 	ha = pci_get_drvdata(pdev);
3464 
3465 	if (!is_qla8022(ha))
3466 		qla4xxx_prevent_other_port_reinit(ha);
3467 
3468 	/* destroy iface from sysfs */
3469 	qla4xxx_destroy_ifaces(ha);
3470 
3471 	if (ha->boot_kset)
3472 		iscsi_boot_destroy_kset(ha->boot_kset);
3473 
3474 	scsi_remove_host(ha->host);
3475 
3476 	qla4xxx_free_adapter(ha);
3477 
3478 	scsi_host_put(ha->host);
3479 
3480 	pci_disable_pcie_error_reporting(pdev);
3481 	pci_disable_device(pdev);
3482 	pci_set_drvdata(pdev, NULL);
3483 }
3484 
3485 /**
3486  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3487  * @ha: HA context
3488  *
3489  * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
3490  * supported addressing method.
3491  */
3492 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
3493 {
3494 	int retval;
3495 
3496 	/* Update our PCI device dma_mask for full 64 bit mask */
3497 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3498 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
3499 			dev_dbg(&ha->pdev->dev,
3500 				  "Failed to set 64 bit PCI consistent mask; "
3501 				   "using 32 bit.\n");
3502 			retval = pci_set_consistent_dma_mask(ha->pdev,
3503 							     DMA_BIT_MASK(32));
3504 		}
3505 	} else
3506 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
3507 }
3508 
3509 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3510 {
3511 	struct iscsi_cls_session *cls_sess;
3512 	struct iscsi_session *sess;
3513 	struct ddb_entry *ddb;
3514 	int queue_depth = QL4_DEF_QDEPTH;
3515 
3516 	cls_sess = starget_to_session(sdev->sdev_target);
3517 	sess = cls_sess->dd_data;
3518 	ddb = sess->dd_data;
3519 
3520 	sdev->hostdata = ddb;
3521 	sdev->tagged_supported = 1;
3522 
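	/* Honor the ql4xmaxqdepth module parameter only when it is within the
	 * valid 16-bit range; otherwise keep the QL4_DEF_QDEPTH default.
	 */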
3523 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3524 		queue_depth = ql4xmaxqdepth;
3525 
3526 	scsi_activate_tcq(sdev, queue_depth);
3527 	return 0;
3528 }
3529 
3530 static int qla4xxx_slave_configure(struct scsi_device *sdev)
3531 {
3532 	sdev->tagged_supported = 1;
3533 	return 0;
3534 }
3535 
3536 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3537 {
3538 	scsi_deactivate_tcq(sdev, 1);
3539 }
3540 
3541 /**
3542  * qla4xxx_del_from_active_array - returns an active srb
3543  * @ha: Pointer to host adapter structure.
3544  * @index: index into the active_array
3545  *
3546  * This routine removes and returns the srb at the specified index
3547  **/
3548 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3549     uint32_t index)
3550 {
3551 	struct srb *srb = NULL;
3552 	struct scsi_cmnd *cmd = NULL;
3553 
3554 	cmd = scsi_host_find_tag(ha->host, index);
3555 	if (!cmd)
3556 		return srb;
3557 
3558 	srb = (struct srb *)CMD_SP(cmd);
3559 	if (!srb)
3560 		return srb;
3561 
3562 	/* Update the request queue and outstanding IOCB counters */
3563 	if (srb->flags & SRB_DMA_VALID) {
3564 		ha->req_q_count += srb->iocb_cnt;
3565 		ha->iocb_cnt -= srb->iocb_cnt;
3566 		if (srb->cmd)
3567 			srb->cmd->host_scribble =
3568 				(unsigned char *)(unsigned long) MAX_SRBS;
3569 	}
3570 	return srb;
3571 }
3572 
3573 /**
3574  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
3575  * @ha: Pointer to host adapter structure.
3576  * @cmd: Scsi Command to wait on.
3577  *
3578  * This routine waits for the command to be returned by the Firmware
3579  * for some max time.
3580  **/
3581 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3582 				      struct scsi_cmnd *cmd)
3583 {
3584 	int done = 0;
3585 	struct srb *rp;
3586 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
3587 	int ret = SUCCESS;
3588 
3589 	/* Don't wait on the command if a PCI error is being handled
3590 	 * by the PCI AER driver.
3591 	 */
3592 	if (unlikely(pci_channel_offline(ha->pdev)) ||
3593 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
3594 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3595 		    ha->host_no, __func__);
3596 		return ret;
3597 	}
3598 
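	/* Poll every 2 seconds until CMD_SP is cleared (the command has been
	 * returned to the OS) or max_wait_time expires.
	 */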
3599 	do {
3600 		/* Check whether the command has been returned to the OS */
3601 		rp = (struct srb *) CMD_SP(cmd);
3602 		if (rp == NULL) {
3603 			done++;
3604 			break;
3605 		}
3606 
3607 		msleep(2000);
3608 	} while (max_wait_time--);
3609 
3610 	return done;
3611 }
3612 
3613 /**
3614  * qla4xxx_wait_for_hba_online - waits for HBA to come online
3615  * @ha: Pointer to host adapter structure
3616  **/
3617 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3618 {
3619 	unsigned long wait_online;
3620 
3621 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
3622 	while (time_before(jiffies, wait_online)) {
3623 
3624 		if (adapter_up(ha))
3625 			return QLA_SUCCESS;
3626 
3627 		msleep(2000);
3628 	}
3629 
3630 	return QLA_ERROR;
3631 }
3632 
3633 /**
3634  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
3635  * @ha: pointer to HBA
3636  * @stgt: pointer to SCSI target
3637  * @sdev: pointer to SCSI device; may be NULL to wait on the whole target
3638  *
3639  * This function waits for all outstanding commands to the target/device. It
3640  * returns 0 if all pending commands are returned and 1 otherwise.
3641  **/
3642 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3643 					struct scsi_target *stgt,
3644 					struct scsi_device *sdev)
3645 {
3646 	int cnt;
3647 	int status = 0;
3648 	struct scsi_cmnd *cmd;
3649 
3650 	/*
3651 	 * Waiting for all commands for the designated target or dev
3652 	 * in the active array
3653 	 */
3654 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3655 		cmd = scsi_host_find_tag(ha->host, cnt);
3656 		if (cmd && stgt == scsi_target(cmd->device) &&
3657 		    (!sdev || sdev == cmd->device)) {
3658 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3659 				status++;
3660 				break;
3661 			}
3662 		}
3663 	}
3664 	return status;
3665 }
3666 
3667 /**
3668  * qla4xxx_eh_abort - callback for abort task.
3669  * @cmd: Pointer to Linux's SCSI command structure
3670  *
3671  * This routine is called by the Linux OS to abort the specified
3672  * command.
3673  **/
3674 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3675 {
3676 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3677 	unsigned int id = cmd->device->id;
3678 	unsigned int lun = cmd->device->lun;
3679 	unsigned long flags;
3680 	struct srb *srb = NULL;
3681 	int ret = SUCCESS;
3682 	int wait = 0;
3683 
3684 	ql4_printk(KERN_INFO, ha,
3685 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3686 	    ha->host_no, id, lun, cmd);
3687 
3688 	spin_lock_irqsave(&ha->hardware_lock, flags);
3689 	srb = (struct srb *) CMD_SP(cmd);
3690 	if (!srb) {
3691 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3692 		return SUCCESS;
3693 	}
3694 	kref_get(&srb->srb_ref);
3695 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3696 
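	/* The extra srb reference taken above (under hardware_lock) keeps the
	 * srb valid while the abort is issued; it is dropped again after the
	 * abort mailbox command completes below.
	 */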
3697 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3698 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3699 		    ha->host_no, id, lun));
3700 		ret = FAILED;
3701 	} else {
3702 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3703 		    ha->host_no, id, lun));
3704 		wait = 1;
3705 	}
3706 
3707 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3708 
3709 	/* Wait for command to complete */
3710 	if (wait) {
3711 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3712 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3713 			    ha->host_no, id, lun));
3714 			ret = FAILED;
3715 		}
3716 	}
3717 
3718 	ql4_printk(KERN_INFO, ha,
3719 	    "scsi%ld:%d:%d: Abort command - %s\n",
3720 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
3721 
3722 	return ret;
3723 }
3724 
3725 /**
3726  * qla4xxx_eh_device_reset - callback for device (LUN) reset.
3727  * @cmd: Pointer to Linux's SCSI command structure
3728  *
3729  * This routine is called by the Linux OS to reset the LUN addressed by
3730  * the specified command.
3731  **/
3732 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3733 {
3734 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3735 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
3736 	int ret = FAILED, stat;
3737 
3738 	if (!ddb_entry)
3739 		return ret;
3740 
3741 	ret = iscsi_block_scsi_eh(cmd);
3742 	if (ret)
3743 		return ret;
3744 	ret = FAILED;
3745 
3746 	ql4_printk(KERN_INFO, ha,
3747 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3748 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
3749 
3750 	DEBUG2(printk(KERN_INFO
3751 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3752 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
3753 		      cmd, jiffies, cmd->request->timeout / HZ,
3754 		      ha->dpc_flags, cmd->result, cmd->allowed));
3755 
3756 	/* FIXME: wait for hba to go online */
3757 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3758 	if (stat != QLA_SUCCESS) {
3759 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
3760 		goto eh_dev_reset_done;
3761 	}
3762 
3763 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3764 					 cmd->device)) {
3765 		ql4_printk(KERN_INFO, ha,
3766 			   "DEVICE RESET FAILED - waiting for "
3767 			   "commands.\n");
3768 		goto eh_dev_reset_done;
3769 	}
3770 
3771 	/* Send marker. */
3772 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3773 		MM_LUN_RESET) != QLA_SUCCESS)
3774 		goto eh_dev_reset_done;
3775 
3776 	ql4_printk(KERN_INFO, ha,
3777 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3778 		   ha->host_no, cmd->device->channel, cmd->device->id,
3779 		   cmd->device->lun);
3780 
3781 	ret = SUCCESS;
3782 
3783 eh_dev_reset_done:
3784 
3785 	return ret;
3786 }
3787 
3788 /**
3789  * qla4xxx_eh_target_reset - callback for target reset.
3790  * @cmd: Pointer to Linux's SCSI command structure
3791  *
3792  * This routine is called by the Linux OS to reset the target.
3793  **/
3794 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3795 {
3796 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3797 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
3798 	int stat, ret;
3799 
3800 	if (!ddb_entry)
3801 		return FAILED;
3802 
3803 	ret = iscsi_block_scsi_eh(cmd);
3804 	if (ret)
3805 		return ret;
3806 
3807 	starget_printk(KERN_INFO, scsi_target(cmd->device),
3808 		       "WARM TARGET RESET ISSUED.\n");
3809 
3810 	DEBUG2(printk(KERN_INFO
3811 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3812 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
3813 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
3814 		      ha->dpc_flags, cmd->result, cmd->allowed));
3815 
3816 	stat = qla4xxx_reset_target(ha, ddb_entry);
3817 	if (stat != QLA_SUCCESS) {
3818 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3819 			       "WARM TARGET RESET FAILED.\n");
3820 		return FAILED;
3821 	}
3822 
3823 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3824 					 NULL)) {
3825 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3826 			       "WARM TARGET DEVICE RESET FAILED - "
3827 			       "waiting for commands.\n");
3828 		return FAILED;
3829 	}
3830 
3831 	/* Send marker. */
3832 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3833 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3834 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3835 			       "WARM TARGET DEVICE RESET FAILED - "
3836 			       "marker iocb failed.\n");
3837 		return FAILED;
3838 	}
3839 
3840 	starget_printk(KERN_INFO, scsi_target(cmd->device),
3841 		       "WARM TARGET RESET SUCCEEDED.\n");
3842 	return SUCCESS;
3843 }
3844 
3845 /**
3846  * qla4xxx_eh_host_reset - kernel callback
3847  * @cmd: Pointer to Linux's SCSI command structure
3848  *
3849  * This routine is invoked by the Linux kernel to perform fatal error
3850  * recovery on the specified adapter.
3851  **/
3852 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3853 {
3854 	int return_status = FAILED;
3855 	struct scsi_qla_host *ha;
3856 
3857 	ha = to_qla_host(cmd->device->host);
3858 
3859 	if (ql4xdontresethba) {
3860 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3861 		     ha->host_no, __func__));
3862 		return FAILED;
3863 	}
3864 
3865 	ql4_printk(KERN_INFO, ha,
3866 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
3867 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
3868 
3869 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3870 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
3871 			      "DEAD.\n", ha->host_no, cmd->device->channel,
3872 			      __func__));
3873 
3874 		return FAILED;
3875 	}
3876 
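	/* If a reset is not already pending, request one before recovering:
	 * a firmware context reset on ISP82xx, or a full HBA reset otherwise.
	 */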
3877 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3878 		if (is_qla8022(ha))
3879 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3880 		else
3881 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
3882 	}
3883 
3884 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
3885 		return_status = SUCCESS;
3886 
3887 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
3888 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
3889 
3890 	return return_status;
3891 }
3892 
3893 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3894 {
3895 	uint32_t mbox_cmd[MBOX_REG_COUNT];
3896 	uint32_t mbox_sts[MBOX_REG_COUNT];
3897 	struct addr_ctrl_blk_def *acb = NULL;
3898 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3899 	int rval = QLA_SUCCESS;
3900 	dma_addr_t acb_dma;
3901 
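	/* Firmware context reset sequence: read back the primary ACB,
	 * disable it, wait for the disable-ACB completion event, then
	 * restore the saved ACB to bring the context back up.
	 */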
3902 	acb = dma_alloc_coherent(&ha->pdev->dev,
3903 				 sizeof(struct addr_ctrl_blk_def),
3904 				 &acb_dma, GFP_KERNEL);
3905 	if (!acb) {
3906 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3907 			   __func__);
3908 		rval = -ENOMEM;
3909 		goto exit_port_reset;
3910 	}
3911 
3912 	memset(acb, 0, acb_len);
3913 
3914 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3915 	if (rval != QLA_SUCCESS) {
3916 		rval = -EIO;
3917 		goto exit_free_acb;
3918 	}
3919 
3920 	rval = qla4xxx_disable_acb(ha);
3921 	if (rval != QLA_SUCCESS) {
3922 		rval = -EIO;
3923 		goto exit_free_acb;
3924 	}
3925 
3926 	wait_for_completion_timeout(&ha->disable_acb_comp,
3927 				    DISABLE_ACB_TOV * HZ);
3928 
3929 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3930 	if (rval != QLA_SUCCESS) {
3931 		rval = -EIO;
3932 		goto exit_free_acb;
3933 	}
3934 
3935 exit_free_acb:
3936 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3937 			  acb, acb_dma);
3938 exit_port_reset:
3939 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3940 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3941 	return rval;
3942 }
3943 
3944 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3945 {
3946 	struct scsi_qla_host *ha = to_qla_host(shost);
3947 	int rval = QLA_SUCCESS;
3948 
3949 	if (ql4xdontresethba) {
3950 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3951 				  __func__));
3952 		rval = -EPERM;
3953 		goto exit_host_reset;
3954 	}
3955 
3956 	rval = qla4xxx_wait_for_hba_online(ha);
3957 	if (rval != QLA_SUCCESS) {
3958 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3959 				  "adapter\n", __func__));
3960 		rval = -EIO;
3961 		goto exit_host_reset;
3962 	}
3963 
3964 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3965 		goto recover_adapter;
3966 
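	/* SCSI_ADAPTER_RESET schedules a full HBA reset; SCSI_FIRMWARE_RESET
	 * resets only the firmware context (DPC_RESET_HA_FW_CONTEXT on
	 * ISP82xx, or an ACB disable/restore cycle via
	 * qla4xxx_context_reset() on other ISPs).
	 */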
3967 	switch (reset_type) {
3968 	case SCSI_ADAPTER_RESET:
3969 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
3970 		break;
3971 	case SCSI_FIRMWARE_RESET:
3972 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3973 			if (is_qla8022(ha))
3974 				/* set firmware context reset */
3975 				set_bit(DPC_RESET_HA_FW_CONTEXT,
3976 					&ha->dpc_flags);
3977 			else {
3978 				rval = qla4xxx_context_reset(ha);
3979 				goto exit_host_reset;
3980 			}
3981 		}
3982 		break;
3983 	}
3984 
3985 recover_adapter:
3986 	rval = qla4xxx_recover_adapter(ha);
3987 	if (rval != QLA_SUCCESS) {
3988 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3989 				  __func__));
3990 		rval = -EIO;
3991 	}
3992 
3993 exit_host_reset:
3994 	return rval;
3995 }
3996 
3997 /* The PCI AER driver recovers from all correctable errors without
3998  * driver intervention. For uncorrectable errors the PCI AER driver
3999  * calls the following device driver callbacks:
4000  *
4001  * - Fatal errors - link_reset
4002  * - Non-fatal errors - the driver's pci_error_detected(), which
4003  *   returns CAN_RECOVER, NEED_RESET or DISCONNECT.
4004  *
4005  * Based on that result, the PCI AER driver then calls:
4006  * CAN_RECOVER - the driver's pci_mmio_enabled(); mmio_enabled()
4007  *               returns RECOVERED, or NEED_RESET if the firmware is hung
4008  * NEED_RESET - the driver's slot_reset()
4009  * DISCONNECT - the device is dead and cannot be recovered
4010  * RECOVERED - the driver's pci_resume()
4011  */
4012 static pci_ers_result_t
4013 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4014 {
4015 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4016 
4017 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
4018 	    ha->host_no, __func__, state);
4019 
4020 	if (!is_aer_supported(ha))
4021 		return PCI_ERS_RESULT_NONE;
4022 
4023 	switch (state) {
4024 	case pci_channel_io_normal:
4025 		clear_bit(AF_EEH_BUSY, &ha->flags);
4026 		return PCI_ERS_RESULT_CAN_RECOVER;
4027 	case pci_channel_io_frozen:
4028 		set_bit(AF_EEH_BUSY, &ha->flags);
4029 		qla4xxx_mailbox_premature_completion(ha);
4030 		qla4xxx_free_irqs(ha);
4031 		pci_disable_device(pdev);
4032 		/* Return back all IOs */
4033 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4034 		return PCI_ERS_RESULT_NEED_RESET;
4035 	case pci_channel_io_perm_failure:
4036 		set_bit(AF_EEH_BUSY, &ha->flags);
4037 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
4038 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4039 		return PCI_ERS_RESULT_DISCONNECT;
4040 	}
4041 	return PCI_ERS_RESULT_NEED_RESET;
4042 }
4043 
4044 /**
4045  * qla4xxx_pci_mmio_enabled() gets called if
4046  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
4047  * and read/write access to the device still works.
4048  **/
4049 static pci_ers_result_t
4050 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
4051 {
4052 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4053 
4054 	if (!is_aer_supported(ha))
4055 		return PCI_ERS_RESULT_NONE;
4056 
4057 	return PCI_ERS_RESULT_RECOVERED;
4058 }
4059 
4060 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
4061 {
4062 	uint32_t rval = QLA_ERROR;
4063 	uint32_t ret = 0;
4064 	int fn;
4065 	struct pci_dev *other_pdev = NULL;
4066 
4067 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
4068 
4069 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4070 
4071 	if (test_bit(AF_ONLINE, &ha->flags)) {
4072 		clear_bit(AF_ONLINE, &ha->flags);
4073 		clear_bit(AF_LINK_UP, &ha->flags);
4074 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4075 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4076 	}
4077 
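	/* Walk the lower-numbered PCI functions on this device; if none of
	 * them is still enabled, fn ends up 0 and this function becomes the
	 * reset owner below.
	 */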
4078 	fn = PCI_FUNC(ha->pdev->devfn);
4079 	while (fn > 0) {
4080 		fn--;
4081 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
4082 		    "func %x\n", ha->host_no, __func__, fn);
4083 		/* Get the pci device given the domain, bus,
4084 		 * slot/function number */
4085 		other_pdev =
4086 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
4087 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
4088 		    fn));
4089 
4090 		if (!other_pdev)
4091 			continue;
4092 
4093 		if (atomic_read(&other_pdev->enable_cnt)) {
4094 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
4095 			    "func %x in enabled state\n", ha->host_no,
4096 			    __func__, fn);
4097 			pci_dev_put(other_pdev);
4098 			break;
4099 		}
4100 		pci_dev_put(other_pdev);
4101 	}
4102 
4103 	/* The first function on the card (the reset owner) will start and
4104 	 * initialize the firmware. The other functions on the card will only
4105 	 * reset their firmware context.
4106 	 */
4107 	if (!fn) {
4108 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
4109 		    "0x%x is the owner\n", ha->host_no, __func__,
4110 		    ha->pdev->devfn);
4111 
4112 		qla4_8xxx_idc_lock(ha);
4113 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4114 		    QLA82XX_DEV_COLD);
4115 
4116 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4117 		    QLA82XX_IDC_VERSION);
4118 
4119 		qla4_8xxx_idc_unlock(ha);
4120 		clear_bit(AF_FW_RECOVERY, &ha->flags);
4121 		rval = qla4xxx_initialize_adapter(ha);
4122 		qla4_8xxx_idc_lock(ha);
4123 
4124 		if (rval != QLA_SUCCESS) {
4125 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4126 			    "FAILED\n", ha->host_no, __func__);
4127 			qla4_8xxx_clear_drv_active(ha);
4128 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4129 			    QLA82XX_DEV_FAILED);
4130 		} else {
4131 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4132 			    "READY\n", ha->host_no, __func__);
4133 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4134 			    QLA82XX_DEV_READY);
4135 			/* Clear driver state register */
4136 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4137 			qla4_8xxx_set_drv_active(ha);
4138 			ret = qla4xxx_request_irqs(ha);
4139 			if (ret) {
4140 				ql4_printk(KERN_WARNING, ha, "Failed to "
4141 				    "reserve interrupt %d already in use.\n",
4142 				    ha->pdev->irq);
4143 				rval = QLA_ERROR;
4144 			} else {
4145 				ha->isp_ops->enable_intrs(ha);
4146 				rval = QLA_SUCCESS;
4147 			}
4148 		}
4149 		qla4_8xxx_idc_unlock(ha);
4150 	} else {
4151 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
4152 		    "the reset owner\n", ha->host_no, __func__,
4153 		    ha->pdev->devfn);
4154 		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4155 		    QLA82XX_DEV_READY)) {
4156 			clear_bit(AF_FW_RECOVERY, &ha->flags);
4157 			rval = qla4xxx_initialize_adapter(ha);
4158 			if (rval == QLA_SUCCESS) {
4159 				ret = qla4xxx_request_irqs(ha);
4160 				if (ret) {
4161 					ql4_printk(KERN_WARNING, ha, "Failed to"
4162 					    " reserve interrupt %d already in"
4163 					    " use.\n", ha->pdev->irq);
4164 					rval = QLA_ERROR;
4165 				} else {
4166 					ha->isp_ops->enable_intrs(ha);
4167 					rval = QLA_SUCCESS;
4168 				}
4169 			}
4170 			qla4_8xxx_idc_lock(ha);
4171 			qla4_8xxx_set_drv_active(ha);
4172 			qla4_8xxx_idc_unlock(ha);
4173 		}
4174 	}
4175 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4176 	return rval;
4177 }
4178 
4179 static pci_ers_result_t
4180 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
4181 {
4182 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4183 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4184 	int rc;
4185 
4186 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
4187 	    ha->host_no, __func__);
4188 
4189 	if (!is_aer_supported(ha))
4190 		return PCI_ERS_RESULT_NONE;
4191 
4192 	/* Restore the saved state of PCIe device -
4193 	 * BAR registers, PCI Config space, PCIX, MSI,
4194 	 * IOV states
4195 	 */
4196 	pci_restore_state(pdev);
4197 
4198 	/* pci_restore_state() clears the saved_state flag of the device,
4199 	 * so save the state again to set the saved_state flag.
4200 	 */
4201 	pci_save_state(pdev);
4202 
4203 	/* Initialize device or resume if in suspended state */
4204 	rc = pci_enable_device(pdev);
4205 	if (rc) {
4206 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
4207 		    "device after reset\n", ha->host_no, __func__);
4208 		goto exit_slot_reset;
4209 	}
4210 
4211 	ha->isp_ops->disable_intrs(ha);
4212 
4213 	if (is_qla8022(ha)) {
4214 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
4215 			ret = PCI_ERS_RESULT_RECOVERED;
4216 			goto exit_slot_reset;
4217 		} else
4218 			goto exit_slot_reset;
4219 	}
4220 
4221 exit_slot_reset:
4222 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
4223 	    ha->host_no, __func__, ret);
4224 	return ret;
4225 }
4226 
4227 static void
4228 qla4xxx_pci_resume(struct pci_dev *pdev)
4229 {
4230 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4231 	int ret;
4232 
4233 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
4234 	    ha->host_no, __func__);
4235 
4236 	ret = qla4xxx_wait_for_hba_online(ha);
4237 	if (ret != QLA_SUCCESS) {
4238 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
4239 		    "resume I/O from slot/link_reset\n", ha->host_no,
4240 		     __func__);
4241 	}
4242 
4243 	pci_cleanup_aer_uncorrect_error_status(pdev);
4244 	clear_bit(AF_EEH_BUSY, &ha->flags);
4245 }
4246 
4247 static struct pci_error_handlers qla4xxx_err_handler = {
4248 	.error_detected = qla4xxx_pci_error_detected,
4249 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
4250 	.slot_reset = qla4xxx_pci_slot_reset,
4251 	.resume = qla4xxx_pci_resume,
4252 };
4253 
4254 static struct pci_device_id qla4xxx_pci_tbl[] = {
4255 	{
4256 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4257 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
4258 		.subvendor	= PCI_ANY_ID,
4259 		.subdevice	= PCI_ANY_ID,
4260 	},
4261 	{
4262 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4263 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
4264 		.subvendor	= PCI_ANY_ID,
4265 		.subdevice	= PCI_ANY_ID,
4266 	},
4267 	{
4268 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4269 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
4270 		.subvendor	= PCI_ANY_ID,
4271 		.subdevice	= PCI_ANY_ID,
4272 	},
4273 	{
4274 		.vendor         = PCI_VENDOR_ID_QLOGIC,
4275 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
4276 		.subvendor      = PCI_ANY_ID,
4277 		.subdevice      = PCI_ANY_ID,
4278 	},
4279 	{0, 0},
4280 };
4281 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
4282 
4283 static struct pci_driver qla4xxx_pci_driver = {
4284 	.name		= DRIVER_NAME,
4285 	.id_table	= qla4xxx_pci_tbl,
4286 	.probe		= qla4xxx_probe_adapter,
4287 	.remove		= qla4xxx_remove_adapter,
4288 	.err_handler = &qla4xxx_err_handler,
4289 };
4290 
4291 static int __init qla4xxx_module_init(void)
4292 {
4293 	int ret;
4294 
4295 	/* Allocate cache for SRBs. */
4296 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
4297 				       SLAB_HWCACHE_ALIGN, NULL);
4298 	if (srb_cachep == NULL) {
4299 		printk(KERN_ERR
4300 		       "%s: Unable to allocate SRB cache..."
4301 		       "Failing load!\n", DRIVER_NAME);
4302 		ret = -ENOMEM;
4303 		goto no_srp_cache;
4304 	}
4305 
4306 	/* Derive version string. */
4307 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
4308 	if (ql4xextended_error_logging)
4309 		strcat(qla4xxx_version_str, "-debug");
4310 
4311 	qla4xxx_scsi_transport =
4312 		iscsi_register_transport(&qla4xxx_iscsi_transport);
4313 	if (!qla4xxx_scsi_transport){
4314 		ret = -ENODEV;
4315 		goto release_srb_cache;
4316 	}
4317 
4318 	ret = pci_register_driver(&qla4xxx_pci_driver);
4319 	if (ret)
4320 		goto unregister_transport;
4321 
4322 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
4323 	return 0;
4324 
4325 unregister_transport:
4326 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4327 release_srb_cache:
4328 	kmem_cache_destroy(srb_cachep);
4329 no_srp_cache:
4330 	return ret;
4331 }
4332 
4333 static void __exit qla4xxx_module_exit(void)
4334 {
4335 	pci_unregister_driver(&qla4xxx_pci_driver);
4336 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4337 	kmem_cache_destroy(srb_cachep);
4338 }
4339 
4340 module_init(qla4xxx_module_init);
4341 module_exit(qla4xxx_module_exit);
4342 
4343 MODULE_AUTHOR("QLogic Corporation");
4344 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
4345 MODULE_LICENSE("GPL");
4346 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
4347