xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision 0d5b36b8)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsicam.h>
14 
15 #include "ql4_def.h"
16 #include "ql4_version.h"
17 #include "ql4_glbl.h"
18 #include "ql4_dbg.h"
19 #include "ql4_inline.h"
20 
21 /*
22  * Driver version
23  */
24 static char qla4xxx_version_str[40];
25 
26 /*
27  * SRB allocation cache
28  */
29 static struct kmem_cache *srb_cachep;
30 
31 /*
32  * Module parameter information and variables
33  */
34 int ql4xdontresethba = 0;
35 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
36 MODULE_PARM_DESC(ql4xdontresethba,
37 		"Don't reset the HBA for driver recovery \n"
38 		" 0 - It will reset HBA (Default)\n"
39 		" 1 - It will NOT reset HBA");
40 
41 int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
42 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
43 MODULE_PARM_DESC(ql4xextended_error_logging,
44 		 "Option to enable extended error logging, "
45 		 "Default is 0 - no logging, 1 - debug logging");
46 
47 int ql4xenablemsix = 1;
48 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49 MODULE_PARM_DESC(ql4xenablemsix,
50 		"Set to enable MSI or MSI-X interrupt mechanism.\n"
51 		" 0 = enable INTx interrupt mechanism.\n"
52 		" 1 = enable MSI-X interrupt mechanism (Default).\n"
53 		" 2 = enable MSI interrupt mechanism.");
54 
55 #define QL4_DEF_QDEPTH 32
56 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58 MODULE_PARM_DESC(ql4xmaxqdepth,
59 		"Maximum queue depth to report for target devices.\n"
60 		" Default: 32.");
61 
62 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 		"Target Session Recovery Timeout.\n"
66 		" Default: 30 sec.");
67 
68 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69 /*
70  * SCSI host template entry points
71  */
72 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
73 
74 /*
75  * iSCSI template entry points
76  */
77 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 				  enum iscsi_param param, char *buf);
79 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 				  enum iscsi_host_param param, char *buf);
81 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
82 				   int count);
83 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 				   enum iscsi_param_type param_type,
85 				   int param, char *buf);
86 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 						 struct sockaddr *dst_addr,
89 						 int non_blocking);
90 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 				enum iscsi_param param, char *buf);
94 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95 static struct iscsi_cls_conn *
96 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 			     struct iscsi_cls_conn *cls_conn,
99 			     uint64_t transport_fd, int is_leading);
100 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101 static struct iscsi_cls_session *
102 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 			uint16_t qdepth, uint32_t initial_cmdsn);
104 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105 static void qla4xxx_task_work(struct work_struct *wdata);
106 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107 static int qla4xxx_task_xmit(struct iscsi_task *);
108 static void qla4xxx_task_cleanup(struct iscsi_task *);
109 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 				   struct iscsi_stats *stats);
112 /*
113  * SCSI host template entry points
114  */
115 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
116 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
117 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
118 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120 static int qla4xxx_slave_alloc(struct scsi_device *device);
121 static int qla4xxx_slave_configure(struct scsi_device *device);
122 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
123 static mode_t ql4_attr_is_visible(int param_type, int param);
124 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
125 
126 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
127     QLA82XX_LEGACY_INTR_CONFIG;
128 
129 static struct scsi_host_template qla4xxx_driver_template = {
130 	.module			= THIS_MODULE,
131 	.name			= DRIVER_NAME,
132 	.proc_name		= DRIVER_NAME,
133 	.queuecommand		= qla4xxx_queuecommand,
134 
135 	.eh_abort_handler	= qla4xxx_eh_abort,
136 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
137 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
138 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
139 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
140 
141 	.slave_configure	= qla4xxx_slave_configure,
142 	.slave_alloc		= qla4xxx_slave_alloc,
143 	.slave_destroy		= qla4xxx_slave_destroy,
144 
145 	.this_id		= -1,
146 	.cmd_per_lun		= 3,
147 	.use_clustering		= ENABLE_CLUSTERING,
148 	.sg_tablesize		= SG_ALL,
149 
150 	.max_sectors		= 0xFFFF,
151 	.shost_attrs		= qla4xxx_host_attrs,
152 	.host_reset		= qla4xxx_host_reset,
153 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
154 };
155 
156 static struct iscsi_transport qla4xxx_iscsi_transport = {
157 	.owner			= THIS_MODULE,
158 	.name			= DRIVER_NAME,
159 	.caps			= CAP_TEXT_NEGO |
160 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
161 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
162 				  CAP_MULTI_R2T,
163 	.attr_is_visible	= ql4_attr_is_visible,
164 	.create_session         = qla4xxx_session_create,
165 	.destroy_session        = qla4xxx_session_destroy,
166 	.start_conn             = qla4xxx_conn_start,
167 	.create_conn            = qla4xxx_conn_create,
168 	.bind_conn              = qla4xxx_conn_bind,
169 	.stop_conn              = iscsi_conn_stop,
170 	.destroy_conn           = qla4xxx_conn_destroy,
171 	.set_param              = iscsi_set_param,
172 	.get_conn_param		= qla4xxx_conn_get_param,
173 	.get_session_param	= iscsi_session_get_param,
174 	.get_ep_param           = qla4xxx_get_ep_param,
175 	.ep_connect		= qla4xxx_ep_connect,
176 	.ep_poll		= qla4xxx_ep_poll,
177 	.ep_disconnect		= qla4xxx_ep_disconnect,
178 	.get_stats		= qla4xxx_conn_get_stats,
179 	.send_pdu		= iscsi_conn_send_pdu,
180 	.xmit_task		= qla4xxx_task_xmit,
181 	.cleanup_task		= qla4xxx_task_cleanup,
182 	.alloc_pdu		= qla4xxx_alloc_pdu,
183 
184 	.get_host_param		= qla4xxx_host_get_param,
185 	.set_iface_param	= qla4xxx_iface_set_param,
186 	.get_iface_param	= qla4xxx_get_iface_param,
187 	.bsg_request		= qla4xxx_bsg_request,
188 };
189 
190 static struct scsi_transport_template *qla4xxx_scsi_transport;
191 
192 static mode_t ql4_attr_is_visible(int param_type, int param)
193 {
194 	switch (param_type) {
195 	case ISCSI_HOST_PARAM:
196 		switch (param) {
197 		case ISCSI_HOST_PARAM_HWADDRESS:
198 		case ISCSI_HOST_PARAM_IPADDRESS:
199 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 			return S_IRUGO;
201 		default:
202 			return 0;
203 		}
204 	case ISCSI_PARAM:
205 		switch (param) {
206 		case ISCSI_PARAM_CONN_ADDRESS:
207 		case ISCSI_PARAM_CONN_PORT:
208 		case ISCSI_PARAM_TARGET_NAME:
209 		case ISCSI_PARAM_TPGT:
210 		case ISCSI_PARAM_TARGET_ALIAS:
211 		case ISCSI_PARAM_MAX_BURST:
212 		case ISCSI_PARAM_MAX_R2T:
213 		case ISCSI_PARAM_FIRST_BURST:
214 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
215 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
216 		case ISCSI_PARAM_IFACE_NAME:
217 			return S_IRUGO;
218 		default:
219 			return 0;
220 		}
221 	case ISCSI_NET_PARAM:
222 		switch (param) {
223 		case ISCSI_NET_PARAM_IPV4_ADDR:
224 		case ISCSI_NET_PARAM_IPV4_SUBNET:
225 		case ISCSI_NET_PARAM_IPV4_GW:
226 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
227 		case ISCSI_NET_PARAM_IFACE_ENABLE:
228 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
229 		case ISCSI_NET_PARAM_IPV6_ADDR:
230 		case ISCSI_NET_PARAM_IPV6_ROUTER:
231 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
232 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
233 		case ISCSI_NET_PARAM_VLAN_ID:
234 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
235 		case ISCSI_NET_PARAM_VLAN_ENABLED:
236 		case ISCSI_NET_PARAM_MTU:
237 		case ISCSI_NET_PARAM_PORT:
238 			return S_IRUGO;
239 		default:
240 			return 0;
241 		}
242 	}
243 
244 	return 0;
245 }
246 
247 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
248 				   enum iscsi_param_type param_type,
249 				   int param, char *buf)
250 {
251 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
252 	struct scsi_qla_host *ha = to_qla_host(shost);
253 	int len = -ENOSYS;
254 
255 	if (param_type != ISCSI_NET_PARAM)
256 		return -ENOSYS;
257 
258 	switch (param) {
259 	case ISCSI_NET_PARAM_IPV4_ADDR:
260 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
261 		break;
262 	case ISCSI_NET_PARAM_IPV4_SUBNET:
263 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
264 		break;
265 	case ISCSI_NET_PARAM_IPV4_GW:
266 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
267 		break;
268 	case ISCSI_NET_PARAM_IFACE_ENABLE:
269 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
270 			len = sprintf(buf, "%s\n",
271 				      (ha->ip_config.ipv4_options &
272 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
273 				      "enabled" : "disabled");
274 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
275 			len = sprintf(buf, "%s\n",
276 				      (ha->ip_config.ipv6_options &
277 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
278 				       "enabled" : "disabled");
279 		break;
280 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
281 		len = sprintf(buf, "%s\n",
282 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
283 			      "dhcp" : "static");
284 		break;
285 	case ISCSI_NET_PARAM_IPV6_ADDR:
286 		if (iface->iface_num == 0)
287 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
288 		if (iface->iface_num == 1)
289 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
290 		break;
291 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
292 		len = sprintf(buf, "%pI6\n",
293 			      &ha->ip_config.ipv6_link_local_addr);
294 		break;
295 	case ISCSI_NET_PARAM_IPV6_ROUTER:
296 		len = sprintf(buf, "%pI6\n",
297 			      &ha->ip_config.ipv6_default_router_addr);
298 		break;
299 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
300 		len = sprintf(buf, "%s\n",
301 			      (ha->ip_config.ipv6_addl_options &
302 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
303 			       "nd" : "static");
304 		break;
305 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
306 		len = sprintf(buf, "%s\n",
307 			      (ha->ip_config.ipv6_addl_options &
308 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
309 			       "auto" : "static");
310 		break;
311 	case ISCSI_NET_PARAM_VLAN_ID:
312 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
313 			len = sprintf(buf, "%d\n",
314 				      (ha->ip_config.ipv4_vlan_tag &
315 				       ISCSI_MAX_VLAN_ID));
316 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
317 			len = sprintf(buf, "%d\n",
318 				      (ha->ip_config.ipv6_vlan_tag &
319 				       ISCSI_MAX_VLAN_ID));
320 		break;
321 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
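		/* The 802.1Q priority (PCP) occupies the top three bits of the
		 * 16-bit VLAN tag, hence the shift by 13 before masking.
		 */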
322 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
323 			len = sprintf(buf, "%d\n",
324 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
325 					ISCSI_MAX_VLAN_PRIORITY));
326 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
327 			len = sprintf(buf, "%d\n",
328 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
329 					ISCSI_MAX_VLAN_PRIORITY));
330 		break;
331 	case ISCSI_NET_PARAM_VLAN_ENABLED:
332 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
333 			len = sprintf(buf, "%s\n",
334 				      (ha->ip_config.ipv4_options &
335 				       IPOPT_VLAN_TAGGING_ENABLE) ?
336 				       "enabled" : "disabled");
337 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
338 			len = sprintf(buf, "%s\n",
339 				      (ha->ip_config.ipv6_options &
340 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
341 				       "enabled" : "disabled");
342 		break;
343 	case ISCSI_NET_PARAM_MTU:
344 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
345 		break;
346 	case ISCSI_NET_PARAM_PORT:
347 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
348 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
349 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
350 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
351 		break;
352 	default:
353 		len = -ENOSYS;
354 	}
355 
356 	return len;
357 }
358 
359 static struct iscsi_endpoint *
360 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
361 		   int non_blocking)
362 {
363 	int ret;
364 	struct iscsi_endpoint *ep;
365 	struct qla_endpoint *qla_ep;
366 	struct scsi_qla_host *ha;
367 	struct sockaddr_in *addr;
368 	struct sockaddr_in6 *addr6;
369 
370 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
371 	if (!shost) {
372 		ret = -ENXIO;
373 		printk(KERN_ERR "%s: shost is NULL\n",
374 		       __func__);
375 		return ERR_PTR(ret);
376 	}
377 
378 	ha = iscsi_host_priv(shost);
379 
380 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
381 	if (!ep) {
382 		ret = -ENOMEM;
383 		return ERR_PTR(ret);
384 	}
385 
386 	qla_ep = ep->dd_data;
387 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
388 	if (dst_addr->sa_family == AF_INET) {
389 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
390 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
391 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
392 				  (char *)&addr->sin_addr));
393 	} else if (dst_addr->sa_family == AF_INET6) {
394 		memcpy(&qla_ep->dst_addr, dst_addr,
395 		       sizeof(struct sockaddr_in6));
396 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
397 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
398 				  (char *)&addr6->sin6_addr));
399 	}
400 
401 	qla_ep->host = shost;
402 
403 	return ep;
404 }
405 
406 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
407 {
408 	struct qla_endpoint *qla_ep;
409 	struct scsi_qla_host *ha;
410 	int ret = 0;
411 
412 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
413 	qla_ep = ep->dd_data;
414 	ha = to_qla_host(qla_ep->host);
415 
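	/* The iSCSI connection is offloaded to the firmware, so the endpoint
	 * is reported usable whenever the adapter itself is up.
	 */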
416 	if (adapter_up(ha))
417 		ret = 1;
418 
419 	return ret;
420 }
421 
422 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
423 {
424 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
425 	iscsi_destroy_endpoint(ep);
426 }
427 
428 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
429 				enum iscsi_param param,
430 				char *buf)
431 {
432 	struct qla_endpoint *qla_ep = ep->dd_data;
433 	struct sockaddr *dst_addr;
434 
435 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
436 
437 	switch (param) {
438 	case ISCSI_PARAM_CONN_PORT:
439 	case ISCSI_PARAM_CONN_ADDRESS:
440 		if (!qla_ep)
441 			return -ENOTCONN;
442 
443 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
444 		if (!dst_addr)
445 			return -ENOTCONN;
446 
447 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
448 						 &qla_ep->dst_addr, param, buf);
449 	default:
450 		return -ENOSYS;
451 	}
452 }
453 
454 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
455 				   struct iscsi_stats *stats)
456 {
457 	struct iscsi_session *sess;
458 	struct iscsi_cls_session *cls_sess;
459 	struct ddb_entry *ddb_entry;
460 	struct scsi_qla_host *ha;
461 	struct ql_iscsi_stats *ql_iscsi_stats;
462 	int stats_size;
463 	int ret;
464 	dma_addr_t iscsi_stats_dma;
465 
466 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
467 
468 	cls_sess = iscsi_conn_to_session(cls_conn);
469 	sess = cls_sess->dd_data;
470 	ddb_entry = sess->dd_data;
471 	ha = ddb_entry->ha;
472 
473 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
474 	/* Allocate memory */
475 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
476 					    &iscsi_stats_dma, GFP_KERNEL);
477 	if (!ql_iscsi_stats) {
478 		ql4_printk(KERN_ERR, ha,
479 			   "Unable to allocate memory for iscsi stats\n");
480 		goto exit_get_stats;
481 	}
482 
483 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
484 				     iscsi_stats_dma);
485 	if (ret != QLA_SUCCESS) {
486 		ql4_printk(KERN_ERR, ha,
487 			   "Unable to retrieve iscsi stats\n");
488 		goto free_stats;
489 	}
490 
491 	/* octets */
492 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
493 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
494 	/* xmit pdus */
495 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
496 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
497 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
498 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
499 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
500 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
501 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
502 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
503 	/* recv pdus */
504 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
505 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
506 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
507 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
508 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
509 	stats->logoutrsp_pdus =
510 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
511 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
512 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
513 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
514 
515 free_stats:
516 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
517 			  iscsi_stats_dma);
518 exit_get_stats:
519 	return;
520 }
521 
522 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
523 {
524 	struct iscsi_cls_session *session;
525 	struct iscsi_session *sess;
526 	unsigned long flags;
527 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
528 
529 	session = starget_to_session(scsi_target(sc->device));
530 	sess = session->dd_data;
531 
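	/* While the session is in the FAILED state (recovery in progress),
	 * keep resetting the command timer so the midlayer does not start
	 * escalating to aborts and resets.
	 */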
532 	spin_lock_irqsave(&session->lock, flags);
533 	if (session->state == ISCSI_SESSION_FAILED)
534 		ret = BLK_EH_RESET_TIMER;
535 	spin_unlock_irqrestore(&session->lock, flags);
536 
537 	return ret;
538 }
539 
540 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
541 				  enum iscsi_host_param param, char *buf)
542 {
543 	struct scsi_qla_host *ha = to_qla_host(shost);
544 	int len;
545 
546 	switch (param) {
547 	case ISCSI_HOST_PARAM_HWADDRESS:
548 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
549 		break;
550 	case ISCSI_HOST_PARAM_IPADDRESS:
551 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
552 		break;
553 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
554 		len = sprintf(buf, "%s\n", ha->name_string);
555 		break;
556 	default:
557 		return -ENOSYS;
558 	}
559 
560 	return len;
561 }
562 
563 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
564 {
565 	if (ha->iface_ipv4)
566 		return;
567 
568 	/* IPv4 */
569 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
570 					    &qla4xxx_iscsi_transport,
571 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
572 	if (!ha->iface_ipv4)
573 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
574 			   "iface0.\n");
575 }
576 
577 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
578 {
579 	if (!ha->iface_ipv6_0)
580 		/* IPv6 iface-0 */
581 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
582 						      &qla4xxx_iscsi_transport,
583 						      ISCSI_IFACE_TYPE_IPV6, 0,
584 						      0);
585 	if (!ha->iface_ipv6_0)
586 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 			   "iface0.\n");
588 
589 	if (!ha->iface_ipv6_1)
590 		/* IPv6 iface-1 */
591 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
592 						      &qla4xxx_iscsi_transport,
593 						      ISCSI_IFACE_TYPE_IPV6, 1,
594 						      0);
595 	if (!ha->iface_ipv6_1)
596 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
597 			   "iface1.\n");
598 }
599 
600 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
601 {
602 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
603 		qla4xxx_create_ipv4_iface(ha);
604 
605 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
606 		qla4xxx_create_ipv6_iface(ha);
607 }
608 
609 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
610 {
611 	if (ha->iface_ipv4) {
612 		iscsi_destroy_iface(ha->iface_ipv4);
613 		ha->iface_ipv4 = NULL;
614 	}
615 }
616 
617 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
618 {
619 	if (ha->iface_ipv6_0) {
620 		iscsi_destroy_iface(ha->iface_ipv6_0);
621 		ha->iface_ipv6_0 = NULL;
622 	}
623 	if (ha->iface_ipv6_1) {
624 		iscsi_destroy_iface(ha->iface_ipv6_1);
625 		ha->iface_ipv6_1 = NULL;
626 	}
627 }
628 
629 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
630 {
631 	qla4xxx_destroy_ipv4_iface(ha);
632 	qla4xxx_destroy_ipv6_iface(ha);
633 }
634 
635 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
636 			     struct iscsi_iface_param_info *iface_param,
637 			     struct addr_ctrl_blk *init_fw_cb)
638 {
639 	/*
640 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
641 	 * iface_num 1 is valid only for IPv6 Addr.
642 	 */
643 	switch (iface_param->param) {
644 	case ISCSI_NET_PARAM_IPV6_ADDR:
645 		if (iface_param->iface_num & 0x1)
646 			/* IPv6 Addr 1 */
647 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
648 			       sizeof(init_fw_cb->ipv6_addr1));
649 		else
650 			/* IPv6 Addr 0 */
651 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
652 			       sizeof(init_fw_cb->ipv6_addr0));
653 		break;
654 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
655 		if (iface_param->iface_num & 0x1)
656 			break;
657 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
658 		       sizeof(init_fw_cb->ipv6_if_id));
659 		break;
660 	case ISCSI_NET_PARAM_IPV6_ROUTER:
661 		if (iface_param->iface_num & 0x1)
662 			break;
663 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
664 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
665 		break;
666 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
667 		/* Autocfg applies to even interface */
668 		if (iface_param->iface_num & 0x1)
669 			break;
670 
671 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
672 			init_fw_cb->ipv6_addtl_opts &=
673 				cpu_to_le16(
674 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
675 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
676 			init_fw_cb->ipv6_addtl_opts |=
677 				cpu_to_le16(
678 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
679 		else
680 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
681 				   "IPv6 addr\n");
682 		break;
683 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
684 		/* Autocfg applies to even interface */
685 		if (iface_param->iface_num & 0x1)
686 			break;
687 
688 		if (iface_param->value[0] ==
689 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
690 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
691 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
692 		else if (iface_param->value[0] ==
693 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
694 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
695 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
696 		else
697 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
698 				   "IPv6 linklocal addr\n");
699 		break;
700 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
701 		/* Autocfg applies to even interface */
702 		if (iface_param->iface_num & 0x1)
703 			break;
704 
705 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
706 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
707 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
708 		break;
709 	case ISCSI_NET_PARAM_IFACE_ENABLE:
710 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
711 			init_fw_cb->ipv6_opts |=
712 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
713 			qla4xxx_create_ipv6_iface(ha);
714 		} else {
715 			init_fw_cb->ipv6_opts &=
716 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
717 					    0xFFFF);
718 			qla4xxx_destroy_ipv6_iface(ha);
719 		}
720 		break;
721 	case ISCSI_NET_PARAM_VLAN_ID:
722 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
723 			break;
724 		init_fw_cb->ipv6_vlan_tag =
725 				cpu_to_be16(*(uint16_t *)iface_param->value);
726 		break;
727 	case ISCSI_NET_PARAM_VLAN_ENABLED:
728 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
729 			init_fw_cb->ipv6_opts |=
730 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
731 		else
732 			init_fw_cb->ipv6_opts &=
733 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
734 		break;
735 	case ISCSI_NET_PARAM_MTU:
736 		init_fw_cb->eth_mtu_size =
737 				cpu_to_le16(*(uint16_t *)iface_param->value);
738 		break;
739 	case ISCSI_NET_PARAM_PORT:
740 		/* Autocfg applies to even interface */
741 		if (iface_param->iface_num & 0x1)
742 			break;
743 
744 		init_fw_cb->ipv6_port =
745 				cpu_to_le16(*(uint16_t *)iface_param->value);
746 		break;
747 	default:
748 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
749 			   iface_param->param);
750 		break;
751 	}
752 }
753 
754 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
755 			     struct iscsi_iface_param_info *iface_param,
756 			     struct addr_ctrl_blk *init_fw_cb)
757 {
758 	switch (iface_param->param) {
759 	case ISCSI_NET_PARAM_IPV4_ADDR:
760 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
761 		       sizeof(init_fw_cb->ipv4_addr));
762 		break;
763 	case ISCSI_NET_PARAM_IPV4_SUBNET:
764 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
765 		       sizeof(init_fw_cb->ipv4_subnet));
766 		break;
767 	case ISCSI_NET_PARAM_IPV4_GW:
768 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
769 		       sizeof(init_fw_cb->ipv4_gw_addr));
770 		break;
771 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
772 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
773 			init_fw_cb->ipv4_tcp_opts |=
774 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
775 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
776 			init_fw_cb->ipv4_tcp_opts &=
777 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
778 		else
779 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
780 		break;
781 	case ISCSI_NET_PARAM_IFACE_ENABLE:
782 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
783 			init_fw_cb->ipv4_ip_opts |=
784 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
785 			qla4xxx_create_ipv4_iface(ha);
786 		} else {
787 			init_fw_cb->ipv4_ip_opts &=
788 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
789 					    0xFFFF);
790 			qla4xxx_destroy_ipv4_iface(ha);
791 		}
792 		break;
793 	case ISCSI_NET_PARAM_VLAN_ID:
794 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
795 			break;
796 		init_fw_cb->ipv4_vlan_tag =
797 				cpu_to_be16(*(uint16_t *)iface_param->value);
798 		break;
799 	case ISCSI_NET_PARAM_VLAN_ENABLED:
800 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
801 			init_fw_cb->ipv4_ip_opts |=
802 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
803 		else
804 			init_fw_cb->ipv4_ip_opts &=
805 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
806 		break;
807 	case ISCSI_NET_PARAM_MTU:
808 		init_fw_cb->eth_mtu_size =
809 				cpu_to_le16(*(uint16_t *)iface_param->value);
810 		break;
811 	case ISCSI_NET_PARAM_PORT:
812 		init_fw_cb->ipv4_port =
813 				cpu_to_le16(*(uint16_t *)iface_param->value);
814 		break;
815 	default:
816 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
817 			   iface_param->param);
818 		break;
819 	}
820 }
821 
822 static void
823 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
824 {
825 	struct addr_ctrl_blk_def *acb;
826 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
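	/* The init control block is reused as an address control block;
	 * clear the reserved regions before it is handed back to the
	 * firmware via set_acb.
	 */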
827 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
828 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
829 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
830 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
831 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
832 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
833 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
834 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
835 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
836 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
837 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
838 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
839 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
840 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
841 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
842 }
843 
844 static int
845 qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
846 {
847 	struct scsi_qla_host *ha = to_qla_host(shost);
848 	int rval = 0;
849 	struct iscsi_iface_param_info *iface_param = NULL;
850 	struct addr_ctrl_blk *init_fw_cb = NULL;
851 	dma_addr_t init_fw_cb_dma;
852 	uint32_t mbox_cmd[MBOX_REG_COUNT];
853 	uint32_t mbox_sts[MBOX_REG_COUNT];
854 	uint32_t total_param_count;
855 	uint32_t length;
856 
857 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
858 					sizeof(struct addr_ctrl_blk),
859 					&init_fw_cb_dma, GFP_KERNEL);
860 	if (!init_fw_cb) {
861 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
862 			   __func__);
863 		return -ENOMEM;
864 	}
865 
866 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
867 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
868 	memset(&mbox_sts, 0, sizeof(mbox_sts));
869 
870 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
871 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
872 		rval = -EIO;
873 		goto exit_init_fw_cb;
874 	}
875 
876 	total_param_count = count;
877 	iface_param = (struct iscsi_iface_param_info *)data;
878 
879 	for ( ; total_param_count != 0; total_param_count--) {
880 		length = iface_param->len;
881 
882 		if (iface_param->param_type != ISCSI_NET_PARAM)
883 			continue;
884 
885 		switch (iface_param->iface_type) {
886 		case ISCSI_IFACE_TYPE_IPV4:
887 			switch (iface_param->iface_num) {
888 			case 0:
889 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
890 				break;
891 			default:
892 				/* Cannot have more than one IPv4 interface */
893 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
894 					   "number = %d\n",
895 					   iface_param->iface_num);
896 				break;
897 			}
898 			break;
899 		case ISCSI_IFACE_TYPE_IPV6:
900 			switch (iface_param->iface_num) {
901 			case 0:
902 			case 1:
903 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
904 				break;
905 			default:
906 				/* Cannot have more than two IPv6 interfaces */
907 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
908 					   "number = %d\n",
909 					   iface_param->iface_num);
910 				break;
911 			}
912 			break;
913 		default:
914 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
915 			break;
916 		}
917 
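		/* Each entry is a fixed-size header followed by a
		 * variable-length value; step over both to reach the next
		 * parameter.
		 */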
918 		iface_param = (struct iscsi_iface_param_info *)
919 						((uint8_t *)iface_param +
920 			    sizeof(struct iscsi_iface_param_info) + length);
921 	}
922 
923 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
924 
925 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
926 				 sizeof(struct addr_ctrl_blk),
927 				 FLASH_OPT_RMW_COMMIT);
928 	if (rval != QLA_SUCCESS) {
929 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
930 			   __func__);
931 		rval = -EIO;
932 		goto exit_init_fw_cb;
933 	}
934 
935 	qla4xxx_disable_acb(ha);
936 
937 	qla4xxx_initcb_to_acb(init_fw_cb);
938 
939 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
940 	if (rval != QLA_SUCCESS) {
941 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
942 			   __func__);
943 		rval = -EIO;
944 		goto exit_init_fw_cb;
945 	}
946 
947 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
948 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
949 				  init_fw_cb_dma);
950 
951 exit_init_fw_cb:
952 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
953 			  init_fw_cb, init_fw_cb_dma);
954 
955 	return rval;
956 }
957 
958 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
959 				  enum iscsi_param param, char *buf)
960 {
961 	struct iscsi_conn *conn;
962 	struct qla_conn *qla_conn;
963 	struct sockaddr *dst_addr;
964 	int len = 0;
965 
966 	conn = cls_conn->dd_data;
967 	qla_conn = conn->dd_data;
968 	dst_addr = &qla_conn->qla_ep->dst_addr;
969 
970 	switch (param) {
971 	case ISCSI_PARAM_CONN_PORT:
972 	case ISCSI_PARAM_CONN_ADDRESS:
973 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
974 						 dst_addr, param, buf);
975 	default:
976 		return iscsi_conn_get_param(cls_conn, param, buf);
977 	}
978 
979 	return len;
980 
981 }
982 
983 static struct iscsi_cls_session *
984 qla4xxx_session_create(struct iscsi_endpoint *ep,
985 			uint16_t cmds_max, uint16_t qdepth,
986 			uint32_t initial_cmdsn)
987 {
988 	struct iscsi_cls_session *cls_sess;
989 	struct scsi_qla_host *ha;
990 	struct qla_endpoint *qla_ep;
991 	struct ddb_entry *ddb_entry;
992 	uint32_t ddb_index;
993 	uint32_t mbx_sts = 0;
994 	struct iscsi_session *sess;
995 	struct sockaddr *dst_addr;
996 	int ret;
997 
998 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
999 	if (!ep) {
1000 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1001 		return NULL;
1002 	}
1003 
1004 	qla_ep = ep->dd_data;
1005 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1006 	ha = to_qla_host(qla_ep->host);
1007 
1008 get_ddb_index:
1009 	ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1010 
1011 	if (ddb_index >= MAX_DDB_ENTRIES) {
1012 		DEBUG2(ql4_printk(KERN_INFO, ha,
1013 				  "Free DDB index not available\n"));
1014 		return NULL;
1015 	}
1016 
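	/* Claim the index atomically; if another context grabbed it between
	 * the search above and here, retry with the next free slot.
	 */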
1017 	if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1018 		goto get_ddb_index;
1019 
1020 	DEBUG2(ql4_printk(KERN_INFO, ha,
1021 			  "Found a free DDB index at %d\n", ddb_index));
1022 	ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1023 	if (ret == QLA_ERROR) {
1024 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1025 			ql4_printk(KERN_INFO, ha,
1026 				   "DDB index = %d not available trying next\n",
1027 				   ddb_index);
1028 			goto get_ddb_index;
1029 		}
1030 		DEBUG2(ql4_printk(KERN_INFO, ha,
1031 				  "Free FW DDB not available\n"));
1032 		return NULL;
1033 	}
1034 
1035 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1036 				       cmds_max, sizeof(struct ddb_entry),
1037 				       sizeof(struct ql4_task_data),
1038 				       initial_cmdsn, ddb_index);
1039 	if (!cls_sess)
1040 		return NULL;
1041 
1042 	sess = cls_sess->dd_data;
1043 	ddb_entry = sess->dd_data;
1044 	ddb_entry->fw_ddb_index = ddb_index;
1045 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1046 	ddb_entry->ha = ha;
1047 	ddb_entry->sess = cls_sess;
1048 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1049 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1050 	ha->tot_ddbs++;
1051 
1052 	return cls_sess;
1053 }
1054 
1055 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1056 {
1057 	struct iscsi_session *sess;
1058 	struct ddb_entry *ddb_entry;
1059 	struct scsi_qla_host *ha;
1060 	unsigned long flags;
1061 
1062 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1063 	sess = cls_sess->dd_data;
1064 	ddb_entry = sess->dd_data;
1065 	ha = ddb_entry->ha;
1066 
1067 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1068 
1069 	spin_lock_irqsave(&ha->hardware_lock, flags);
1070 	qla4xxx_free_ddb(ha, ddb_entry);
1071 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1072 	iscsi_session_teardown(cls_sess);
1073 }
1074 
1075 static struct iscsi_cls_conn *
1076 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1077 {
1078 	struct iscsi_cls_conn *cls_conn;
1079 	struct iscsi_session *sess;
1080 	struct ddb_entry *ddb_entry;
1081 
1082 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1083 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1084 				    conn_idx);
1085 	sess = cls_sess->dd_data;
1086 	ddb_entry = sess->dd_data;
1087 	ddb_entry->conn = cls_conn;
1088 
1089 	return cls_conn;
1090 }
1091 
1092 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1093 			     struct iscsi_cls_conn *cls_conn,
1094 			     uint64_t transport_fd, int is_leading)
1095 {
1096 	struct iscsi_conn *conn;
1097 	struct qla_conn *qla_conn;
1098 	struct iscsi_endpoint *ep;
1099 
1100 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1101 
1102 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1103 		return -EINVAL;
1104 	ep = iscsi_lookup_endpoint(transport_fd);
1105 	conn = cls_conn->dd_data;
1106 	qla_conn = conn->dd_data;
1107 	qla_conn->qla_ep = ep->dd_data;
1108 	return 0;
1109 }
1110 
1111 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1112 {
1113 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1114 	struct iscsi_session *sess;
1115 	struct ddb_entry *ddb_entry;
1116 	struct scsi_qla_host *ha;
1117 	struct dev_db_entry *fw_ddb_entry;
1118 	dma_addr_t fw_ddb_entry_dma;
1119 	uint32_t mbx_sts = 0;
1120 	int ret = 0;
1121 	int status = QLA_SUCCESS;
1122 
1123 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1124 	sess = cls_sess->dd_data;
1125 	ddb_entry = sess->dd_data;
1126 	ha = ddb_entry->ha;
1127 
1128 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1129 					  &fw_ddb_entry_dma, GFP_KERNEL);
1130 	if (!fw_ddb_entry) {
1131 		ql4_printk(KERN_ERR, ha,
1132 			   "%s: Unable to allocate dma buffer\n", __func__);
1133 		return -ENOMEM;
1134 	}
1135 
1136 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1137 	if (ret) {
1138 		/* If iscsid is stopped and restarted there is no need to set
1139 		 * the params again, since the ddb state will already be
1140 		 * active and the FW does not allow set ddb on an
1141 		 * active session.
1142 		 */
1143 		if (mbx_sts)
1144 			if (ddb_entry->fw_ddb_device_state ==
1145 						DDB_DS_SESSION_ACTIVE) {
1146 				iscsi_conn_start(ddb_entry->conn);
1147 				iscsi_conn_login_event(ddb_entry->conn,
1148 						ISCSI_CONN_STATE_LOGGED_IN);
1149 				goto exit_set_param;
1150 			}
1151 
1152 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1153 			   __func__, ddb_entry->fw_ddb_index);
1154 		goto exit_conn_start;
1155 	}
1156 
1157 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1158 	if (status == QLA_ERROR) {
1159 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1160 			   sess->targetname);
1161 		ret = -EINVAL;
1162 		goto exit_conn_start;
1163 	}
1164 
1165 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1166 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1167 
1168 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1169 		      ddb_entry->fw_ddb_device_state));
1170 
1171 exit_set_param:
1172 	ret = 0;
1173 
1174 exit_conn_start:
1175 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1176 			  fw_ddb_entry, fw_ddb_entry_dma);
1177 	return ret;
1178 }
1179 
1180 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1181 {
1182 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1183 	struct iscsi_session *sess;
1184 	struct scsi_qla_host *ha;
1185 	struct ddb_entry *ddb_entry;
1186 	int options;
1187 
1188 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1189 	sess = cls_sess->dd_data;
1190 	ddb_entry = sess->dd_data;
1191 	ha = ddb_entry->ha;
1192 
1193 	options = LOGOUT_OPTION_CLOSE_SESSION;
1194 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1195 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1196 }
1197 
1198 static void qla4xxx_task_work(struct work_struct *wdata)
1199 {
1200 	struct ql4_task_data *task_data;
1201 	struct scsi_qla_host *ha;
1202 	struct passthru_status *sts;
1203 	struct iscsi_task *task;
1204 	struct iscsi_hdr *hdr;
1205 	uint8_t *data;
1206 	uint32_t data_len;
1207 	struct iscsi_conn *conn;
1208 	int hdr_len;
1209 	itt_t itt;
1210 
1211 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1212 	ha = task_data->ha;
1213 	task = task_data->task;
1214 	sts = &task_data->sts;
1215 	hdr_len = sizeof(struct iscsi_hdr);
1216 
1217 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1218 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1219 	DEBUG3(printk(KERN_INFO "Response buffer"));
1220 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1221 
1222 	conn = task->conn;
1223 
1224 	switch (sts->completionStatus) {
1225 	case PASSTHRU_STATUS_COMPLETE:
1226 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1227 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1228 		itt = sts->handle;
1229 		hdr->itt = itt;
1230 		data = task_data->resp_buffer + hdr_len;
1231 		data_len = task_data->resp_len - hdr_len;
1232 		iscsi_complete_pdu(conn, hdr, data, data_len);
1233 		break;
1234 	default:
1235 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1236 			   sts->completionStatus);
1237 		break;
1238 	}
1239 	return;
1240 }
1241 
1242 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1243 {
1244 	struct ql4_task_data *task_data;
1245 	struct iscsi_session *sess;
1246 	struct ddb_entry *ddb_entry;
1247 	struct scsi_qla_host *ha;
1248 	int hdr_len;
1249 
1250 	sess = task->conn->session;
1251 	ddb_entry = sess->dd_data;
1252 	ha = ddb_entry->ha;
1253 	task_data = task->dd_data;
1254 	memset(task_data, 0, sizeof(struct ql4_task_data));
1255 
1256 	if (task->sc) {
1257 		ql4_printk(KERN_INFO, ha,
1258 			   "%s: SCSI Commands not implemented\n", __func__);
1259 		return -EINVAL;
1260 	}
1261 
1262 	hdr_len = sizeof(struct iscsi_hdr);
1263 	task_data->ha = ha;
1264 	task_data->task = task;
1265 
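	/* Map any immediate data attached to the task so the HBA can DMA it
	 * out with the passthrough request.
	 */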
1266 	if (task->data_count) {
1267 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1268 						     task->data_count,
1269 						     PCI_DMA_TODEVICE);
1270 	}
1271 
1272 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1273 		      __func__, task->conn->max_recv_dlength, hdr_len));
1274 
1275 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1276 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1277 						    task_data->resp_len,
1278 						    &task_data->resp_dma,
1279 						    GFP_ATOMIC);
1280 	if (!task_data->resp_buffer)
1281 		goto exit_alloc_pdu;
1282 
1283 	task_data->req_len = task->data_count + hdr_len;
1284 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1285 						   task_data->req_len,
1286 						   &task_data->req_dma,
1287 						   GFP_ATOMIC);
1288 	if (!task_data->req_buffer)
1289 		goto exit_alloc_pdu;
1290 
1291 	task->hdr = task_data->req_buffer;
1292 
1293 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1294 
1295 	return 0;
1296 
1297 exit_alloc_pdu:
1298 	if (task_data->resp_buffer)
1299 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1300 				  task_data->resp_buffer, task_data->resp_dma);
1301 
1302 	if (task_data->req_buffer)
1303 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1304 				  task_data->req_buffer, task_data->req_dma);
1305 	return -ENOMEM;
1306 }
1307 
1308 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1309 {
1310 	struct ql4_task_data *task_data;
1311 	struct iscsi_session *sess;
1312 	struct ddb_entry *ddb_entry;
1313 	struct scsi_qla_host *ha;
1314 	int hdr_len;
1315 
1316 	hdr_len = sizeof(struct iscsi_hdr);
1317 	sess = task->conn->session;
1318 	ddb_entry = sess->dd_data;
1319 	ha = ddb_entry->ha;
1320 	task_data = task->dd_data;
1321 
1322 	if (task->data_count) {
1323 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1324 				 task->data_count, PCI_DMA_TODEVICE);
1325 	}
1326 
1327 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1328 		      __func__, task->conn->max_recv_dlength, hdr_len));
1329 
1330 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1331 			  task_data->resp_buffer, task_data->resp_dma);
1332 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1333 			  task_data->req_buffer, task_data->req_dma);
1334 	return;
1335 }
1336 
1337 static int qla4xxx_task_xmit(struct iscsi_task *task)
1338 {
1339 	struct scsi_cmnd *sc = task->sc;
1340 	struct iscsi_session *sess = task->conn->session;
1341 	struct ddb_entry *ddb_entry = sess->dd_data;
1342 	struct scsi_qla_host *ha = ddb_entry->ha;
1343 
1344 	if (!sc)
1345 		return qla4xxx_send_passthru0(task);
1346 
1347 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1348 		   __func__);
1349 	return -ENOSYS;
1350 }
1351 
1352 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1353 				       struct ddb_entry *ddb_entry)
1354 {
1355 	struct iscsi_cls_session *cls_sess;
1356 	struct iscsi_cls_conn *cls_conn;
1357 	struct iscsi_session *sess;
1358 	struct iscsi_conn *conn;
1359 	uint32_t ddb_state;
1360 	dma_addr_t fw_ddb_entry_dma;
1361 	struct dev_db_entry *fw_ddb_entry;
1362 
1363 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1364 					  &fw_ddb_entry_dma, GFP_KERNEL);
1365 	if (!fw_ddb_entry) {
1366 		ql4_printk(KERN_ERR, ha,
1367 			   "%s: Unable to allocate dma buffer\n", __func__);
1368 		return;
1369 	}
1370 
1371 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1372 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1373 				    NULL, NULL, NULL) == QLA_ERROR) {
1374 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1375 				  "get_ddb_entry for fw_ddb_index %d\n",
1376 				  ha->host_no, __func__,
1377 				  ddb_entry->fw_ddb_index));
1378 		return;
1379 	}
1380 
1381 	cls_sess = ddb_entry->sess;
1382 	sess = cls_sess->dd_data;
1383 
1384 	cls_conn = ddb_entry->conn;
1385 	conn = cls_conn->dd_data;
1386 
1387 	/* Update params */
1388 	conn->max_recv_dlength = BYTE_UNITS *
1389 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1390 
1391 	conn->max_xmit_dlength = BYTE_UNITS *
1392 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1393 
1394 	sess->initial_r2t_en =
1395 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1396 
1397 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1398 
1399 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1400 
1401 	sess->first_burst = BYTE_UNITS *
1402 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1403 
1404 	sess->max_burst = BYTE_UNITS *
1405 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1406 
1407 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1408 
1409 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1410 
1411 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1412 
1413 	memcpy(sess->initiatorname, ha->name_string,
1414 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1415 }
1416 
1417 /*
1418  * Timer routines
1419  */
1420 
1421 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1422 				unsigned long interval)
1423 {
1424 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1425 		     __func__, ha->host->host_no));
1426 	init_timer(&ha->timer);
1427 	ha->timer.expires = jiffies + interval * HZ;
1428 	ha->timer.data = (unsigned long)ha;
1429 	ha->timer.function = (void (*)(unsigned long))func;
1430 	add_timer(&ha->timer);
1431 	ha->timer_active = 1;
1432 }
1433 
1434 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1435 {
1436 	del_timer_sync(&ha->timer);
1437 	ha->timer_active = 0;
1438 }
1439 
1440 /**
1441  * qla4xxx_mark_device_missing - blocks the session
1442  * @cls_session: Pointer to the session to be blocked
1443  *
1444  * This routine marks a device missing and closes its connection by
1445  * blocking the iSCSI session.
1446  **/
1447 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
1448 {
1449 	iscsi_block_session(cls_session);
1450 }
1451 
1452 /**
1453  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1454  * @ha: Pointer to host adapter structure.
1455  *
1456  * This routine marks all devices on this adapter as missing.
1457  **/
1458 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1459 {
1460 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1461 }
1462 
1463 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1464 				       struct ddb_entry *ddb_entry,
1465 				       struct scsi_cmnd *cmd)
1466 {
1467 	struct srb *srb;
1468 
1469 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1470 	if (!srb)
1471 		return srb;
1472 
1473 	kref_init(&srb->srb_ref);
1474 	srb->ha = ha;
1475 	srb->ddb = ddb_entry;
1476 	srb->cmd = cmd;
1477 	srb->flags = 0;
1478 	CMD_SP(cmd) = (void *)srb;
1479 
1480 	return srb;
1481 }
1482 
1483 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1484 {
1485 	struct scsi_cmnd *cmd = srb->cmd;
1486 
1487 	if (srb->flags & SRB_DMA_VALID) {
1488 		scsi_dma_unmap(cmd);
1489 		srb->flags &= ~SRB_DMA_VALID;
1490 	}
1491 	CMD_SP(cmd) = NULL;
1492 }
1493 
1494 void qla4xxx_srb_compl(struct kref *ref)
1495 {
1496 	struct srb *srb = container_of(ref, struct srb, srb_ref);
1497 	struct scsi_cmnd *cmd = srb->cmd;
1498 	struct scsi_qla_host *ha = srb->ha;
1499 
1500 	qla4xxx_srb_free_dma(ha, srb);
1501 
1502 	mempool_free(srb, ha->srb_mempool);
1503 
1504 	cmd->scsi_done(cmd);
1505 }
1506 
1507 /**
1508  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
1509  * @host: scsi host
1510  * @cmd: Pointer to Linux's SCSI command structure
1511  *
1512  * Remarks:
1513  * This routine is invoked by Linux to send a SCSI command to the driver.
1514  * The mid-level driver tries to ensure that queuecommand never gets
1515  * invoked concurrently with itself or the interrupt handler (although
1516  * the interrupt handler may call this routine as part of request-
1517  * completion handling).  Unfortunately, it sometimes calls the scheduler
1518  * in interrupt context which is a big NO! NO!.
1519  **/
1520 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1521 {
1522 	struct scsi_qla_host *ha = to_qla_host(host);
1523 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
1524 	struct iscsi_cls_session *sess = ddb_entry->sess;
1525 	struct srb *srb;
1526 	int rval;
1527 
1528 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1529 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1530 			cmd->result = DID_NO_CONNECT << 16;
1531 		else
1532 			cmd->result = DID_REQUEUE << 16;
1533 		goto qc_fail_command;
1534 	}
1535 
1536 	if (!sess) {
1537 		cmd->result = DID_IMM_RETRY << 16;
1538 		goto qc_fail_command;
1539 	}
1540 
1541 	rval = iscsi_session_chkready(sess);
1542 	if (rval) {
1543 		cmd->result = rval;
1544 		goto qc_fail_command;
1545 	}
1546 
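	/* While a reset or quiescent transition is pending, or the adapter
	 * or link is down, return "host busy" so the midlayer retries the
	 * command later.
	 */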
1547 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1548 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1549 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1550 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1551 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1552 	    !test_bit(AF_ONLINE, &ha->flags) ||
1553 	    !test_bit(AF_LINK_UP, &ha->flags) ||
1554 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1555 		goto qc_host_busy;
1556 
1557 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
1558 	if (!srb)
1559 		goto qc_host_busy;
1560 
1561 	rval = qla4xxx_send_command_to_isp(ha, srb);
1562 	if (rval != QLA_SUCCESS)
1563 		goto qc_host_busy_free_sp;
1564 
1565 	return 0;
1566 
1567 qc_host_busy_free_sp:
1568 	qla4xxx_srb_free_dma(ha, srb);
1569 	mempool_free(srb, ha->srb_mempool);
1570 
1571 qc_host_busy:
1572 	return SCSI_MLQUEUE_HOST_BUSY;
1573 
1574 qc_fail_command:
1575 	cmd->scsi_done(cmd);
1576 
1577 	return 0;
1578 }
1579 
1580 /**
1581  * qla4xxx_mem_free - frees memory allocated to adapter
1582  * @ha: Pointer to host adapter structure.
1583  *
1584  * Frees memory previously allocated by qla4xxx_mem_alloc
1585  **/
1586 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1587 {
1588 	if (ha->queues)
1589 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1590 				  ha->queues_dma);
1591 
1592 	ha->queues_len = 0;
1593 	ha->queues = NULL;
1594 	ha->queues_dma = 0;
1595 	ha->request_ring = NULL;
1596 	ha->request_dma = 0;
1597 	ha->response_ring = NULL;
1598 	ha->response_dma = 0;
1599 	ha->shadow_regs = NULL;
1600 	ha->shadow_regs_dma = 0;
1601 
1602 	/* Free srb pool. */
1603 	if (ha->srb_mempool)
1604 		mempool_destroy(ha->srb_mempool);
1605 
1606 	ha->srb_mempool = NULL;
1607 
1608 	if (ha->chap_dma_pool)
1609 		dma_pool_destroy(ha->chap_dma_pool);
1610 
1611 	if (ha->chap_list)
1612 		vfree(ha->chap_list);
1613 	ha->chap_list = NULL;
1614 
1615 	/* release io space registers  */
1616 	if (is_qla8022(ha)) {
1617 		if (ha->nx_pcibase)
1618 			iounmap(
1619 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
1620 	} else if (ha->reg)
1621 		iounmap(ha->reg);
1622 	pci_release_regions(ha->pdev);
1623 }
1624 
1625 /**
1626  * qla4xxx_mem_alloc - allocates memory for use by adapter.
1627  * @ha: Pointer to host adapter structure
1628  *
1629  * Allocates DMA memory for request and response queues. Also allocates memory
1630  * for srbs.
1631  **/
1632 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1633 {
1634 	unsigned long align;
1635 
1636 	/* Allocate contiguous block of DMA memory for queues. */
1637 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1638 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1639 			  sizeof(struct shadow_regs) +
1640 			  MEM_ALIGN_VALUE +
1641 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
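	/* Extra MEM_ALIGN_VALUE bytes are reserved so the request ring can
	 * be realigned below, and the total is rounded up to a whole page.
	 */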
1642 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1643 					&ha->queues_dma, GFP_KERNEL);
1644 	if (ha->queues == NULL) {
1645 		ql4_printk(KERN_WARNING, ha,
1646 		    "Memory Allocation failed - queues.\n");
1647 
1648 		goto mem_alloc_error_exit;
1649 	}
1650 	memset(ha->queues, 0, ha->queues_len);
1651 
1652 	/*
1653 	 * As per RISC alignment requirements -- the bus-address must be a
1654 	 * multiple of the request-ring size (in bytes).
1655 	 */
1656 	align = 0;
1657 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1658 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1659 					   (MEM_ALIGN_VALUE - 1));
1660 
1661 	/* Update request and response queue pointers. */
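	/* The single DMA block is laid out as: alignment pad, request ring,
	 * response ring, then the shadow registers.
	 */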
1662 	ha->request_dma = ha->queues_dma + align;
1663 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
1664 	ha->response_dma = ha->queues_dma + align +
1665 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1666 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
1667 						    (REQUEST_QUEUE_DEPTH *
1668 						     QUEUE_SIZE));
1669 	ha->shadow_regs_dma = ha->queues_dma + align +
1670 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1671 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1672 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1673 						  (REQUEST_QUEUE_DEPTH *
1674 						   QUEUE_SIZE) +
1675 						  (RESPONSE_QUEUE_DEPTH *
1676 						   QUEUE_SIZE));
1677 
1678 	/* Allocate memory for srb pool. */
1679 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1680 					 mempool_free_slab, srb_cachep);
1681 	if (ha->srb_mempool == NULL) {
1682 		ql4_printk(KERN_WARNING, ha,
1683 		    "Memory Allocation failed - SRB Pool.\n");
1684 
1685 		goto mem_alloc_error_exit;
1686 	}
1687 
1688 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1689 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
1690 
1691 	if (ha->chap_dma_pool == NULL) {
1692 		ql4_printk(KERN_WARNING, ha,
1693 		    "%s: chap_dma_pool allocation failed..\n", __func__);
1694 		goto mem_alloc_error_exit;
1695 	}
1696 
1697 	return QLA_SUCCESS;
1698 
1699 mem_alloc_error_exit:
1700 	qla4xxx_mem_free(ha);
1701 	return QLA_ERROR;
1702 }
1703 
1704 /**
1705  * qla4_8xxx_check_fw_alive  - Check firmware health
1706  * @ha: Pointer to host adapter structure.
1707  *
1708  * Context: Interrupt
1709  **/
1710 static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1711 {
1712 	uint32_t fw_heartbeat_counter, halt_status;
1713 
1714 	fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1715 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1716 	if (fw_heartbeat_counter == 0xffffffff) {
1717 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1718 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1719 		    ha->host_no, __func__));
1720 		return;
1721 	}
1722 
1723 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1724 		ha->seconds_since_last_heartbeat++;
1725 		/* FW not alive after 2 seconds */
1726 		if (ha->seconds_since_last_heartbeat == 2) {
1727 			ha->seconds_since_last_heartbeat = 0;
1728 			halt_status = qla4_8xxx_rd_32(ha,
1729 						      QLA82XX_PEG_HALT_STATUS1);
1730 
1731 			ql4_printk(KERN_INFO, ha,
1732 				   "scsi(%ld): %s, Dumping hw/fw registers:\n "
1733 				   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1734 				   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1735 				   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1736 				   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1737 				   ha->host_no, __func__, halt_status,
1738 				   qla4_8xxx_rd_32(ha,
1739 						   QLA82XX_PEG_HALT_STATUS2),
1740 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1741 						   0x3c),
1742 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1743 						   0x3c),
1744 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1745 						   0x3c),
1746 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1747 						   0x3c),
1748 				   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1749 						   0x3c));
1750 
1751 			/* Since we cannot change dev_state in interrupt
1752 			 * context, set the appropriate DPC flag then wake up
1753 			 * the DPC */
1754 			if (halt_status & HALT_STATUS_UNRECOVERABLE)
1755 				set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1756 			else {
1757 				printk("scsi%ld: %s: detect abort needed!\n",
1758 				    ha->host_no, __func__);
1759 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1760 			}
1761 			qla4xxx_wake_dpc(ha);
1762 			qla4xxx_mailbox_premature_completion(ha);
1763 		}
1764 	} else
1765 		ha->seconds_since_last_heartbeat = 0;
1766 
1767 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
1768 }
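/*
 * Timeline sketch: this runs once per second from qla4xxx_timer() via
 * qla4_8xxx_watchdog().  If PEG_ALIVE_COUNTER reads back unchanged for two
 * consecutive polls, the halt status registers are dumped and either
 * DPC_HA_UNRECOVERABLE or DPC_RESET_HA is scheduled; any change in the
 * counter resets the two-second window.
 */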
1769 
1770 /**
1771  * qla4_8xxx_watchdog - Poll dev state
1772  * @ha: Pointer to host adapter structure.
1773  *
1774  * Context: Interrupt
1775  **/
1776 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1777 {
1778 	uint32_t dev_state;
1779 
1780 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1781 
1782 	/* don't poll if reset is going on */
1783 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1784 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1785 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
1786 		if (dev_state == QLA82XX_DEV_NEED_RESET &&
1787 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1788 			if (!ql4xdontresethba) {
1789 				ql4_printk(KERN_INFO, ha, "%s: HW State: "
1790 				    "NEED RESET!\n", __func__);
1791 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1792 				qla4xxx_wake_dpc(ha);
1793 				qla4xxx_mailbox_premature_completion(ha);
1794 			}
1795 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1796 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1797 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1798 			    __func__);
1799 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1800 			qla4xxx_wake_dpc(ha);
1801 		} else  {
1802 			/* Check firmware health */
1803 			qla4_8xxx_check_fw_alive(ha);
1804 		}
1805 	}
1806 }
1807 
1808 /**
1809  * qla4xxx_timer - checks every second for work to do.
1810  * @ha: Pointer to host adapter structure.
1811  **/
1812 static void qla4xxx_timer(struct scsi_qla_host *ha)
1813 {
1814 	int start_dpc = 0;
1815 	uint16_t w;
1816 
1817 	/* If we are in the middle of AER/EEH processing
1818 	 * skip any processing and reschedule the timer
1819 	 */
1820 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1821 		mod_timer(&ha->timer, jiffies + HZ);
1822 		return;
1823 	}
1824 
1825 	/* Hardware read to trigger an EEH error during mailbox waits. */
1826 	if (!pci_channel_offline(ha->pdev))
1827 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1828 
1829 	if (is_qla8022(ha)) {
1830 		qla4_8xxx_watchdog(ha);
1831 	}
1832 
1833 	if (!is_qla8022(ha)) {
1834 		/* Check for heartbeat interval. */
1835 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1836 		    ha->heartbeat_interval != 0) {
1837 			ha->seconds_since_last_heartbeat++;
1838 			if (ha->seconds_since_last_heartbeat >
1839 			    ha->heartbeat_interval + 2)
1840 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
1841 		}
1842 	}
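	/*
	 * Illustrative example: with a heartbeat_interval of 5 seconds,
	 * DPC_RESET_HA is scheduled once more than 7 timer ticks (interval
	 * plus 2 seconds of grace) elapse without the counter being cleared,
	 * which is assumed to happen when a firmware heartbeat AEN is
	 * processed elsewhere in the driver.
	 */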
1843 
1844 	/* Wakeup the dpc routine for this adapter, if needed. */
1845 	if (start_dpc ||
1846 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1847 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1848 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
1849 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1850 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1851 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
1852 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
1853 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1854 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1855 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
1856 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1857 			      " - dpc flags = 0x%lx\n",
1858 			      ha->host_no, __func__, ha->dpc_flags));
1859 		qla4xxx_wake_dpc(ha);
1860 	}
1861 
1862 	/* Reschedule timer thread to call us back in one second */
1863 	mod_timer(&ha->timer, jiffies + HZ);
1864 
1865 	DEBUG2(ha->seconds_since_last_intr++);
1866 }
1867 
1868 /**
1869  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1870  * @ha: Pointer to host adapter structure.
1871  *
1872  * This routine stalls the driver until all outstanding commands are returned.
1873  * Caller must release the Hardware Lock prior to calling this routine.
1874  **/
1875 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1876 {
1877 	uint32_t index = 0;
1878 	unsigned long flags;
1879 	struct scsi_cmnd *cmd;
1880 
1881 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1882 
1883 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1884 	    "complete\n", WAIT_CMD_TOV));
1885 
1886 	while (!time_after_eq(jiffies, wtime)) {
1887 		spin_lock_irqsave(&ha->hardware_lock, flags);
1888 		/* Find a command that hasn't completed. */
1889 		for (index = 0; index < ha->host->can_queue; index++) {
1890 			cmd = scsi_host_find_tag(ha->host, index);
1891 			/*
1892 			 * We cannot just check if the index is valid,
1893 			 * because if we are running from the SCSI EH, then
1894 			 * the scsi/block layer is going to prevent
1895 			 * the tag from being released.
1896 			 */
1897 			if (cmd != NULL && CMD_SP(cmd))
1898 				break;
1899 		}
1900 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1901 
1902 		/* If No Commands are pending, wait is complete */
1903 		if (index == ha->host->can_queue)
1904 			return QLA_SUCCESS;
1905 
1906 		msleep(1000);
1907 	}
1908 	/* If we timed out waiting for commands to complete,
1909 	 * return ERROR. */
1910 	return QLA_ERROR;
1911 }
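/*
 * Usage sketch: callers in this file, e.g. qla4xxx_recover_adapter(), invoke
 * this with the hardware lock released and skip the wait when firmware
 * recovery is already in progress:
 *
 *	if (!test_bit(AF_FW_RECOVERY, &ha->flags))
 *		qla4xxx_cmd_wait(ha);
 */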
1912 
1913 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
1914 {
1915 	uint32_t ctrl_status;
1916 	unsigned long flags = 0;
1917 
1918 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
1919 
1920 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1921 		return QLA_ERROR;
1922 
1923 	spin_lock_irqsave(&ha->hardware_lock, flags);
1924 
1925 	/*
1926 	 * If the SCSI Reset Interrupt bit is set, clear it.
1927 	 * Otherwise, the Soft Reset won't work.
1928 	 */
1929 	ctrl_status = readw(&ha->reg->ctrl_status);
1930 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1931 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1932 
1933 	/* Issue Soft Reset */
1934 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1935 	readl(&ha->reg->ctrl_status);
1936 
1937 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1938 	return QLA_SUCCESS;
1939 }
1940 
1941 /**
1942  * qla4xxx_soft_reset - performs soft reset.
1943  * @ha: Pointer to host adapter structure.
1944  **/
1945 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1946 {
1947 	uint32_t max_wait_time;
1948 	unsigned long flags = 0;
1949 	int status;
1950 	uint32_t ctrl_status;
1951 
1952 	status = qla4xxx_hw_reset(ha);
1953 	if (status != QLA_SUCCESS)
1954 		return status;
1955 
1956 	status = QLA_ERROR;
1957 	/* Wait until the Network Reset Intr bit is cleared */
1958 	max_wait_time = RESET_INTR_TOV;
1959 	do {
1960 		spin_lock_irqsave(&ha->hardware_lock, flags);
1961 		ctrl_status = readw(&ha->reg->ctrl_status);
1962 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1963 
1964 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1965 			break;
1966 
1967 		msleep(1000);
1968 	} while ((--max_wait_time));
1969 
1970 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1971 		DEBUG2(printk(KERN_WARNING
1972 			      "scsi%ld: Network Reset Intr not cleared by "
1973 			      "Network function, clearing it now!\n",
1974 			      ha->host_no));
1975 		spin_lock_irqsave(&ha->hardware_lock, flags);
1976 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1977 		readl(&ha->reg->ctrl_status);
1978 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1979 	}
1980 
1981 	/* Wait until the firmware tells us the Soft Reset is done */
1982 	max_wait_time = SOFT_RESET_TOV;
1983 	do {
1984 		spin_lock_irqsave(&ha->hardware_lock, flags);
1985 		ctrl_status = readw(&ha->reg->ctrl_status);
1986 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1987 
1988 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1989 			status = QLA_SUCCESS;
1990 			break;
1991 		}
1992 
1993 		msleep(1000);
1994 	} while ((--max_wait_time));
1995 
1996 	/*
1997 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1998 	 * after the soft reset has taken place.
1999 	 */
2000 	spin_lock_irqsave(&ha->hardware_lock, flags);
2001 	ctrl_status = readw(&ha->reg->ctrl_status);
2002 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2003 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2004 		readl(&ha->reg->ctrl_status);
2005 	}
2006 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2007 
2008 	/* If the soft reset fails, most probably the BIOS on the other
2009 	 * function is also enabled.
2010 	 * Since initialization is sequential, the other function
2011 	 * won't be able to acknowledge the soft reset.
2012 	 * Issue a force soft reset to work around this scenario.
2013 	 */
2014 	if (max_wait_time == 0) {
2015 		/* Issue Force Soft Reset */
2016 		spin_lock_irqsave(&ha->hardware_lock, flags);
2017 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2018 		readl(&ha->reg->ctrl_status);
2019 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020 		/* Wait until the firmware tells us the Soft Reset is done */
2021 		max_wait_time = SOFT_RESET_TOV;
2022 		do {
2023 			spin_lock_irqsave(&ha->hardware_lock, flags);
2024 			ctrl_status = readw(&ha->reg->ctrl_status);
2025 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
2026 
2027 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2028 				status = QLA_SUCCESS;
2029 				break;
2030 			}
2031 
2032 			msleep(1000);
2033 		} while ((--max_wait_time));
2034 	}
2035 
2036 	return status;
2037 }
2038 
2039 /**
2040  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2041  * @ha: Pointer to host adapter structure.
2042  * @res: returned scsi status
2043  *
2044  * This routine is called just prior to a HARD RESET to return all
2045  * outstanding commands back to the Operating System.
2046  * Caller should make sure that the following locks are released
2047  * Caller should make sure that the following locks are released
2048  * before calling this routine: hardware lock and io_request_lock.
2049 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2050 {
2051 	struct srb *srb;
2052 	int i;
2053 	unsigned long flags;
2054 
2055 	spin_lock_irqsave(&ha->hardware_lock, flags);
2056 	for (i = 0; i < ha->host->can_queue; i++) {
2057 		srb = qla4xxx_del_from_active_array(ha, i);
2058 		if (srb != NULL) {
2059 			srb->cmd->result = res;
2060 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2061 		}
2062 	}
2063 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2064 }
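/*
 * Note: @res is a full SCSI result word, so callers pass a host byte shifted
 * into place, e.g. DID_RESET << 16 during recovery and DID_NO_CONNECT << 16
 * when the board is disabled below.
 */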
2065 
2066 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2067 {
2068 	clear_bit(AF_ONLINE, &ha->flags);
2069 
2070 	/* Disable the board */
2071 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2072 
2073 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2074 	qla4xxx_mark_all_devices_missing(ha);
2075 	clear_bit(AF_INIT_DONE, &ha->flags);
2076 }
2077 
2078 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2079 {
2080 	struct iscsi_session *sess;
2081 	struct ddb_entry *ddb_entry;
2082 
2083 	sess = cls_session->dd_data;
2084 	ddb_entry = sess->dd_data;
2085 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2086 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2087 }
2088 
2089 /**
2090  * qla4xxx_recover_adapter - recovers adapter after a fatal error
2091  * @ha: Pointer to host adapter structure.
2092  **/
2093 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2094 {
2095 	int status = QLA_ERROR;
2096 	uint8_t reset_chip = 0;
2097 
2098 	/* Stall incoming I/O until we are done */
2099 	scsi_block_requests(ha->host);
2100 	clear_bit(AF_ONLINE, &ha->flags);
2101 	clear_bit(AF_LINK_UP, &ha->flags);
2102 
2103 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2104 
2105 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2106 
2107 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2108 
2109 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2110 		reset_chip = 1;
2111 
2112 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2113 	 * do not reset adapter, jump to initialize_adapter */
2114 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2115 		status = QLA_SUCCESS;
2116 		goto recover_ha_init_adapter;
2117 	}
2118 
2119 	/* For the ISP-82xx adapter, issue a stop_firmware if invoked
2120 	 * from eh_host_reset or ioctl module */
2121 	if (is_qla8022(ha) && !reset_chip &&
2122 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2123 
2124 		DEBUG2(ql4_printk(KERN_INFO, ha,
2125 		    "scsi%ld: %s - Performing stop_firmware...\n",
2126 		    ha->host_no, __func__));
2127 		status = ha->isp_ops->reset_firmware(ha);
2128 		if (status == QLA_SUCCESS) {
2129 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2130 				qla4xxx_cmd_wait(ha);
2131 			ha->isp_ops->disable_intrs(ha);
2132 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2133 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2134 		} else {
2135 			/* If the stop_firmware fails then
2136 			 * reset the entire chip */
2137 			reset_chip = 1;
2138 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2139 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
2140 		}
2141 	}
2142 
2143 	/* Issue full chip reset if recovering from a catastrophic error,
2144 	 * or if stop_firmware fails for ISP-82xx.
2145 	 * This is the default case for ISP-4xxx */
2146 	if (!is_qla8022(ha) || reset_chip) {
2147 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2148 			qla4xxx_cmd_wait(ha);
2149 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2150 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2151 		DEBUG2(ql4_printk(KERN_INFO, ha,
2152 		    "scsi%ld: %s - Performing chip reset..\n",
2153 		    ha->host_no, __func__));
2154 		status = ha->isp_ops->reset_chip(ha);
2155 	}
2156 
2157 	/* Flush any pending ddb changed AENs */
2158 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2159 
2160 recover_ha_init_adapter:
2161 	/* Upon successful firmware/chip reset, re-initialize the adapter */
2162 	if (status == QLA_SUCCESS) {
2163 		/* For ISP-4xxx, force function 1 to always initialize
2164 		 * before function 3 to prevent both functions from
2165 		 * stepping on top of each other */
2166 		if (!is_qla8022(ha) && (ha->mac_index == 3))
2167 			ssleep(6);
2168 
2169 		/* NOTE: AF_ONLINE flag set upon successful completion of
2170 		 *       qla4xxx_initialize_adapter */
2171 		status = qla4xxx_initialize_adapter(ha);
2172 	}
2173 
2174 	/* Retry failed adapter initialization, if necessary
2175 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2176 	 * case to prevent ping-pong resets between functions */
2177 	if (!test_bit(AF_ONLINE, &ha->flags) &&
2178 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2179 		/* Adapter initialization failed, see if we can retry
2180 		 * resetting the ha.
2181 		 * Since we don't want to block the DPC for too long
2182 		 * with multiple resets in the same thread,
2183 		 * utilize DPC to retry */
2184 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2185 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2186 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
2187 				      "(%d) more times\n", ha->host_no,
2188 				      ha->retry_reset_ha_cnt));
2189 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2190 			status = QLA_ERROR;
2191 		} else {
2192 			if (ha->retry_reset_ha_cnt > 0) {
2193 				/* Schedule another Reset HA--DPC will retry */
2194 				ha->retry_reset_ha_cnt--;
2195 				DEBUG2(printk("scsi%ld: recover adapter - "
2196 					      "retry remaining %d\n",
2197 					      ha->host_no,
2198 					      ha->retry_reset_ha_cnt));
2199 				status = QLA_ERROR;
2200 			}
2201 
2202 			if (ha->retry_reset_ha_cnt == 0) {
2203 				/* Recover adapter retries have been exhausted.
2204 				 * Adapter DEAD */
2205 				DEBUG2(printk("scsi%ld: recover adapter "
2206 					      "failed - board disabled\n",
2207 					      ha->host_no));
2208 				qla4xxx_dead_adapter_cleanup(ha);
2209 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2210 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2211 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
2212 					  &ha->dpc_flags);
2213 				status = QLA_ERROR;
2214 			}
2215 		}
2216 	} else {
2217 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2218 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2219 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2220 	}
2221 
2222 	ha->adapter_error_count++;
2223 
2224 	if (test_bit(AF_ONLINE, &ha->flags))
2225 		ha->isp_ops->enable_intrs(ha);
2226 
2227 	scsi_unblock_requests(ha->host);
2228 
2229 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2230 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
2231 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
2232 
2233 	return status;
2234 }
2235 
2236 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2237 {
2238 	struct iscsi_session *sess;
2239 	struct ddb_entry *ddb_entry;
2240 	struct scsi_qla_host *ha;
2241 
2242 	sess = cls_session->dd_data;
2243 	ddb_entry = sess->dd_data;
2244 	ha = ddb_entry->ha;
2245 	if (!iscsi_is_session_online(cls_session)) {
2246 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2247 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2248 				   " unblock session\n", ha->host_no, __func__,
2249 				   ddb_entry->fw_ddb_index);
2250 			iscsi_unblock_session(ddb_entry->sess);
2251 		} else {
2252 			/* Trigger relogin */
2253 			iscsi_session_failure(cls_session->dd_data,
2254 					      ISCSI_ERR_CONN_FAILED);
2255 		}
2256 	}
2257 }
2258 
2259 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2260 {
2261 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2262 }
2263 
2264 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2265 {
2266 	if (ha->dpc_thread)
2267 		queue_work(ha->dpc_thread, &ha->dpc_work);
2268 }
2269 
2270 /**
2271  * qla4xxx_do_dpc - dpc routine
2272  * @data: in our case pointer to adapter structure
2273  *
2274  * This routine is a task that is scheduled by the interrupt handler
2275  * to perform the background processing for interrupts.  We put it
2276  * on a work queue that is consumed whenever the scheduler runs, so it
2277  * may do anything that requires process context (e.g. sleep).  In fact,
2278  * the mid-level tries to sleep when it reaches the driver threshold
2279  * "host->can_queue", which could cause a panic if it happened in interrupt context.
2280  **/
2281 static void qla4xxx_do_dpc(struct work_struct *work)
2282 {
2283 	struct scsi_qla_host *ha =
2284 		container_of(work, struct scsi_qla_host, dpc_work);
2285 	int status = QLA_ERROR;
2286 
2287 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
2288 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2289 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
2290 
2291 	/* Initialization not yet finished. Don't do anything yet. */
2292 	if (!test_bit(AF_INIT_DONE, &ha->flags))
2293 		return;
2294 
2295 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2296 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2297 		    ha->host_no, __func__, ha->flags));
2298 		return;
2299 	}
2300 
2301 	if (is_qla8022(ha)) {
2302 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2303 			qla4_8xxx_idc_lock(ha);
2304 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2305 			    QLA82XX_DEV_FAILED);
2306 			qla4_8xxx_idc_unlock(ha);
2307 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2308 			qla4_8xxx_device_state_handler(ha);
2309 		}
2310 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2311 			qla4_8xxx_need_qsnt_handler(ha);
2312 		}
2313 	}
2314 
2315 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2316 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2317 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2318 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2319 		if (ql4xdontresethba) {
2320 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2321 			    ha->host_no, __func__));
2322 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2323 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2324 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2325 			goto dpc_post_reset_ha;
2326 		}
2327 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2328 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
2329 			qla4xxx_recover_adapter(ha);
2330 
2331 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2332 			uint8_t wait_time = RESET_INTR_TOV;
2333 
2334 			while ((readw(&ha->reg->ctrl_status) &
2335 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2336 				if (--wait_time == 0)
2337 					break;
2338 				msleep(1000);
2339 			}
2340 			if (wait_time == 0)
2341 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
2342 					      "bit not cleared-- resetting\n",
2343 					      ha->host_no, __func__));
2344 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2345 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2346 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2347 				status = qla4xxx_recover_adapter(ha);
2348 			}
2349 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2350 			if (status == QLA_SUCCESS)
2351 				ha->isp_ops->enable_intrs(ha);
2352 		}
2353 	}
2354 
2355 dpc_post_reset_ha:
2356 	/* ---- process AEN? --- */
2357 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2358 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2359 
2360 	/* ---- Get DHCP IP Address? --- */
2361 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2362 		qla4xxx_get_dhcp_ip_address(ha);
2363 
2364 	/* ---- link change? --- */
2365 	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2366 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
2367 			/* ---- link down? --- */
2368 			qla4xxx_mark_all_devices_missing(ha);
2369 		} else {
2370 			/* ---- link up? --- *
2371 			 * F/W will auto login to all devices ONLY ONCE after
2372 			 * link up during driver initialization and runtime
2373 			 * fatal error recovery.  Therefore, the driver must
2374 			 * manually relogin to devices when recovering from
2375 			 * connection failures, logouts, expired KATO, etc. */
2376 
2377 			qla4xxx_relogin_all_devices(ha);
2378 		}
2379 	}
2380 }
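/*
 * Scheduling sketch: producers throughout the driver hand work to the DPC by
 * setting a flag and waking the workqueue, e.g.
 *
 *	set_bit(DPC_RESET_HA, &ha->dpc_flags);
 *	qla4xxx_wake_dpc(ha);
 *
 * qla4xxx_do_dpc() then runs in process context and services whichever
 * DPC_* bits are set.
 */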
2381 
2382 /**
2383  * qla4xxx_free_adapter - release the adapter
2384  * @ha: pointer to adapter structure
2385  **/
2386 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2387 {
2388 
2389 	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2390 		/* Turn-off interrupts on the card. */
2391 		ha->isp_ops->disable_intrs(ha);
2392 	}
2393 
2394 	/* Remove timer thread, if present */
2395 	if (ha->timer_active)
2396 		qla4xxx_stop_timer(ha);
2397 
2398 	/* Kill the kernel thread for this host */
2399 	if (ha->dpc_thread)
2400 		destroy_workqueue(ha->dpc_thread);
2401 
2402 	/* Kill the kernel thread for this host */
2403 	if (ha->task_wq)
2404 		destroy_workqueue(ha->task_wq);
2405 
2406 	/* Put firmware in known state */
2407 	ha->isp_ops->reset_firmware(ha);
2408 
2409 	if (is_qla8022(ha)) {
2410 		qla4_8xxx_idc_lock(ha);
2411 		qla4_8xxx_clear_drv_active(ha);
2412 		qla4_8xxx_idc_unlock(ha);
2413 	}
2414 
2415 	/* Detach interrupts */
2416 	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
2417 		qla4xxx_free_irqs(ha);
2418 
2419 	/* free extra memory */
2420 	qla4xxx_mem_free(ha);
2421 }
2422 
2423 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2424 {
2425 	int status = 0;
2426 	uint8_t revision_id;
2427 	unsigned long mem_base, mem_len, db_base, db_len;
2428 	struct pci_dev *pdev = ha->pdev;
2429 
2430 	status = pci_request_regions(pdev, DRIVER_NAME);
2431 	if (status) {
2432 		printk(KERN_WARNING
2433 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
2434 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
2435 		goto iospace_error_exit;
2436 	}
2437 
2438 	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2439 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2440 	    __func__, revision_id));
2441 	ha->revision_id = revision_id;
2442 
2443 	/* remap phys address */
2444 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2445 	mem_len = pci_resource_len(pdev, 0);
2446 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2447 	    __func__, mem_base, mem_len));
2448 
2449 	/* mapping of pcibase pointer */
2450 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2451 	if (!ha->nx_pcibase) {
2452 		printk(KERN_ERR
2453 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2454 		pci_release_regions(ha->pdev);
2455 		goto iospace_error_exit;
2456 	}
2457 
2458 	/* Mapping of IO base pointer, door bell read and write pointer */
2459 
2460 	/* mapping of IO base pointer */
2461 	ha->qla4_8xxx_reg =
2462 	    (struct device_reg_82xx  __iomem *)((uint8_t *)ha->nx_pcibase +
2463 	    0xbc000 + (ha->pdev->devfn << 11));
2464 
2465 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
2466 	db_len = pci_resource_len(pdev, 4);
2467 
2468 	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2469 	    QLA82XX_CAM_RAM_DB2);
2470 
2471 	return 0;
2472 iospace_error_exit:
2473 	return -ENOMEM;
2474 }
2475 
2476 /***
2477  * qla4xxx_iospace_config - maps registers
2478  * @ha: pointer to adapter structure
2479  *
2480  * This routine maps the HBA's registers from the PCI address space
2481  * into the kernel virtual address space for memory mapped I/O.
2482  **/
2483 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
2484 {
2485 	unsigned long pio, pio_len, pio_flags;
2486 	unsigned long mmio, mmio_len, mmio_flags;
2487 
2488 	pio = pci_resource_start(ha->pdev, 0);
2489 	pio_len = pci_resource_len(ha->pdev, 0);
2490 	pio_flags = pci_resource_flags(ha->pdev, 0);
2491 	if (pio_flags & IORESOURCE_IO) {
2492 		if (pio_len < MIN_IOBASE_LEN) {
2493 			ql4_printk(KERN_WARNING, ha,
2494 				"Invalid PCI I/O region size\n");
2495 			pio = 0;
2496 		}
2497 	} else {
2498 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
2499 		pio = 0;
2500 	}
2501 
2502 	/* Use MMIO operations for all accesses. */
2503 	mmio = pci_resource_start(ha->pdev, 1);
2504 	mmio_len = pci_resource_len(ha->pdev, 1);
2505 	mmio_flags = pci_resource_flags(ha->pdev, 1);
2506 
2507 	if (!(mmio_flags & IORESOURCE_MEM)) {
2508 		ql4_printk(KERN_ERR, ha,
2509 		    "region #1 not an MMIO resource, aborting\n");
2510 
2511 		goto iospace_error_exit;
2512 	}
2513 
2514 	if (mmio_len < MIN_IOBASE_LEN) {
2515 		ql4_printk(KERN_ERR, ha,
2516 		    "Invalid PCI mem region size, aborting\n");
2517 		goto iospace_error_exit;
2518 	}
2519 
2520 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
2521 		ql4_printk(KERN_WARNING, ha,
2522 		    "Failed to reserve PIO/MMIO regions\n");
2523 
2524 		goto iospace_error_exit;
2525 	}
2526 
2527 	ha->pio_address = pio;
2528 	ha->pio_length = pio_len;
2529 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2530 	if (!ha->reg) {
2531 		ql4_printk(KERN_ERR, ha,
2532 		    "cannot remap MMIO, aborting\n");
2533 
2534 		goto iospace_error_exit;
2535 	}
2536 
2537 	return 0;
2538 
2539 iospace_error_exit:
2540 	return -ENOMEM;
2541 }
2542 
2543 static struct isp_operations qla4xxx_isp_ops = {
2544 	.iospace_config         = qla4xxx_iospace_config,
2545 	.pci_config             = qla4xxx_pci_config,
2546 	.disable_intrs          = qla4xxx_disable_intrs,
2547 	.enable_intrs           = qla4xxx_enable_intrs,
2548 	.start_firmware         = qla4xxx_start_firmware,
2549 	.intr_handler           = qla4xxx_intr_handler,
2550 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
2551 	.reset_chip             = qla4xxx_soft_reset,
2552 	.reset_firmware         = qla4xxx_hw_reset,
2553 	.queue_iocb             = qla4xxx_queue_iocb,
2554 	.complete_iocb          = qla4xxx_complete_iocb,
2555 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
2556 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
2557 	.get_sys_info           = qla4xxx_get_sys_info,
2558 };
2559 
2560 static struct isp_operations qla4_8xxx_isp_ops = {
2561 	.iospace_config         = qla4_8xxx_iospace_config,
2562 	.pci_config             = qla4_8xxx_pci_config,
2563 	.disable_intrs          = qla4_8xxx_disable_intrs,
2564 	.enable_intrs           = qla4_8xxx_enable_intrs,
2565 	.start_firmware         = qla4_8xxx_load_risc,
2566 	.intr_handler           = qla4_8xxx_intr_handler,
2567 	.interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2568 	.reset_chip             = qla4_8xxx_isp_reset,
2569 	.reset_firmware         = qla4_8xxx_stop_firmware,
2570 	.queue_iocb             = qla4_8xxx_queue_iocb,
2571 	.complete_iocb          = qla4_8xxx_complete_iocb,
2572 	.rd_shdw_req_q_out      = qla4_8xxx_rd_shdw_req_q_out,
2573 	.rd_shdw_rsp_q_in       = qla4_8xxx_rd_shdw_rsp_q_in,
2574 	.get_sys_info           = qla4_8xxx_get_sys_info,
2575 };
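/*
 * Note: chip-agnostic code calls through ha->isp_ops so the same paths serve
 * ISP-4xxx and ISP-82xx, e.g. in qla4xxx_recover_adapter():
 *
 *	status = ha->isp_ops->reset_chip(ha);
 *
 * which resolves to qla4xxx_soft_reset() or qla4_8xxx_isp_reset() depending
 * on which table qla4xxx_probe_adapter() selected.
 */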
2576 
2577 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2578 {
2579 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2580 }
2581 
2582 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2583 {
2584 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2585 }
2586 
2587 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2588 {
2589 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2590 }
2591 
2592 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2593 {
2594 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2595 }
2596 
2597 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2598 {
2599 	struct scsi_qla_host *ha = data;
2600 	char *str = buf;
2601 	int rc;
2602 
2603 	switch (type) {
2604 	case ISCSI_BOOT_ETH_FLAGS:
2605 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2606 		break;
2607 	case ISCSI_BOOT_ETH_INDEX:
2608 		rc = sprintf(str, "0\n");
2609 		break;
2610 	case ISCSI_BOOT_ETH_MAC:
2611 		rc = sysfs_format_mac(str, ha->my_mac,
2612 				      MAC_ADDR_LEN);
2613 		break;
2614 	default:
2615 		rc = -ENOSYS;
2616 		break;
2617 	}
2618 	return rc;
2619 }
2620 
2621 static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2622 {
2623 	int rc;
2624 
2625 	switch (type) {
2626 	case ISCSI_BOOT_ETH_FLAGS:
2627 	case ISCSI_BOOT_ETH_MAC:
2628 	case ISCSI_BOOT_ETH_INDEX:
2629 		rc = S_IRUGO;
2630 		break;
2631 	default:
2632 		rc = 0;
2633 		break;
2634 	}
2635 	return rc;
2636 }
2637 
2638 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2639 {
2640 	struct scsi_qla_host *ha = data;
2641 	char *str = buf;
2642 	int rc;
2643 
2644 	switch (type) {
2645 	case ISCSI_BOOT_INI_INITIATOR_NAME:
2646 		rc = sprintf(str, "%s\n", ha->name_string);
2647 		break;
2648 	default:
2649 		rc = -ENOSYS;
2650 		break;
2651 	}
2652 	return rc;
2653 }
2654 
2655 static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2656 {
2657 	int rc;
2658 
2659 	switch (type) {
2660 	case ISCSI_BOOT_INI_INITIATOR_NAME:
2661 		rc = S_IRUGO;
2662 		break;
2663 	default:
2664 		rc = 0;
2665 		break;
2666 	}
2667 	return rc;
2668 }
2669 
2670 static ssize_t
2671 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2672 			   char *buf)
2673 {
2674 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2675 	char *str = buf;
2676 	int rc;
2677 
2678 	switch (type) {
2679 	case ISCSI_BOOT_TGT_NAME:
2680 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2681 		break;
2682 	case ISCSI_BOOT_TGT_IP_ADDR:
2683 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2684 			rc = sprintf(buf, "%pI4\n",
2685 				     &boot_conn->dest_ipaddr.ip_address);
2686 		else
2687 			rc = sprintf(str, "%pI6\n",
2688 				     &boot_conn->dest_ipaddr.ip_address);
2689 		break;
2690 	case ISCSI_BOOT_TGT_PORT:
2691 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
2692 		break;
2693 	case ISCSI_BOOT_TGT_CHAP_NAME:
2694 		rc = sprintf(str,  "%.*s\n",
2695 			     boot_conn->chap.target_chap_name_length,
2696 			     (char *)&boot_conn->chap.target_chap_name);
2697 		break;
2698 	case ISCSI_BOOT_TGT_CHAP_SECRET:
2699 		rc = sprintf(str,  "%.*s\n",
2700 			     boot_conn->chap.target_secret_length,
2701 			     (char *)&boot_conn->chap.target_secret);
2702 		break;
2703 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2704 		rc = sprintf(str,  "%.*s\n",
2705 			     boot_conn->chap.intr_chap_name_length,
2706 			     (char *)&boot_conn->chap.intr_chap_name);
2707 		break;
2708 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2709 		rc = sprintf(str,  "%.*s\n",
2710 			     boot_conn->chap.intr_secret_length,
2711 			     (char *)&boot_conn->chap.intr_secret);
2712 		break;
2713 	case ISCSI_BOOT_TGT_FLAGS:
2714 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2715 		break;
2716 	case ISCSI_BOOT_TGT_NIC_ASSOC:
2717 		rc = sprintf(str, "0\n");
2718 		break;
2719 	default:
2720 		rc = -ENOSYS;
2721 		break;
2722 	}
2723 	return rc;
2724 }
2725 
2726 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2727 {
2728 	struct scsi_qla_host *ha = data;
2729 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2730 
2731 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2732 }
2733 
2734 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2735 {
2736 	struct scsi_qla_host *ha = data;
2737 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2738 
2739 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2740 }
2741 
2742 static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2743 {
2744 	int rc;
2745 
2746 	switch (type) {
2747 	case ISCSI_BOOT_TGT_NAME:
2748 	case ISCSI_BOOT_TGT_IP_ADDR:
2749 	case ISCSI_BOOT_TGT_PORT:
2750 	case ISCSI_BOOT_TGT_CHAP_NAME:
2751 	case ISCSI_BOOT_TGT_CHAP_SECRET:
2752 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2753 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2754 	case ISCSI_BOOT_TGT_NIC_ASSOC:
2755 	case ISCSI_BOOT_TGT_FLAGS:
2756 		rc = S_IRUGO;
2757 		break;
2758 	default:
2759 		rc = 0;
2760 		break;
2761 	}
2762 	return rc;
2763 }
2764 
2765 static void qla4xxx_boot_release(void *data)
2766 {
2767 	struct scsi_qla_host *ha = data;
2768 
2769 	scsi_host_put(ha->host);
2770 }
2771 
2772 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2773 {
2774 	dma_addr_t buf_dma;
2775 	uint32_t addr, pri_addr, sec_addr;
2776 	uint32_t offset;
2777 	uint16_t func_num;
2778 	uint8_t val;
2779 	uint8_t *buf = NULL;
2780 	size_t size = 13 * sizeof(uint8_t);
2781 	int ret = QLA_SUCCESS;
2782 
2783 	func_num = PCI_FUNC(ha->pdev->devfn);
2784 
2785 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
2786 		   __func__, ha->pdev->device, func_num);
2787 
2788 	if (is_qla40XX(ha)) {
2789 		if (func_num == 1) {
2790 			addr = NVRAM_PORT0_BOOT_MODE;
2791 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2792 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2793 		} else if (func_num == 3) {
2794 			addr = NVRAM_PORT1_BOOT_MODE;
2795 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2796 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2797 		} else {
2798 			ret = QLA_ERROR;
2799 			goto exit_boot_info;
2800 		}
2801 
2802 		/* Check Boot Mode */
2803 		val = rd_nvram_byte(ha, addr);
2804 		if (!(val & 0x07)) {
2805 			DEBUG2(ql4_printk(KERN_ERR, ha,
2806 					  "%s: Failed Boot options : 0x%x\n",
2807 					  __func__, val));
2808 			ret = QLA_ERROR;
2809 			goto exit_boot_info;
2810 		}
2811 
2812 		/* get primary valid target index */
2813 		val = rd_nvram_byte(ha, pri_addr);
2814 		if (val & BIT_7)
2815 			ddb_index[0] = (val & 0x7f);
2816 
2817 		/* get secondary valid target index */
2818 		val = rd_nvram_byte(ha, sec_addr);
2819 		if (val & BIT_7)
2820 			ddb_index[1] = (val & 0x7f);
2821 
2822 	} else if (is_qla8022(ha)) {
2823 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
2824 					 &buf_dma, GFP_KERNEL);
2825 		if (!buf) {
2826 			DEBUG2(ql4_printk(KERN_ERR, ha,
2827 					  "%s: Unable to allocate dma buffer\n",
2828 					   __func__));
2829 			ret = QLA_ERROR;
2830 			goto exit_boot_info;
2831 		}
2832 
2833 		if (ha->port_num == 0)
2834 			offset = BOOT_PARAM_OFFSET_PORT0;
2835 		else if (ha->port_num == 1)
2836 			offset = BOOT_PARAM_OFFSET_PORT1;
2837 		else {
2838 			ret = QLA_ERROR;
2839 			goto exit_boot_info_free;
2840 		}
2841 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2842 		       offset;
2843 		if (qla4xxx_get_flash(ha, buf_dma, addr,
2844 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
2845 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
2846 					  " failed\n", ha->host_no, __func__));
2847 			ret = QLA_ERROR;
2848 			goto exit_boot_info_free;
2849 		}
2850 		/* Check Boot Mode */
2851 		if (!(buf[1] & 0x07)) {
2852 			DEBUG2(ql4_printk(KERN_INFO, ha,
2853 					  "Failed: Boot options : 0x%x\n",
2854 					  buf[1]));
2855 			ret = QLA_ERROR;
2856 			goto exit_boot_info_free;
2857 		}
2858 
2859 		/* get primary valid target index */
2860 		if (buf[2] & BIT_7)
2861 			ddb_index[0] = buf[2] & 0x7f;
2862 
2863 		/* get secondary valid target index */
2864 		if (buf[11] & BIT_7)
2865 			ddb_index[1] = buf[11] & 0x7f;
2866 	} else {
2867 		ret = QLA_ERROR;
2868 		goto exit_boot_info;
2869 	}
2870 
2871 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2872 			  " target ID %d\n", __func__, ddb_index[0],
2873 			  ddb_index[1]));
2874 
2875 exit_boot_info_free:
2876 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2877 exit_boot_info:
2878 	return ret;
2879 }
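/*
 * Layout of the 13-byte boot-parameter buffer read from flash on ISP-82xx,
 * as implied by the checks above (a sketch; the remaining bytes are not
 * interpreted here):
 *
 *	buf[1]  - boot mode/options (at least one of bits 0-2 must be set)
 *	buf[2]  - primary boot target DDB index, valid when BIT_7 is set
 *	buf[11] - secondary boot target DDB index, valid when BIT_7 is set
 */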
2880 
2881 /**
2882  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
2883  * @ha: pointer to adapter structure
2884  * @username: CHAP username to be returned
2885  * @password: CHAP password to be returned
2886  *
2887  * If a boot entry has BIDI CHAP enabled, the BIDI CHAP user and password
2888  * must be exposed through the sysfs entry in /sys/firmware/iscsi_boot#/.
2889  * So find the first BIDI CHAP entry in the CHAP cache and copy it
2890  * to the boot record in sysfs.
2891  **/
2892 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
2893 			    char *password)
2894 {
2895 	int i, ret = -EINVAL;
2896 	int max_chap_entries = 0;
2897 	struct ql4_chap_table *chap_table;
2898 
2899 	if (is_qla8022(ha))
2900 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
2901 						sizeof(struct ql4_chap_table);
2902 	else
2903 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
2904 
2905 	if (!ha->chap_list) {
2906 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
2907 		return ret;
2908 	}
2909 
2910 	mutex_lock(&ha->chap_sem);
2911 	for (i = 0; i < max_chap_entries; i++) {
2912 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
2913 		if (chap_table->cookie !=
2914 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
2915 			continue;
2916 		}
2917 
2918 		if (chap_table->flags & BIT_7) /* local */
2919 			continue;
2920 
2921 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
2922 			continue;
2923 
2924 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
2925 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
2926 		ret = 0;
2927 		break;
2928 	}
2929 	mutex_unlock(&ha->chap_sem);
2930 
2931 	return ret;
2932 }
2933 
2934 
2935 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2936 				   struct ql4_boot_session_info *boot_sess,
2937 				   uint16_t ddb_index)
2938 {
2939 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2940 	struct dev_db_entry *fw_ddb_entry;
2941 	dma_addr_t fw_ddb_entry_dma;
2942 	uint16_t idx;
2943 	uint16_t options;
2944 	int ret = QLA_SUCCESS;
2945 
2946 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2947 					  &fw_ddb_entry_dma, GFP_KERNEL);
2948 	if (!fw_ddb_entry) {
2949 		DEBUG2(ql4_printk(KERN_ERR, ha,
2950 				  "%s: Unable to allocate dma buffer.\n",
2951 				  __func__));
2952 		ret = QLA_ERROR;
2953 		return ret;
2954 	}
2955 
2956 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2957 				   fw_ddb_entry_dma, ddb_index)) {
2958 		DEBUG2(ql4_printk(KERN_ERR, ha,
2959 				  "%s: Flash DDB read Failed\n", __func__));
2960 		ret = QLA_ERROR;
2961 		goto exit_boot_target;
2962 	}
2963 
2964 	/* Update target name and IP from DDB */
2965 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2966 	       min(sizeof(boot_sess->target_name),
2967 		   sizeof(fw_ddb_entry->iscsi_name)));
2968 
2969 	options = le16_to_cpu(fw_ddb_entry->options);
2970 	if (options & DDB_OPT_IPV6_DEVICE) {
2971 		memcpy(&boot_conn->dest_ipaddr.ip_address,
2972 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2973 	} else {
2974 		boot_conn->dest_ipaddr.ip_type = 0x1;
2975 		memcpy(&boot_conn->dest_ipaddr.ip_address,
2976 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2977 	}
2978 
2979 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2980 
2981 	/* update chap information */
2982 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2983 
2984 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
2985 
2986 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2987 
2988 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2989 				       target_chap_name,
2990 				       (char *)&boot_conn->chap.target_secret,
2991 				       idx);
2992 		if (ret) {
2993 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2994 			ret = QLA_ERROR;
2995 			goto exit_boot_target;
2996 		}
2997 
2998 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2999 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3000 	}
3001 
3002 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
3003 
3004 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
3005 
3006 		ret = qla4xxx_get_bidi_chap(ha,
3007 				    (char *)&boot_conn->chap.intr_chap_name,
3008 				    (char *)&boot_conn->chap.intr_secret);
3009 
3010 		if (ret) {
3011 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
3012 			ret = QLA_ERROR;
3013 			goto exit_boot_target;
3014 		}
3015 
3016 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3017 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3018 	}
3019 
3020 exit_boot_target:
3021 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3022 			  fw_ddb_entry, fw_ddb_entry_dma);
3023 	return ret;
3024 }
3025 
3026 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3027 {
3028 	uint16_t ddb_index[2];
3029 	int ret = QLA_ERROR;
3030 	int rval;
3031 
3032 	memset(ddb_index, 0, sizeof(ddb_index));
3033 	ddb_index[0] = 0xffff;
3034 	ddb_index[1] = 0xffff;
3035 	ret = get_fw_boot_info(ha, ddb_index);
3036 	if (ret != QLA_SUCCESS) {
3037 		DEBUG2(ql4_printk(KERN_ERR, ha,
3038 				  "%s: Failed to set boot info.\n", __func__));
3039 		return ret;
3040 	}
3041 
3042 	if (ddb_index[0] == 0xffff)
3043 		goto sec_target;
3044 
3045 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
3046 				      ddb_index[0]);
3047 	if (rval != QLA_SUCCESS) {
3048 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3049 				  "primary target\n", __func__));
3050 	} else
3051 		ret = QLA_SUCCESS;
3052 
3053 sec_target:
3054 	if (ddb_index[1] == 0xffff)
3055 		goto exit_get_boot_info;
3056 
3057 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
3058 				      ddb_index[1]);
3059 	if (rval != QLA_SUCCESS) {
3060 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3061 				  "secondary target\n", __func__));
3062 	} else
3063 		ret = QLA_SUCCESS;
3064 
3065 exit_get_boot_info:
3066 	return ret;
3067 }
3068 
3069 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3070 {
3071 	struct iscsi_boot_kobj *boot_kobj;
3072 
3073 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3074 		return 0;
3075 
3076 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3077 	if (!ha->boot_kset)
3078 		goto kset_free;
3079 
3080 	if (!scsi_host_get(ha->host))
3081 		goto kset_free;
3082 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3083 					     qla4xxx_show_boot_tgt_pri_info,
3084 					     qla4xxx_tgt_get_attr_visibility,
3085 					     qla4xxx_boot_release);
3086 	if (!boot_kobj)
3087 		goto put_host;
3088 
3089 	if (!scsi_host_get(ha->host))
3090 		goto kset_free;
3091 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3092 					     qla4xxx_show_boot_tgt_sec_info,
3093 					     qla4xxx_tgt_get_attr_visibility,
3094 					     qla4xxx_boot_release);
3095 	if (!boot_kobj)
3096 		goto put_host;
3097 
3098 	if (!scsi_host_get(ha->host))
3099 		goto kset_free;
3100 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3101 					       qla4xxx_show_boot_ini_info,
3102 					       qla4xxx_ini_get_attr_visibility,
3103 					       qla4xxx_boot_release);
3104 	if (!boot_kobj)
3105 		goto put_host;
3106 
3107 	if (!scsi_host_get(ha->host))
3108 		goto kset_free;
3109 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3110 					       qla4xxx_show_boot_eth_info,
3111 					       qla4xxx_eth_get_attr_visibility,
3112 					       qla4xxx_boot_release);
3113 	if (!boot_kobj)
3114 		goto put_host;
3115 
3116 	return 0;
3117 
3118 put_host:
3119 	scsi_host_put(ha->host);
3120 kset_free:
3121 	iscsi_boot_destroy_kset(ha->boot_kset);
3122 	return -ENOMEM;
3123 }
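/*
 * On success this populates /sys/firmware/iscsi_boot#/ with two target
 * kobjects (primary and secondary boot sessions), one initiator kobject and
 * one ethernet kobject, each backed by the show and visibility callbacks
 * defined above.
 */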
3124 
3125 
3126 /**
3127  * qla4xxx_create_chap_list - Create CHAP list from FLASH
3128  * @ha: pointer to adapter structure
3129  *
3130  * Read flash and build a list of CHAP entries. During login, when a CHAP
3131  * entry is received, it is looked up in this list. If the entry exists, its
3132  * CHAP entry index is set in the DDB. If the entry does not exist in this
3133  * list, a new entry is added to the CHAP table in FLASH and the index
3134  * obtained is used in the DDB.
3135  **/
3136 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3137 {
3138 	int rval = 0;
3139 	uint8_t *chap_flash_data = NULL;
3140 	uint32_t offset;
3141 	dma_addr_t chap_dma;
3142 	uint32_t chap_size = 0;
3143 
3144 	if (is_qla40XX(ha))
3145 		chap_size = MAX_CHAP_ENTRIES_40XX  *
3146 					sizeof(struct ql4_chap_table);
3147 	else	/* Single region contains CHAP info for both
3148 		 * ports which is divided into half for each port.
3149 		 */
3150 		chap_size = ha->hw.flt_chap_size / 2;
3151 
3152 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3153 					  &chap_dma, GFP_KERNEL);
3154 	if (!chap_flash_data) {
3155 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3156 		return;
3157 	}
3158 	if (is_qla40XX(ha))
3159 		offset = FLASH_CHAP_OFFSET;
3160 	else {
3161 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3162 		if (ha->port_num == 1)
3163 			offset += chap_size;
3164 	}
3165 
3166 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3167 	if (rval != QLA_SUCCESS)
3168 		goto exit_chap_list;
3169 
3170 	if (ha->chap_list == NULL)
3171 		ha->chap_list = vmalloc(chap_size);
3172 	if (ha->chap_list == NULL) {
3173 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3174 		goto exit_chap_list;
3175 	}
3176 
3177 	memcpy(ha->chap_list, chap_flash_data, chap_size);
3178 
3179 exit_chap_list:
3180 	dma_free_coherent(&ha->pdev->dev, chap_size,
3181 			chap_flash_data, chap_dma);
3182 	return;
3183 }
3184 
3185 /**
3186  * qla4xxx_probe_adapter - callback function to probe HBA
3187  * @pdev: pointer to pci_dev structure
3188  * @ent: pointer to the matched pci_device_id entry
3189  *
3190  * This routine will probe for QLogic 4xxx iSCSI host adapters.
3191  * It returns zero if successful. It also initializes all data necessary for
3192  * the driver.
3193  **/
3194 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3195 					   const struct pci_device_id *ent)
3196 {
3197 	int ret = -ENODEV, status;
3198 	struct Scsi_Host *host;
3199 	struct scsi_qla_host *ha;
3200 	uint8_t init_retry_count = 0;
3201 	char buf[34];
3202 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
3203 	uint32_t dev_state;
3204 
3205 	if (pci_enable_device(pdev))
3206 		return -1;
3207 
3208 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
3209 	if (host == NULL) {
3210 		printk(KERN_WARNING
3211 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
3212 		goto probe_disable_device;
3213 	}
3214 
3215 	/* Clear our data area */
3216 	ha = to_qla_host(host);
3217 	memset(ha, 0, sizeof(*ha));
3218 
3219 	/* Save the information from PCI BIOS.	*/
3220 	ha->pdev = pdev;
3221 	ha->host = host;
3222 	ha->host_no = host->host_no;
3223 
3224 	pci_enable_pcie_error_reporting(pdev);
3225 
3226 	/* Setup Runtime configurable options */
3227 	if (is_qla8022(ha)) {
3228 		ha->isp_ops = &qla4_8xxx_isp_ops;
3229 		rwlock_init(&ha->hw_lock);
3230 		ha->qdr_sn_window = -1;
3231 		ha->ddr_mn_window = -1;
3232 		ha->curr_window = 255;
3233 		ha->func_num = PCI_FUNC(ha->pdev->devfn);
3234 		nx_legacy_intr = &legacy_intr[ha->func_num];
3235 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3236 		ha->nx_legacy_intr.tgt_status_reg =
3237 			nx_legacy_intr->tgt_status_reg;
3238 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3239 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3240 	} else {
3241 		ha->isp_ops = &qla4xxx_isp_ops;
3242 	}
3243 
3244 	/* Set EEH reset type to fundamental if required by hba */
3245 	if (is_qla8022(ha))
3246 		pdev->needs_freset = 1;
3247 
3248 	/* Configure PCI I/O space. */
3249 	ret = ha->isp_ops->iospace_config(ha);
3250 	if (ret)
3251 		goto probe_failed_ioconfig;
3252 
3253 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
3254 		   pdev->device, pdev->irq, ha->reg);
3255 
3256 	qla4xxx_config_dma_addressing(ha);
3257 
3258 	/* Initialize lists and spinlocks. */
3259 	INIT_LIST_HEAD(&ha->free_srb_q);
3260 
3261 	mutex_init(&ha->mbox_sem);
3262 	mutex_init(&ha->chap_sem);
3263 	init_completion(&ha->mbx_intr_comp);
3264 	init_completion(&ha->disable_acb_comp);
3265 
3266 	spin_lock_init(&ha->hardware_lock);
3267 
3268 	/* Allocate dma buffers */
3269 	if (qla4xxx_mem_alloc(ha)) {
3270 		ql4_printk(KERN_WARNING, ha,
3271 		    "[ERROR] Failed to allocate memory for adapter\n");
3272 
3273 		ret = -ENOMEM;
3274 		goto probe_failed;
3275 	}
3276 
3277 	host->cmd_per_lun = 3;
3278 	host->max_channel = 0;
3279 	host->max_lun = MAX_LUNS - 1;
3280 	host->max_id = MAX_TARGETS;
3281 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
3282 	host->can_queue = MAX_SRBS ;
3283 	host->transportt = qla4xxx_scsi_transport;
3284 
3285 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3286 	if (ret) {
3287 		ql4_printk(KERN_WARNING, ha,
3288 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
3289 		goto probe_failed;
3290 	}
3291 
3292 	pci_set_drvdata(pdev, ha);
3293 
3294 	ret = scsi_add_host(host, &pdev->dev);
3295 	if (ret)
3296 		goto probe_failed;
3297 
3298 	if (is_qla8022(ha))
3299 		(void) qla4_8xxx_get_flash_info(ha);
3300 
3301 	/*
3302 	 * Initialize the Host adapter request/response queues and
3303 	 * firmware
3304 	 * NOTE: interrupts enabled upon successful completion
3305 	 */
3306 	status = qla4xxx_initialize_adapter(ha);
3307 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3308 	    init_retry_count++ < MAX_INIT_RETRIES) {
3309 
3310 		if (is_qla8022(ha)) {
3311 			qla4_8xxx_idc_lock(ha);
3312 			dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3313 			qla4_8xxx_idc_unlock(ha);
3314 			if (dev_state == QLA82XX_DEV_FAILED) {
3315 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3316 				    "initialize adapter. H/W is in failed state\n",
3317 				    __func__);
3318 				break;
3319 			}
3320 		}
3321 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
3322 			      "(%d)\n", __func__, init_retry_count));
3323 
3324 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3325 			continue;
3326 
3327 		status = qla4xxx_initialize_adapter(ha);
3328 	}
3329 
3330 	if (!test_bit(AF_ONLINE, &ha->flags)) {
3331 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
3332 
3333 		if (is_qla8022(ha) && ql4xdontresethba) {
3334 			/* Put the device in failed state. */
3335 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3336 			qla4_8xxx_idc_lock(ha);
3337 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3338 			    QLA82XX_DEV_FAILED);
3339 			qla4_8xxx_idc_unlock(ha);
3340 		}
3341 		ret = -ENODEV;
3342 		goto remove_host;
3343 	}
3344 
3345 	/* Startup the kernel thread for this host adapter. */
3346 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
3347 		      "qla4xxx_dpc\n", __func__));
3348 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3349 	ha->dpc_thread = create_singlethread_workqueue(buf);
3350 	if (!ha->dpc_thread) {
3351 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
3352 		ret = -ENODEV;
3353 		goto remove_host;
3354 	}
3355 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
3356 
3357 	sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3358 	ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3359 	if (!ha->task_wq) {
3360 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3361 		ret = -ENODEV;
3362 		goto remove_host;
3363 	}
3364 
3365 	/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3366 	 * (which is called indirectly by qla4xxx_initialize_adapter),
3367 	 * so that irqs will be registered after crbinit but before
3368 	 * mbx_intr_enable.
3369 	 */
3370 	if (!is_qla8022(ha)) {
3371 		ret = qla4xxx_request_irqs(ha);
3372 		if (ret) {
3373 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3374 			    "interrupt %d already in use.\n", pdev->irq);
3375 			goto remove_host;
3376 		}
3377 	}
3378 
3379 	pci_save_state(ha->pdev);
3380 	ha->isp_ops->enable_intrs(ha);
3381 
3382 	/* Start timer thread. */
3383 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3384 
3385 	set_bit(AF_INIT_DONE, &ha->flags);
3386 
3387 	printk(KERN_INFO
3388 	       " QLogic iSCSI HBA Driver version: %s\n"
3389 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3390 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3391 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3392 	       ha->patch_number, ha->build_number);
3393 
3394 	qla4xxx_create_chap_list(ha);
3395 
3396 	if (qla4xxx_setup_boot_info(ha))
3397 		ql4_printk(KERN_ERR, ha, "%s: iSCSI boot info setup failed\n",
3398 			   __func__);
3399 
3400 	qla4xxx_create_ifaces(ha);
3401 	return 0;
3402 
3403 remove_host:
3404 	scsi_remove_host(ha->host);
3405 
3406 probe_failed:
3407 	qla4xxx_free_adapter(ha);
3408 
3409 probe_failed_ioconfig:
3410 	pci_disable_pcie_error_reporting(pdev);
3411 	scsi_host_put(ha->host);
3412 
3413 probe_disable_device:
3414 	pci_disable_device(pdev);
3415 
3416 	return ret;
3417 }
3418 
3419 /**
3420  * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
3421  * @ha: pointer to adapter structure
3422  *
3423  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3424  * so that the other port will not re-initialize while in the process of
3425  * removing the ha due to driver unload or hba hotplug.
3426  **/
3427 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3428 {
3429 	struct scsi_qla_host *other_ha = NULL;
3430 	struct pci_dev *other_pdev = NULL;
3431 	int fn = ISP4XXX_PCI_FN_2;
3432 
3433 	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
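	/* If BIT_1 of this port's devfn is set, this is the higher iSCSI
	 * function, so the other port is ISP4XXX_PCI_FN_1; otherwise the
	 * default ISP4XXX_PCI_FN_2 chosen above is used.
	 */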
3434 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3435 		fn = ISP4XXX_PCI_FN_1;
3436 
3437 	other_pdev =
3438 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3439 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3440 		fn));
3441 
3442 	/* Get other_ha if other_pdev is valid and its state is enabled */
3443 	if (other_pdev) {
3444 		if (atomic_read(&other_pdev->enable_cnt)) {
3445 			other_ha = pci_get_drvdata(other_pdev);
3446 			if (other_ha) {
3447 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
3448 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3449 				    "Prevent %s reinit\n", __func__,
3450 				    dev_name(&other_ha->pdev->dev)));
3451 			}
3452 		}
3453 		pci_dev_put(other_pdev);
3454 	}
3455 }
3456 
3457 /**
3458  * qla4xxx_remove_adapter - callback function to remove adapter.
3459  * @pdev: PCI device pointer
3460  **/
3461 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3462 {
3463 	struct scsi_qla_host *ha;
3464 
3465 	ha = pci_get_drvdata(pdev);
3466 
3467 	if (!is_qla8022(ha))
3468 		qla4xxx_prevent_other_port_reinit(ha);
3469 
3470 	/* destroy iface from sysfs */
3471 	qla4xxx_destroy_ifaces(ha);
3472 
3473 	if (ha->boot_kset)
3474 		iscsi_boot_destroy_kset(ha->boot_kset);
3475 
3476 	scsi_remove_host(ha->host);
3477 
3478 	qla4xxx_free_adapter(ha);
3479 
3480 	scsi_host_put(ha->host);
3481 
3482 	pci_disable_pcie_error_reporting(pdev);
3483 	pci_disable_device(pdev);
3484 	pci_set_drvdata(pdev, NULL);
3485 }
3486 
3487 /**
3488  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3489  * @ha: HA context
3490  *
3491  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
3492  * supported addressing method.
3493  */
3494 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
3495 {
3496 	int retval;
3497 
3498 	/* Update our PCI device dma_mask for full 64 bit mask */
3499 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3500 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
3501 			dev_dbg(&ha->pdev->dev,
3502 				  "Failed to set 64 bit PCI consistent mask; "
3503 				   "using 32 bit.\n");
3504 			retval = pci_set_consistent_dma_mask(ha->pdev,
3505 							     DMA_BIT_MASK(32));
3506 		}
3507 	} else
3508 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
3509 }
3510 
3511 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3512 {
3513 	struct iscsi_cls_session *cls_sess;
3514 	struct iscsi_session *sess;
3515 	struct ddb_entry *ddb;
3516 	int queue_depth = QL4_DEF_QDEPTH;
3517 
3518 	cls_sess = starget_to_session(sdev->sdev_target);
3519 	sess = cls_sess->dd_data;
3520 	ddb = sess->dd_data;
3521 
3522 	sdev->hostdata = ddb;
3523 	sdev->tagged_supported = 1;
3524 
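	/* Honor the ql4xmaxqdepth module parameter only when it is a non-zero
	 * value that fits in 16 bits; otherwise fall back to QL4_DEF_QDEPTH.
	 */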
3525 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3526 		queue_depth = ql4xmaxqdepth;
3527 
3528 	scsi_activate_tcq(sdev, queue_depth);
3529 	return 0;
3530 }
3531 
3532 static int qla4xxx_slave_configure(struct scsi_device *sdev)
3533 {
3534 	sdev->tagged_supported = 1;
3535 	return 0;
3536 }
3537 
3538 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3539 {
3540 	scsi_deactivate_tcq(sdev, 1);
3541 }
3542 
3543 /**
3544  * qla4xxx_del_from_active_array - returns an active srb
3545  * @ha: Pointer to host adapter structure.
3546  * @index: index into the active_array
3547  *
3548  * This routine removes and returns the srb at the specified index
3549  **/
3550 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3551     uint32_t index)
3552 {
3553 	struct srb *srb = NULL;
3554 	struct scsi_cmnd *cmd = NULL;
3555 
3556 	cmd = scsi_host_find_tag(ha->host, index);
3557 	if (!cmd)
3558 		return srb;
3559 
3560 	srb = (struct srb *)CMD_SP(cmd);
3561 	if (!srb)
3562 		return srb;
3563 
3564 	/* update counters */
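	/* Return the IOCB slots held by this srb to the request queue,
	 * drop them from the outstanding IOCB count, and reset the
	 * command's host_scribble to MAX_SRBS so it is no longer
	 * treated as an active command.
	 */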
3565 	if (srb->flags & SRB_DMA_VALID) {
3566 		ha->req_q_count += srb->iocb_cnt;
3567 		ha->iocb_cnt -= srb->iocb_cnt;
3568 		if (srb->cmd)
3569 			srb->cmd->host_scribble =
3570 				(unsigned char *)(unsigned long) MAX_SRBS;
3571 	}
3572 	return srb;
3573 }
3574 
3575 /**
3576  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
3577  * @ha: Pointer to host adapter structure.
3578  * @cmd: Scsi Command to wait on.
3579  *
3580  * This routine waits a bounded amount of time for the command to be
3581  * returned by the firmware.
3582  **/
3583 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3584 				      struct scsi_cmnd *cmd)
3585 {
3586 	int done = 0;
3587 	struct srb *rp;
3588 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
3589 	int ret = SUCCESS;
3590 
3591 	/* Don't wait on the command if a PCI error is being handled
3592 	 * by PCI AER driver
3593 	 */
3594 	if (unlikely(pci_channel_offline(ha->pdev)) ||
3595 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
3596 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3597 		    ha->host_no, __func__);
3598 		return ret;
3599 	}
3600 
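	/* Poll every 2 seconds, for up to EH_WAIT_CMD_TOV iterations, until
	 * the firmware returns the command (CMD_SP is cleared on completion).
	 */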
3601 	do {
3602 		/* Check if the command has been returned to the OS */
3603 		rp = (struct srb *) CMD_SP(cmd);
3604 		if (rp == NULL) {
3605 			done++;
3606 			break;
3607 		}
3608 
3609 		msleep(2000);
3610 	} while (max_wait_time--);
3611 
3612 	return done;
3613 }
3614 
3615 /**
3616  * qla4xxx_wait_for_hba_online - waits for HBA to come online
3617  * @ha: Pointer to host adapter structure
3618  **/
3619 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3620 {
3621 	unsigned long wait_online;
3622 
3623 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
3624 	while (time_before(jiffies, wait_online)) {
3625 
3626 		if (adapter_up(ha))
3627 			return QLA_SUCCESS;
3628 
3629 		msleep(2000);
3630 	}
3631 
3632 	return QLA_ERROR;
3633 }
3634 
3635 /**
3636  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
3637  * @ha: pointer to HBA
3638  * @stgt: pointer to SCSI target
3639  * @sdev: pointer to SCSI device (may be NULL to wait on the whole target)
3640  *
3641  * This function waits for all outstanding commands to the target (or lun) to
3642  * complete. It returns 0 if all pending commands are returned and 1 otherwise.
3643  **/
3644 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3645 					struct scsi_target *stgt,
3646 					struct scsi_device *sdev)
3647 {
3648 	int cnt;
3649 	int status = 0;
3650 	struct scsi_cmnd *cmd;
3651 
3652 	/*
3653 	 * Wait for all commands for the designated target or device
3654 	 * in the active array to complete.
3655 	 */
3656 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3657 		cmd = scsi_host_find_tag(ha->host, cnt);
3658 		if (cmd && stgt == scsi_target(cmd->device) &&
3659 		    (!sdev || sdev == cmd->device)) {
3660 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3661 				status++;
3662 				break;
3663 			}
3664 		}
3665 	}
3666 	return status;
3667 }
3668 
3669 /**
3670  * qla4xxx_eh_abort - callback for abort task.
3671  * @cmd: Pointer to Linux's SCSI command structure
3672  *
3673  * This routine is called by the Linux OS to abort the specified
3674  * command.
3675  **/
3676 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3677 {
3678 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3679 	unsigned int id = cmd->device->id;
3680 	unsigned int lun = cmd->device->lun;
3681 	unsigned long flags;
3682 	struct srb *srb = NULL;
3683 	int ret = SUCCESS;
3684 	int wait = 0;
3685 
3686 	ql4_printk(KERN_INFO, ha,
3687 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3688 	    ha->host_no, id, lun, cmd);
3689 
3690 	spin_lock_irqsave(&ha->hardware_lock, flags);
3691 	srb = (struct srb *) CMD_SP(cmd);
3692 	if (!srb) {
3693 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3694 		return SUCCESS;
3695 	}
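	/* Hold a reference on the srb so it cannot be freed while the
	 * abort task mailbox command is outstanding.
	 */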
3696 	kref_get(&srb->srb_ref);
3697 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3698 
3699 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3700 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3701 		    ha->host_no, id, lun));
3702 		ret = FAILED;
3703 	} else {
3704 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3705 		    ha->host_no, id, lun));
3706 		wait = 1;
3707 	}
3708 
3709 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3710 
3711 	/* Wait for command to complete */
3712 	if (wait) {
3713 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3714 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3715 			    ha->host_no, id, lun));
3716 			ret = FAILED;
3717 		}
3718 	}
3719 
3720 	ql4_printk(KERN_INFO, ha,
3721 	    "scsi%ld:%d:%d: Abort command - %s\n",
3722 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
3723 
3724 	return ret;
3725 }
3726 
3727 /**
3728  * qla4xxx_eh_device_reset - callback for device (lun) reset.
3729  * @cmd: Pointer to Linux's SCSI command structure
3730  *
3731  * This routine is called by the Linux OS to reset the lun addressed by
3732  * the specified command.
3733  **/
3734 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3735 {
3736 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3737 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
3738 	int ret = FAILED, stat;
3739 
3740 	if (!ddb_entry)
3741 		return ret;
3742 
3743 	ret = iscsi_block_scsi_eh(cmd);
3744 	if (ret)
3745 		return ret;
3746 	ret = FAILED;
3747 
3748 	ql4_printk(KERN_INFO, ha,
3749 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3750 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
3751 
3752 	DEBUG2(printk(KERN_INFO
3753 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3754 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
3755 		      cmd, jiffies, cmd->request->timeout / HZ,
3756 		      ha->dpc_flags, cmd->result, cmd->allowed));
3757 
3758 	/* FIXME: wait for hba to go online */
3759 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3760 	if (stat != QLA_SUCCESS) {
3761 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
3762 		goto eh_dev_reset_done;
3763 	}
3764 
3765 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3766 					 cmd->device)) {
3767 		ql4_printk(KERN_INFO, ha,
3768 			   "DEVICE RESET FAILED - waiting for "
3769 			   "commands.\n");
3770 		goto eh_dev_reset_done;
3771 	}
3772 
3773 	/* Send marker. */
3774 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3775 		MM_LUN_RESET) != QLA_SUCCESS)
3776 		goto eh_dev_reset_done;
3777 
3778 	ql4_printk(KERN_INFO, ha,
3779 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3780 		   ha->host_no, cmd->device->channel, cmd->device->id,
3781 		   cmd->device->lun);
3782 
3783 	ret = SUCCESS;
3784 
3785 eh_dev_reset_done:
3786 
3787 	return ret;
3788 }
3789 
3790 /**
3791  * qla4xxx_eh_target_reset - callback for target reset.
3792  * @cmd: Pointer to Linux's SCSI command structure
3793  *
3794  * This routine is called by the Linux OS to reset the target.
3795  **/
3796 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3797 {
3798 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3799 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
3800 	int stat, ret;
3801 
3802 	if (!ddb_entry)
3803 		return FAILED;
3804 
3805 	ret = iscsi_block_scsi_eh(cmd);
3806 	if (ret)
3807 		return ret;
3808 
3809 	starget_printk(KERN_INFO, scsi_target(cmd->device),
3810 		       "WARM TARGET RESET ISSUED.\n");
3811 
3812 	DEBUG2(printk(KERN_INFO
3813 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3814 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
3815 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
3816 		      ha->dpc_flags, cmd->result, cmd->allowed));
3817 
3818 	stat = qla4xxx_reset_target(ha, ddb_entry);
3819 	if (stat != QLA_SUCCESS) {
3820 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3821 			       "WARM TARGET RESET FAILED.\n");
3822 		return FAILED;
3823 	}
3824 
3825 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3826 					 NULL)) {
3827 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3828 			       "WARM TARGET DEVICE RESET FAILED - "
3829 			       "waiting for commands.\n");
3830 		return FAILED;
3831 	}
3832 
3833 	/* Send marker. */
3834 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3835 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3836 		starget_printk(KERN_INFO, scsi_target(cmd->device),
3837 			       "WARM TARGET DEVICE RESET FAILED - "
3838 			       "marker iocb failed.\n");
3839 		return FAILED;
3840 	}
3841 
3842 	starget_printk(KERN_INFO, scsi_target(cmd->device),
3843 		       "WARM TARGET RESET SUCCEEDED.\n");
3844 	return SUCCESS;
3845 }
3846 
3847 /**
3848  * qla4xxx_eh_host_reset - kernel callback
3849  * @cmd: Pointer to Linux's SCSI command structure
3850  *
3851  * This routine is invoked by the Linux kernel to perform fatal error
3852  * recovery on the specified adapter.
3853  **/
3854 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3855 {
3856 	int return_status = FAILED;
3857 	struct scsi_qla_host *ha;
3858 
3859 	ha = to_qla_host(cmd->device->host);
3860 
3861 	if (ql4xdontresethba) {
3862 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3863 		     ha->host_no, __func__));
3864 		return FAILED;
3865 	}
3866 
3867 	ql4_printk(KERN_INFO, ha,
3868 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
3869 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
3870 
3871 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3872 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
3873 			      "DEAD.\n", ha->host_no, cmd->device->channel,
3874 			      __func__));
3875 
3876 		return FAILED;
3877 	}
3878 
3879 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3880 		if (is_qla8022(ha))
3881 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3882 		else
3883 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
3884 	}
3885 
3886 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
3887 		return_status = SUCCESS;
3888 
3889 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
3890 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
3891 
3892 	return return_status;
3893 }
3894 
3895 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3896 {
3897 	uint32_t mbox_cmd[MBOX_REG_COUNT];
3898 	uint32_t mbox_sts[MBOX_REG_COUNT];
3899 	struct addr_ctrl_blk_def *acb = NULL;
3900 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3901 	int rval = QLA_SUCCESS;
3902 	dma_addr_t acb_dma;
3903 
3904 	acb = dma_alloc_coherent(&ha->pdev->dev,
3905 				 sizeof(struct addr_ctrl_blk_def),
3906 				 &acb_dma, GFP_KERNEL);
3907 	if (!acb) {
3908 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3909 			   __func__);
3910 		rval = -ENOMEM;
3911 		goto exit_port_reset;
3912 	}
3913 
3914 	memset(acb, 0, acb_len);
3915 
3916 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3917 	if (rval != QLA_SUCCESS) {
3918 		rval = -EIO;
3919 		goto exit_free_acb;
3920 	}
3921 
3922 	rval = qla4xxx_disable_acb(ha);
3923 	if (rval != QLA_SUCCESS) {
3924 		rval = -EIO;
3925 		goto exit_free_acb;
3926 	}
3927 
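	/* Wait (bounded by DISABLE_ACB_TOV) for the firmware to complete the
	 * ACB disable before restoring the saved ACB below.
	 */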
3928 	wait_for_completion_timeout(&ha->disable_acb_comp,
3929 				    DISABLE_ACB_TOV * HZ);
3930 
3931 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3932 	if (rval != QLA_SUCCESS) {
3933 		rval = -EIO;
3934 		goto exit_free_acb;
3935 	}
3936 
3937 exit_free_acb:
3938 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3939 			  acb, acb_dma);
3940 exit_port_reset:
3941 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3942 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3943 	return rval;
3944 }
3945 
3946 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3947 {
3948 	struct scsi_qla_host *ha = to_qla_host(shost);
3949 	int rval = QLA_SUCCESS;
3950 
3951 	if (ql4xdontresethba) {
3952 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3953 				  __func__));
3954 		rval = -EPERM;
3955 		goto exit_host_reset;
3956 	}
3957 
3958 	rval = qla4xxx_wait_for_hba_online(ha);
3959 	if (rval != QLA_SUCCESS) {
3960 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3961 				  "adapter\n", __func__));
3962 		rval = -EIO;
3963 		goto exit_host_reset;
3964 	}
3965 
3966 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3967 		goto recover_adapter;
3968 
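	/* SCSI_ADAPTER_RESET requests a full adapter reset via the DPC.
	 * SCSI_FIRMWARE_RESET resets only the firmware context: through the
	 * DPC on ISP82xx, or directly via qla4xxx_context_reset() otherwise.
	 */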
3969 	switch (reset_type) {
3970 	case SCSI_ADAPTER_RESET:
3971 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
3972 		break;
3973 	case SCSI_FIRMWARE_RESET:
3974 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3975 			if (is_qla8022(ha))
3976 				/* set firmware context reset */
3977 				set_bit(DPC_RESET_HA_FW_CONTEXT,
3978 					&ha->dpc_flags);
3979 			else {
3980 				rval = qla4xxx_context_reset(ha);
3981 				goto exit_host_reset;
3982 			}
3983 		}
3984 		break;
3985 	}
3986 
3987 recover_adapter:
3988 	rval = qla4xxx_recover_adapter(ha);
3989 	if (rval != QLA_SUCCESS) {
3990 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3991 				  __func__));
3992 		rval = -EIO;
3993 	}
3994 
3995 exit_host_reset:
3996 	return rval;
3997 }
3998 
3999 /* PCI AER driver recovers from all correctable errors w/o
4000  * driver intervention. For uncorrectable errors PCI AER
4001  * driver calls the following device driver's callbacks
4002  *
4003  * - Fatal Errors - link_reset
4004  * - Non-Fatal Errors - driver's pci_error_detected() which
4005  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
4006  *
4007  * PCI AER driver calls
4008  * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
4009  *               returns RECOVERED or NEED_RESET if fw_hung
4010  * NEED_RESET - driver's slot_reset()
4011  * DISCONNECT - device is dead & cannot recover
4012  * RECOVERED - driver's pci_resume()
4013  */
4014 static pci_ers_result_t
4015 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4016 {
4017 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4018 
4019 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
4020 	    ha->host_no, __func__, state);
4021 
4022 	if (!is_aer_supported(ha))
4023 		return PCI_ERS_RESULT_NONE;
4024 
4025 	switch (state) {
4026 	case pci_channel_io_normal:
4027 		clear_bit(AF_EEH_BUSY, &ha->flags);
4028 		return PCI_ERS_RESULT_CAN_RECOVER;
4029 	case pci_channel_io_frozen:
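		/* I/O to the device is frozen: flag EEH handling in progress,
		 * complete any waiting mailbox commands, release the IRQs,
		 * disable the device and fail back outstanding commands so
		 * they can be retried after the slot reset.
		 */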
4030 		set_bit(AF_EEH_BUSY, &ha->flags);
4031 		qla4xxx_mailbox_premature_completion(ha);
4032 		qla4xxx_free_irqs(ha);
4033 		pci_disable_device(pdev);
4034 		/* Return back all IOs */
4035 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4036 		return PCI_ERS_RESULT_NEED_RESET;
4037 	case pci_channel_io_perm_failure:
4038 		set_bit(AF_EEH_BUSY, &ha->flags);
4039 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
4040 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4041 		return PCI_ERS_RESULT_DISCONNECT;
4042 	}
4043 	return PCI_ERS_RESULT_NEED_RESET;
4044 }
4045 
4046 /**
4047  * qla4xxx_pci_mmio_enabled() gets called if
4048  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
4049  * and read/write to the device still works.
4050  **/
4051 static pci_ers_result_t
4052 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
4053 {
4054 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4055 
4056 	if (!is_aer_supported(ha))
4057 		return PCI_ERS_RESULT_NONE;
4058 
4059 	return PCI_ERS_RESULT_RECOVERED;
4060 }
4061 
4062 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
4063 {
4064 	uint32_t rval = QLA_ERROR;
4065 	uint32_t ret = 0;
4066 	int fn;
4067 	struct pci_dev *other_pdev = NULL;
4068 
4069 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
4070 
4071 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4072 
4073 	if (test_bit(AF_ONLINE, &ha->flags)) {
4074 		clear_bit(AF_ONLINE, &ha->flags);
4075 		clear_bit(AF_LINK_UP, &ha->flags);
4076 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4077 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4078 	}
4079 
4080 	fn = PCI_FUNC(ha->pdev->devfn);
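	/* Scan the lower-numbered PCI functions of this device; if one of
	 * them is already enabled it will act as the reset owner. If none is
	 * found, fn reaches 0 and this function performs the reset below.
	 */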
4081 	while (fn > 0) {
4082 		fn--;
4083 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
4084 		    "func %x\n", ha->host_no, __func__, fn);
4085 		/* Get the pci device given the domain, bus,
4086 		 * slot/function number */
4087 		other_pdev =
4088 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
4089 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
4090 		    fn));
4091 
4092 		if (!other_pdev)
4093 			continue;
4094 
4095 		if (atomic_read(&other_pdev->enable_cnt)) {
4096 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
4097 			    "func in enabled state%x\n", ha->host_no,
4098 			    __func__, fn);
4099 			pci_dev_put(other_pdev);
4100 			break;
4101 		}
4102 		pci_dev_put(other_pdev);
4103 	}
4104 
4105 	/* The lowest enabled function on the card is the reset owner and
4106 	 * will start and initialize the firmware. The other functions
4107 	 * on the card only reset their firmware context.
4108 	 */
4109 	if (!fn) {
4110 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
4111 		    "0x%x is the owner\n", ha->host_no, __func__,
4112 		    ha->pdev->devfn);
4113 
4114 		qla4_8xxx_idc_lock(ha);
4115 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4116 		    QLA82XX_DEV_COLD);
4117 
4118 		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4119 		    QLA82XX_IDC_VERSION);
4120 
4121 		qla4_8xxx_idc_unlock(ha);
4122 		clear_bit(AF_FW_RECOVERY, &ha->flags);
4123 		rval = qla4xxx_initialize_adapter(ha);
4124 		qla4_8xxx_idc_lock(ha);
4125 
4126 		if (rval != QLA_SUCCESS) {
4127 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4128 			    "FAILED\n", ha->host_no, __func__);
4129 			qla4_8xxx_clear_drv_active(ha);
4130 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4131 			    QLA82XX_DEV_FAILED);
4132 		} else {
4133 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4134 			    "READY\n", ha->host_no, __func__);
4135 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4136 			    QLA82XX_DEV_READY);
4137 			/* Clear driver state register */
4138 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4139 			qla4_8xxx_set_drv_active(ha);
4140 			ret = qla4xxx_request_irqs(ha);
4141 			if (ret) {
4142 				ql4_printk(KERN_WARNING, ha, "Failed to "
4143 				    "reserve interrupt %d already in use.\n",
4144 				    ha->pdev->irq);
4145 				rval = QLA_ERROR;
4146 			} else {
4147 				ha->isp_ops->enable_intrs(ha);
4148 				rval = QLA_SUCCESS;
4149 			}
4150 		}
4151 		qla4_8xxx_idc_unlock(ha);
4152 	} else {
4153 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
4154 		    "the reset owner\n", ha->host_no, __func__,
4155 		    ha->pdev->devfn);
4156 		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4157 		    QLA82XX_DEV_READY)) {
4158 			clear_bit(AF_FW_RECOVERY, &ha->flags);
4159 			rval = qla4xxx_initialize_adapter(ha);
4160 			if (rval == QLA_SUCCESS) {
4161 				ret = qla4xxx_request_irqs(ha);
4162 				if (ret) {
4163 					ql4_printk(KERN_WARNING, ha, "Failed to"
4164 					    " reserve interrupt %d already in"
4165 					    " use.\n", ha->pdev->irq);
4166 					rval = QLA_ERROR;
4167 				} else {
4168 					ha->isp_ops->enable_intrs(ha);
4169 					rval = QLA_SUCCESS;
4170 				}
4171 			}
4172 			qla4_8xxx_idc_lock(ha);
4173 			qla4_8xxx_set_drv_active(ha);
4174 			qla4_8xxx_idc_unlock(ha);
4175 		}
4176 	}
4177 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4178 	return rval;
4179 }
4180 
4181 static pci_ers_result_t
4182 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
4183 {
4184 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4185 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4186 	int rc;
4187 
4188 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
4189 	    ha->host_no, __func__);
4190 
4191 	if (!is_aer_supported(ha))
4192 		return PCI_ERS_RESULT_NONE;
4193 
4194 	/* Restore the saved state of PCIe device -
4195 	 * BAR registers, PCI Config space, PCIX, MSI,
4196 	 * IOV states
4197 	 */
4198 	pci_restore_state(pdev);
4199 
4200 	/* pci_restore_state() clears the saved_state flag of the device,
4201 	 * so save the state again to set the saved_state flag.
4202 	 */
4203 	pci_save_state(pdev);
4204 
4205 	/* Initialize device or resume if in suspended state */
4206 	rc = pci_enable_device(pdev);
4207 	if (rc) {
4208 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
4209 		    "device after reset\n", ha->host_no, __func__);
4210 		goto exit_slot_reset;
4211 	}
4212 
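	/* Keep interrupts masked; qla4_8xxx_error_recovery() re-registers
	 * and re-enables them once the firmware is back up.
	 */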
4213 	ha->isp_ops->disable_intrs(ha);
4214 
4215 	if (is_qla8022(ha)) {
4216 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
4217 			ret = PCI_ERS_RESULT_RECOVERED;
4218 			goto exit_slot_reset;
4219 		} else
4220 			goto exit_slot_reset;
4221 	}
4222 
4223 exit_slot_reset:
4224 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
4225 	    ha->host_no, __func__, ret);
4226 	return ret;
4227 }
4228 
4229 static void
4230 qla4xxx_pci_resume(struct pci_dev *pdev)
4231 {
4232 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4233 	int ret;
4234 
4235 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
4236 	    ha->host_no, __func__);
4237 
4238 	ret = qla4xxx_wait_for_hba_online(ha);
4239 	if (ret != QLA_SUCCESS) {
4240 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
4241 		    "resume I/O from slot/link_reset\n", ha->host_no,
4242 		     __func__);
4243 	}
4244 
4245 	pci_cleanup_aer_uncorrect_error_status(pdev);
4246 	clear_bit(AF_EEH_BUSY, &ha->flags);
4247 }
4248 
4249 static struct pci_error_handlers qla4xxx_err_handler = {
4250 	.error_detected = qla4xxx_pci_error_detected,
4251 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
4252 	.slot_reset = qla4xxx_pci_slot_reset,
4253 	.resume = qla4xxx_pci_resume,
4254 };
4255 
4256 static struct pci_device_id qla4xxx_pci_tbl[] = {
4257 	{
4258 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4259 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
4260 		.subvendor	= PCI_ANY_ID,
4261 		.subdevice	= PCI_ANY_ID,
4262 	},
4263 	{
4264 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4265 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
4266 		.subvendor	= PCI_ANY_ID,
4267 		.subdevice	= PCI_ANY_ID,
4268 	},
4269 	{
4270 		.vendor		= PCI_VENDOR_ID_QLOGIC,
4271 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
4272 		.subvendor	= PCI_ANY_ID,
4273 		.subdevice	= PCI_ANY_ID,
4274 	},
4275 	{
4276 		.vendor         = PCI_VENDOR_ID_QLOGIC,
4277 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
4278 		.subvendor      = PCI_ANY_ID,
4279 		.subdevice      = PCI_ANY_ID,
4280 	},
4281 	{0, 0},
4282 };
4283 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
4284 
4285 static struct pci_driver qla4xxx_pci_driver = {
4286 	.name		= DRIVER_NAME,
4287 	.id_table	= qla4xxx_pci_tbl,
4288 	.probe		= qla4xxx_probe_adapter,
4289 	.remove		= qla4xxx_remove_adapter,
4290 	.err_handler = &qla4xxx_err_handler,
4291 };
4292 
4293 static int __init qla4xxx_module_init(void)
4294 {
4295 	int ret;
4296 
4297 	/* Allocate cache for SRBs. */
4298 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
4299 				       SLAB_HWCACHE_ALIGN, NULL);
4300 	if (srb_cachep == NULL) {
4301 		printk(KERN_ERR
4302 		       "%s: Unable to allocate SRB cache..."
4303 		       "Failing load!\n", DRIVER_NAME);
4304 		ret = -ENOMEM;
4305 		goto no_srp_cache;
4306 	}
4307 
4308 	/* Derive version string. */
4309 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
4310 	if (ql4xextended_error_logging)
4311 		strcat(qla4xxx_version_str, "-debug");
4312 
4313 	qla4xxx_scsi_transport =
4314 		iscsi_register_transport(&qla4xxx_iscsi_transport);
4315 	if (!qla4xxx_scsi_transport){
4316 		ret = -ENODEV;
4317 		goto release_srb_cache;
4318 	}
4319 
4320 	ret = pci_register_driver(&qla4xxx_pci_driver);
4321 	if (ret)
4322 		goto unregister_transport;
4323 
4324 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
4325 	return 0;
4326 
4327 unregister_transport:
4328 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4329 release_srb_cache:
4330 	kmem_cache_destroy(srb_cachep);
4331 no_srp_cache:
4332 	return ret;
4333 }
4334 
4335 static void __exit qla4xxx_module_exit(void)
4336 {
4337 	pci_unregister_driver(&qla4xxx_pci_driver);
4338 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4339 	kmem_cache_destroy(srb_cachep);
4340 }
4341 
4342 module_init(qla4xxx_module_init);
4343 module_exit(qla4xxx_module_exit);
4344 
4345 MODULE_AUTHOR("QLogic Corporation");
4346 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
4347 MODULE_LICENSE("GPL");
4348 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
4349