xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision 089a49b6)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22 
23 /*
24  * Driver version
25  */
26 static char qla4xxx_version_str[40];
27 
28 /*
29  * SRB allocation cache
30  */
31 static struct kmem_cache *srb_cachep;
32 
33 /*
34  * Module parameter information and variables
35  */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 		 " Set to disable exporting boot targets to sysfs.\n"
40 		 "\t\t  0 - Export boot targets\n"
41 		 "\t\t  1 - Do not export boot targets (Default)");
42 
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 		 " Don't reset the HBA for driver recovery.\n"
47 		 "\t\t  0 - It will reset HBA (Default)\n"
48 		 "\t\t  1 - It will NOT reset HBA");
49 
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 		 " Option to enable extended error logging.\n"
54 		 "\t\t  0 - no logging (Default)\n"
55 		 "\t\t  2 - debug logging");
56 
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
62 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
63 		 "\t\t  2 = enable MSI interrupt mechanism.");
64 
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 		 " Maximum queue depth to report for target devices.\n"
70 		 "\t\t  Default: 32.");
71 
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 		 " Enable or disable dynamic tracking and adjustment of\n"
76 		 "\t\t scsi device queue depth.\n"
77 		 "\t\t  0 - Disable.\n"
78 		 "\t\t  1 - Enable. (Default)");
79 
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 		" Target Session Recovery Timeout.\n"
84 		"\t\t  Default: 120 sec.");
85 
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 		 " Set the Minidump driver capture mask level.\n"
90 		 "\t\t  Default is 0x1F.\n"
91 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92 
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 		 " Set to enable minidump.\n"
97 		 "\t\t  0 - disable minidump\n"
98 		 "\t\t  1 - enable minidump (Default)");
99 
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102  * SCSI host template entry points
103  */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105 
106 /*
107  * iSCSI template entry points
108  */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 				     enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 				  enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 				  enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 				   uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 				   enum iscsi_param_type param_type,
119 				   int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 						 struct sockaddr *dst_addr,
123 						 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 				enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 			     struct iscsi_cls_conn *cls_conn,
133 			     uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 			uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 				   struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 			     uint32_t iface_type, uint32_t payload_size,
148 			     uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 				 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 
153 /*
154  * SCSI host template entry points
155  */
156 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_slave_alloc(struct scsi_device *device);
162 static int qla4xxx_slave_configure(struct scsi_device *device);
163 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
164 static umode_t qla4_attr_is_visible(int param_type, int param);
165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
166 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
167 				      int reason);
168 
169 /*
170  * iSCSI Flash DDB sysfs entry points
171  */
172 static int
173 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
174 			    struct iscsi_bus_flash_conn *fnode_conn,
175 			    void *data, int len);
176 static int
177 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
178 			    int param, char *buf);
179 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
180 				 int len);
181 static int
182 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
183 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
184 				   struct iscsi_bus_flash_conn *fnode_conn);
185 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
186 				    struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188 
189 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
190     QLA82XX_LEGACY_INTR_CONFIG;
191 
192 static struct scsi_host_template qla4xxx_driver_template = {
193 	.module			= THIS_MODULE,
194 	.name			= DRIVER_NAME,
195 	.proc_name		= DRIVER_NAME,
196 	.queuecommand		= qla4xxx_queuecommand,
197 
198 	.eh_abort_handler	= qla4xxx_eh_abort,
199 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
200 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
201 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
202 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
203 
204 	.slave_configure	= qla4xxx_slave_configure,
205 	.slave_alloc		= qla4xxx_slave_alloc,
206 	.slave_destroy		= qla4xxx_slave_destroy,
207 	.change_queue_depth	= qla4xxx_change_queue_depth,
208 
209 	.this_id		= -1,
210 	.cmd_per_lun		= 3,
211 	.use_clustering		= ENABLE_CLUSTERING,
212 	.sg_tablesize		= SG_ALL,
213 
214 	.max_sectors		= 0xFFFF,
215 	.shost_attrs		= qla4xxx_host_attrs,
216 	.host_reset		= qla4xxx_host_reset,
217 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
218 };
219 
220 static struct iscsi_transport qla4xxx_iscsi_transport = {
221 	.owner			= THIS_MODULE,
222 	.name			= DRIVER_NAME,
223 	.caps			= CAP_TEXT_NEGO |
224 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
225 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
226 				  CAP_MULTI_R2T,
227 	.attr_is_visible	= qla4_attr_is_visible,
228 	.create_session         = qla4xxx_session_create,
229 	.destroy_session        = qla4xxx_session_destroy,
230 	.start_conn             = qla4xxx_conn_start,
231 	.create_conn            = qla4xxx_conn_create,
232 	.bind_conn              = qla4xxx_conn_bind,
233 	.stop_conn              = iscsi_conn_stop,
234 	.destroy_conn           = qla4xxx_conn_destroy,
235 	.set_param              = iscsi_set_param,
236 	.get_conn_param		= qla4xxx_conn_get_param,
237 	.get_session_param	= qla4xxx_session_get_param,
238 	.get_ep_param           = qla4xxx_get_ep_param,
239 	.ep_connect		= qla4xxx_ep_connect,
240 	.ep_poll		= qla4xxx_ep_poll,
241 	.ep_disconnect		= qla4xxx_ep_disconnect,
242 	.get_stats		= qla4xxx_conn_get_stats,
243 	.send_pdu		= iscsi_conn_send_pdu,
244 	.xmit_task		= qla4xxx_task_xmit,
245 	.cleanup_task		= qla4xxx_task_cleanup,
246 	.alloc_pdu		= qla4xxx_alloc_pdu,
247 
248 	.get_host_param		= qla4xxx_host_get_param,
249 	.set_iface_param	= qla4xxx_iface_set_param,
250 	.get_iface_param	= qla4xxx_get_iface_param,
251 	.bsg_request		= qla4xxx_bsg_request,
252 	.send_ping		= qla4xxx_send_ping,
253 	.get_chap		= qla4xxx_get_chap_list,
254 	.delete_chap		= qla4xxx_delete_chap,
255 	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
256 	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
257 	.new_flashnode		= qla4xxx_sysfs_ddb_add,
258 	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
259 	.login_flashnode	= qla4xxx_sysfs_ddb_login,
260 	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
261 	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
262 };
263 
264 static struct scsi_transport_template *qla4xxx_scsi_transport;
265 
266 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
267 			     uint32_t iface_type, uint32_t payload_size,
268 			     uint32_t pid, struct sockaddr *dst_addr)
269 {
270 	struct scsi_qla_host *ha = to_qla_host(shost);
271 	struct sockaddr_in *addr;
272 	struct sockaddr_in6 *addr6;
273 	uint32_t options = 0;
274 	uint8_t ipaddr[IPv6_ADDR_LEN];
275 	int rval;
276 
277 	memset(ipaddr, 0, IPv6_ADDR_LEN);
278 	/* IPv4 to IPv4 */
279 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
280 	    (dst_addr->sa_family == AF_INET)) {
281 		addr = (struct sockaddr_in *)dst_addr;
282 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
283 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
284 				  "dest: %pI4\n", __func__,
285 				  &ha->ip_config.ip_address, ipaddr));
286 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
287 					 ipaddr);
288 		if (rval)
289 			rval = -EINVAL;
290 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
291 		   (dst_addr->sa_family == AF_INET6)) {
292 		/* IPv6 to IPv6 */
293 		addr6 = (struct sockaddr_in6 *)dst_addr;
294 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
295 
296 		options |= PING_IPV6_PROTOCOL_ENABLE;
297 
298 		/* Ping using LinkLocal address */
299 		if ((iface_num == 0) || (iface_num == 1)) {
300 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
301 					  "src: %pI6 dest: %pI6\n", __func__,
302 					  &ha->ip_config.ipv6_link_local_addr,
303 					  ipaddr));
304 			options |= PING_IPV6_LINKLOCAL_ADDR;
305 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
306 						 pid, ipaddr);
307 		} else {
308 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
309 				   "not supported\n", __func__, iface_num);
310 			rval = -ENOSYS;
311 			goto exit_send_ping;
312 		}
313 
314 		/*
315 		 * If ping using LinkLocal address fails, try ping using
316 		 * IPv6 address
317 		 */
318 		if (rval != QLA_SUCCESS) {
319 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
320 			if (iface_num == 0) {
321 				options |= PING_IPV6_ADDR0;
322 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
323 						  "Ping src: %pI6 "
324 						  "dest: %pI6\n", __func__,
325 						  &ha->ip_config.ipv6_addr0,
326 						  ipaddr));
327 			} else if (iface_num == 1) {
328 				options |= PING_IPV6_ADDR1;
329 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
330 						  "Ping src: %pI6 "
331 						  "dest: %pI6\n", __func__,
332 						  &ha->ip_config.ipv6_addr1,
333 						  ipaddr));
334 			}
335 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
336 						 pid, ipaddr);
337 			if (rval)
338 				rval = -EINVAL;
339 		}
340 	} else
341 		rval = -ENOSYS;
342 exit_send_ping:
343 	return rval;
344 }
345 
346 static umode_t qla4_attr_is_visible(int param_type, int param)
347 {
348 	switch (param_type) {
349 	case ISCSI_HOST_PARAM:
350 		switch (param) {
351 		case ISCSI_HOST_PARAM_HWADDRESS:
352 		case ISCSI_HOST_PARAM_IPADDRESS:
353 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
354 		case ISCSI_HOST_PARAM_PORT_STATE:
355 		case ISCSI_HOST_PARAM_PORT_SPEED:
356 			return S_IRUGO;
357 		default:
358 			return 0;
359 		}
360 	case ISCSI_PARAM:
361 		switch (param) {
362 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
363 		case ISCSI_PARAM_PERSISTENT_PORT:
364 		case ISCSI_PARAM_CONN_ADDRESS:
365 		case ISCSI_PARAM_CONN_PORT:
366 		case ISCSI_PARAM_TARGET_NAME:
367 		case ISCSI_PARAM_TPGT:
368 		case ISCSI_PARAM_TARGET_ALIAS:
369 		case ISCSI_PARAM_MAX_BURST:
370 		case ISCSI_PARAM_MAX_R2T:
371 		case ISCSI_PARAM_FIRST_BURST:
372 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
373 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
374 		case ISCSI_PARAM_IFACE_NAME:
375 		case ISCSI_PARAM_CHAP_OUT_IDX:
376 		case ISCSI_PARAM_CHAP_IN_IDX:
377 		case ISCSI_PARAM_USERNAME:
378 		case ISCSI_PARAM_PASSWORD:
379 		case ISCSI_PARAM_USERNAME_IN:
380 		case ISCSI_PARAM_PASSWORD_IN:
381 		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
382 		case ISCSI_PARAM_DISCOVERY_SESS:
383 		case ISCSI_PARAM_PORTAL_TYPE:
384 		case ISCSI_PARAM_CHAP_AUTH_EN:
385 		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
386 		case ISCSI_PARAM_BIDI_CHAP_EN:
387 		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
388 		case ISCSI_PARAM_DEF_TIME2WAIT:
389 		case ISCSI_PARAM_DEF_TIME2RETAIN:
390 		case ISCSI_PARAM_HDRDGST_EN:
391 		case ISCSI_PARAM_DATADGST_EN:
392 		case ISCSI_PARAM_INITIAL_R2T_EN:
393 		case ISCSI_PARAM_IMM_DATA_EN:
394 		case ISCSI_PARAM_PDU_INORDER_EN:
395 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
396 		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
397 		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
398 		case ISCSI_PARAM_TCP_WSF_DISABLE:
399 		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
400 		case ISCSI_PARAM_TCP_TIMER_SCALE:
401 		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
402 		case ISCSI_PARAM_TCP_XMIT_WSF:
403 		case ISCSI_PARAM_TCP_RECV_WSF:
404 		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
405 		case ISCSI_PARAM_IPV4_TOS:
406 		case ISCSI_PARAM_IPV6_TC:
407 		case ISCSI_PARAM_IPV6_FLOW_LABEL:
408 		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
409 		case ISCSI_PARAM_KEEPALIVE_TMO:
410 		case ISCSI_PARAM_LOCAL_PORT:
411 		case ISCSI_PARAM_ISID:
412 		case ISCSI_PARAM_TSID:
413 		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
414 		case ISCSI_PARAM_ERL:
415 		case ISCSI_PARAM_STATSN:
416 		case ISCSI_PARAM_EXP_STATSN:
417 		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
418 		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
419 			return S_IRUGO;
420 		default:
421 			return 0;
422 		}
423 	case ISCSI_NET_PARAM:
424 		switch (param) {
425 		case ISCSI_NET_PARAM_IPV4_ADDR:
426 		case ISCSI_NET_PARAM_IPV4_SUBNET:
427 		case ISCSI_NET_PARAM_IPV4_GW:
428 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
429 		case ISCSI_NET_PARAM_IFACE_ENABLE:
430 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
431 		case ISCSI_NET_PARAM_IPV6_ADDR:
432 		case ISCSI_NET_PARAM_IPV6_ROUTER:
433 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
434 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
435 		case ISCSI_NET_PARAM_VLAN_ID:
436 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
437 		case ISCSI_NET_PARAM_VLAN_ENABLED:
438 		case ISCSI_NET_PARAM_MTU:
439 		case ISCSI_NET_PARAM_PORT:
440 			return S_IRUGO;
441 		default:
442 			return 0;
443 		}
444 	case ISCSI_FLASHNODE_PARAM:
445 		switch (param) {
446 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
447 		case ISCSI_FLASHNODE_PORTAL_TYPE:
448 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
449 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
450 		case ISCSI_FLASHNODE_ENTRY_EN:
451 		case ISCSI_FLASHNODE_HDR_DGST_EN:
452 		case ISCSI_FLASHNODE_DATA_DGST_EN:
453 		case ISCSI_FLASHNODE_IMM_DATA_EN:
454 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
455 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
456 		case ISCSI_FLASHNODE_PDU_INORDER:
457 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
458 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
459 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
460 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
461 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
462 		case ISCSI_FLASHNODE_ERL:
463 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
464 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
465 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
466 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
467 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
468 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
469 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
470 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
471 		case ISCSI_FLASHNODE_FIRST_BURST:
472 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
473 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
474 		case ISCSI_FLASHNODE_MAX_R2T:
475 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
476 		case ISCSI_FLASHNODE_ISID:
477 		case ISCSI_FLASHNODE_TSID:
478 		case ISCSI_FLASHNODE_PORT:
479 		case ISCSI_FLASHNODE_MAX_BURST:
480 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
481 		case ISCSI_FLASHNODE_IPADDR:
482 		case ISCSI_FLASHNODE_ALIAS:
483 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
484 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
485 		case ISCSI_FLASHNODE_LOCAL_PORT:
486 		case ISCSI_FLASHNODE_IPV4_TOS:
487 		case ISCSI_FLASHNODE_IPV6_TC:
488 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
489 		case ISCSI_FLASHNODE_NAME:
490 		case ISCSI_FLASHNODE_TPGT:
491 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
492 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
493 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
494 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
495 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
496 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
497 		case ISCSI_FLASHNODE_USERNAME:
498 		case ISCSI_FLASHNODE_PASSWORD:
499 		case ISCSI_FLASHNODE_STATSN:
500 		case ISCSI_FLASHNODE_EXP_STATSN:
501 		case ISCSI_FLASHNODE_IS_BOOT_TGT:
502 			return S_IRUGO;
503 		default:
504 			return 0;
505 		}
506 	}
507 
508 	return 0;
509 }
510 
511 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
512 				  uint32_t *num_entries, char *buf)
513 {
514 	struct scsi_qla_host *ha = to_qla_host(shost);
515 	struct ql4_chap_table *chap_table;
516 	struct iscsi_chap_rec *chap_rec;
517 	int max_chap_entries = 0;
518 	int valid_chap_entries = 0;
519 	int ret = 0, i;
520 
521 	if (is_qla80XX(ha))
522 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
523 					sizeof(struct ql4_chap_table);
524 	else
525 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
526 
527 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
528 			__func__, *num_entries, chap_tbl_idx);
529 
530 	if (!buf) {
531 		ret = -ENOMEM;
532 		goto exit_get_chap_list;
533 	}
534 
535 	chap_rec = (struct iscsi_chap_rec *) buf;
536 	mutex_lock(&ha->chap_sem);
537 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
538 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
539 		if (chap_table->cookie !=
540 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
541 			continue;
542 
543 		chap_rec->chap_tbl_idx = i;
544 		strncpy(chap_rec->username, chap_table->name,
545 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
546 		strncpy(chap_rec->password, chap_table->secret,
547 			QL4_CHAP_MAX_SECRET_LEN);
548 		chap_rec->password_length = chap_table->secret_len;
549 
550 		if (chap_table->flags & BIT_7) /* local */
551 			chap_rec->chap_type = CHAP_TYPE_OUT;
552 
553 		if (chap_table->flags & BIT_6) /* peer */
554 			chap_rec->chap_type = CHAP_TYPE_IN;
555 
556 		chap_rec++;
557 
558 		valid_chap_entries++;
559 		if (valid_chap_entries == *num_entries)
560 			break;
561 		else
562 			continue;
563 	}
564 	mutex_unlock(&ha->chap_sem);
565 
566 exit_get_chap_list:
567 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
568 			__func__,  valid_chap_entries);
569 	*num_entries = valid_chap_entries;
570 	return ret;
571 }
572 
573 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
574 {
575 	int ret = 0;
576 	uint16_t *chap_tbl_idx = (uint16_t *) data;
577 	struct iscsi_cls_session *cls_session;
578 	struct iscsi_session *sess;
579 	struct ddb_entry *ddb_entry;
580 
581 	if (!iscsi_is_session_dev(dev))
582 		goto exit_is_chap_active;
583 
584 	cls_session = iscsi_dev_to_session(dev);
585 	sess = cls_session->dd_data;
586 	ddb_entry = sess->dd_data;
587 
588 	if (iscsi_session_chkready(cls_session))
589 		goto exit_is_chap_active;
590 
591 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
592 		ret = 1;
593 
594 exit_is_chap_active:
595 	return ret;
596 }
597 
598 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
599 				  uint16_t chap_tbl_idx)
600 {
601 	int ret = 0;
602 
603 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
604 				    __qla4xxx_is_chap_active);
605 
606 	return ret;
607 }
608 
609 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
610 {
611 	struct scsi_qla_host *ha = to_qla_host(shost);
612 	struct ql4_chap_table *chap_table;
613 	dma_addr_t chap_dma;
614 	int max_chap_entries = 0;
615 	uint32_t offset = 0;
616 	uint32_t chap_size;
617 	int ret = 0;
618 
619 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
620 	if (chap_table == NULL)
621 		return -ENOMEM;
622 
623 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
624 
625 	if (is_qla80XX(ha))
626 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
627 				   sizeof(struct ql4_chap_table);
628 	else
629 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
630 
631 	if (chap_tbl_idx > max_chap_entries) {
632 		ret = -EINVAL;
633 		goto exit_delete_chap;
634 	}
635 
636 	/* Check if chap index is in use.
637 	 * If chap is in use don't delet chap entry */
638 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
639 	if (ret) {
640 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
641 			   "delete from flash\n", chap_tbl_idx);
642 		ret = -EBUSY;
643 		goto exit_delete_chap;
644 	}
645 
646 	chap_size = sizeof(struct ql4_chap_table);
647 	if (is_qla40XX(ha))
648 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
649 	else {
650 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
651 		/* flt_chap_size is CHAP table size for both ports
652 		 * so divide it by 2 to calculate the offset for second port
653 		 */
654 		if (ha->port_num == 1)
655 			offset += (ha->hw.flt_chap_size / 2);
656 		offset += (chap_tbl_idx * chap_size);
657 	}
658 
659 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
660 	if (ret != QLA_SUCCESS) {
661 		ret = -EINVAL;
662 		goto exit_delete_chap;
663 	}
664 
665 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
666 			  __le16_to_cpu(chap_table->cookie)));
667 
668 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
669 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
670 		goto exit_delete_chap;
671 	}
672 
673 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
674 
675 	offset = FLASH_CHAP_OFFSET |
676 			(chap_tbl_idx * sizeof(struct ql4_chap_table));
677 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
678 				FLASH_OPT_RMW_COMMIT);
679 	if (ret == QLA_SUCCESS && ha->chap_list) {
680 		mutex_lock(&ha->chap_sem);
681 		/* Update ha chap_list cache */
682 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
683 			chap_table, sizeof(struct ql4_chap_table));
684 		mutex_unlock(&ha->chap_sem);
685 	}
686 	if (ret != QLA_SUCCESS)
687 		ret =  -EINVAL;
688 
689 exit_delete_chap:
690 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
691 	return ret;
692 }
693 
694 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
695 				   enum iscsi_param_type param_type,
696 				   int param, char *buf)
697 {
698 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
699 	struct scsi_qla_host *ha = to_qla_host(shost);
700 	int len = -ENOSYS;
701 
702 	if (param_type != ISCSI_NET_PARAM)
703 		return -ENOSYS;
704 
705 	switch (param) {
706 	case ISCSI_NET_PARAM_IPV4_ADDR:
707 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
708 		break;
709 	case ISCSI_NET_PARAM_IPV4_SUBNET:
710 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
711 		break;
712 	case ISCSI_NET_PARAM_IPV4_GW:
713 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
714 		break;
715 	case ISCSI_NET_PARAM_IFACE_ENABLE:
716 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
717 			len = sprintf(buf, "%s\n",
718 				      (ha->ip_config.ipv4_options &
719 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
720 				      "enabled" : "disabled");
721 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
722 			len = sprintf(buf, "%s\n",
723 				      (ha->ip_config.ipv6_options &
724 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
725 				       "enabled" : "disabled");
726 		break;
727 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
728 		len = sprintf(buf, "%s\n",
729 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
730 			      "dhcp" : "static");
731 		break;
732 	case ISCSI_NET_PARAM_IPV6_ADDR:
733 		if (iface->iface_num == 0)
734 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
735 		if (iface->iface_num == 1)
736 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
737 		break;
738 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
739 		len = sprintf(buf, "%pI6\n",
740 			      &ha->ip_config.ipv6_link_local_addr);
741 		break;
742 	case ISCSI_NET_PARAM_IPV6_ROUTER:
743 		len = sprintf(buf, "%pI6\n",
744 			      &ha->ip_config.ipv6_default_router_addr);
745 		break;
746 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
747 		len = sprintf(buf, "%s\n",
748 			      (ha->ip_config.ipv6_addl_options &
749 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
750 			       "nd" : "static");
751 		break;
752 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
753 		len = sprintf(buf, "%s\n",
754 			      (ha->ip_config.ipv6_addl_options &
755 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
756 			       "auto" : "static");
757 		break;
758 	case ISCSI_NET_PARAM_VLAN_ID:
759 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
760 			len = sprintf(buf, "%d\n",
761 				      (ha->ip_config.ipv4_vlan_tag &
762 				       ISCSI_MAX_VLAN_ID));
763 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
764 			len = sprintf(buf, "%d\n",
765 				      (ha->ip_config.ipv6_vlan_tag &
766 				       ISCSI_MAX_VLAN_ID));
767 		break;
768 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
769 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
770 			len = sprintf(buf, "%d\n",
771 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
772 					ISCSI_MAX_VLAN_PRIORITY));
773 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
774 			len = sprintf(buf, "%d\n",
775 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
776 					ISCSI_MAX_VLAN_PRIORITY));
777 		break;
778 	case ISCSI_NET_PARAM_VLAN_ENABLED:
779 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
780 			len = sprintf(buf, "%s\n",
781 				      (ha->ip_config.ipv4_options &
782 				       IPOPT_VLAN_TAGGING_ENABLE) ?
783 				       "enabled" : "disabled");
784 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
785 			len = sprintf(buf, "%s\n",
786 				      (ha->ip_config.ipv6_options &
787 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
788 				       "enabled" : "disabled");
789 		break;
790 	case ISCSI_NET_PARAM_MTU:
791 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
792 		break;
793 	case ISCSI_NET_PARAM_PORT:
794 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
795 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
796 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
797 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
798 		break;
799 	default:
800 		len = -ENOSYS;
801 	}
802 
803 	return len;
804 }
805 
806 static struct iscsi_endpoint *
807 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
808 		   int non_blocking)
809 {
810 	int ret;
811 	struct iscsi_endpoint *ep;
812 	struct qla_endpoint *qla_ep;
813 	struct scsi_qla_host *ha;
814 	struct sockaddr_in *addr;
815 	struct sockaddr_in6 *addr6;
816 
817 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
818 	if (!shost) {
819 		ret = -ENXIO;
820 		printk(KERN_ERR "%s: shost is NULL\n",
821 		       __func__);
822 		return ERR_PTR(ret);
823 	}
824 
825 	ha = iscsi_host_priv(shost);
826 
827 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
828 	if (!ep) {
829 		ret = -ENOMEM;
830 		return ERR_PTR(ret);
831 	}
832 
833 	qla_ep = ep->dd_data;
834 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
835 	if (dst_addr->sa_family == AF_INET) {
836 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
837 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
838 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
839 				  (char *)&addr->sin_addr));
840 	} else if (dst_addr->sa_family == AF_INET6) {
841 		memcpy(&qla_ep->dst_addr, dst_addr,
842 		       sizeof(struct sockaddr_in6));
843 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
844 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
845 				  (char *)&addr6->sin6_addr));
846 	}
847 
848 	qla_ep->host = shost;
849 
850 	return ep;
851 }
852 
853 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
854 {
855 	struct qla_endpoint *qla_ep;
856 	struct scsi_qla_host *ha;
857 	int ret = 0;
858 
859 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
860 	qla_ep = ep->dd_data;
861 	ha = to_qla_host(qla_ep->host);
862 
863 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
864 		ret = 1;
865 
866 	return ret;
867 }
868 
869 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
870 {
871 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
872 	iscsi_destroy_endpoint(ep);
873 }
874 
875 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
876 				enum iscsi_param param,
877 				char *buf)
878 {
879 	struct qla_endpoint *qla_ep = ep->dd_data;
880 	struct sockaddr *dst_addr;
881 
882 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
883 
884 	switch (param) {
885 	case ISCSI_PARAM_CONN_PORT:
886 	case ISCSI_PARAM_CONN_ADDRESS:
887 		if (!qla_ep)
888 			return -ENOTCONN;
889 
890 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
891 		if (!dst_addr)
892 			return -ENOTCONN;
893 
894 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
895 						 &qla_ep->dst_addr, param, buf);
896 	default:
897 		return -ENOSYS;
898 	}
899 }
900 
901 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
902 				   struct iscsi_stats *stats)
903 {
904 	struct iscsi_session *sess;
905 	struct iscsi_cls_session *cls_sess;
906 	struct ddb_entry *ddb_entry;
907 	struct scsi_qla_host *ha;
908 	struct ql_iscsi_stats *ql_iscsi_stats;
909 	int stats_size;
910 	int ret;
911 	dma_addr_t iscsi_stats_dma;
912 
913 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
914 
915 	cls_sess = iscsi_conn_to_session(cls_conn);
916 	sess = cls_sess->dd_data;
917 	ddb_entry = sess->dd_data;
918 	ha = ddb_entry->ha;
919 
920 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
921 	/* Allocate memory */
922 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
923 					    &iscsi_stats_dma, GFP_KERNEL);
924 	if (!ql_iscsi_stats) {
925 		ql4_printk(KERN_ERR, ha,
926 			   "Unable to allocate memory for iscsi stats\n");
927 		goto exit_get_stats;
928 	}
929 
930 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
931 				     iscsi_stats_dma);
932 	if (ret != QLA_SUCCESS) {
933 		ql4_printk(KERN_ERR, ha,
934 			   "Unable to retrieve iscsi stats\n");
935 		goto free_stats;
936 	}
937 
938 	/* octets */
939 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
940 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
941 	/* xmit pdus */
942 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
943 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
944 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
945 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
946 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
947 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
948 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
949 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
950 	/* recv pdus */
951 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
952 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
953 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
954 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
955 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
956 	stats->logoutrsp_pdus =
957 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
958 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
959 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
960 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
961 
962 free_stats:
963 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
964 			  iscsi_stats_dma);
965 exit_get_stats:
966 	return;
967 }
968 
969 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
970 {
971 	struct iscsi_cls_session *session;
972 	struct iscsi_session *sess;
973 	unsigned long flags;
974 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
975 
976 	session = starget_to_session(scsi_target(sc->device));
977 	sess = session->dd_data;
978 
979 	spin_lock_irqsave(&session->lock, flags);
980 	if (session->state == ISCSI_SESSION_FAILED)
981 		ret = BLK_EH_RESET_TIMER;
982 	spin_unlock_irqrestore(&session->lock, flags);
983 
984 	return ret;
985 }
986 
987 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
988 {
989 	struct scsi_qla_host *ha = to_qla_host(shost);
990 	struct iscsi_cls_host *ihost = shost->shost_data;
991 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
992 
993 	qla4xxx_get_firmware_state(ha);
994 
995 	switch (ha->addl_fw_state & 0x0F00) {
996 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
997 		speed = ISCSI_PORT_SPEED_10MBPS;
998 		break;
999 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
1000 		speed = ISCSI_PORT_SPEED_100MBPS;
1001 		break;
1002 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
1003 		speed = ISCSI_PORT_SPEED_1GBPS;
1004 		break;
1005 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
1006 		speed = ISCSI_PORT_SPEED_10GBPS;
1007 		break;
1008 	}
1009 	ihost->port_speed = speed;
1010 }
1011 
1012 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1013 {
1014 	struct scsi_qla_host *ha = to_qla_host(shost);
1015 	struct iscsi_cls_host *ihost = shost->shost_data;
1016 	uint32_t state = ISCSI_PORT_STATE_DOWN;
1017 
1018 	if (test_bit(AF_LINK_UP, &ha->flags))
1019 		state = ISCSI_PORT_STATE_UP;
1020 
1021 	ihost->port_state = state;
1022 }
1023 
1024 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1025 				  enum iscsi_host_param param, char *buf)
1026 {
1027 	struct scsi_qla_host *ha = to_qla_host(shost);
1028 	int len;
1029 
1030 	switch (param) {
1031 	case ISCSI_HOST_PARAM_HWADDRESS:
1032 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1033 		break;
1034 	case ISCSI_HOST_PARAM_IPADDRESS:
1035 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1036 		break;
1037 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
1038 		len = sprintf(buf, "%s\n", ha->name_string);
1039 		break;
1040 	case ISCSI_HOST_PARAM_PORT_STATE:
1041 		qla4xxx_set_port_state(shost);
1042 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1043 		break;
1044 	case ISCSI_HOST_PARAM_PORT_SPEED:
1045 		qla4xxx_set_port_speed(shost);
1046 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1047 		break;
1048 	default:
1049 		return -ENOSYS;
1050 	}
1051 
1052 	return len;
1053 }
1054 
1055 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1056 {
1057 	if (ha->iface_ipv4)
1058 		return;
1059 
1060 	/* IPv4 */
1061 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
1062 					    &qla4xxx_iscsi_transport,
1063 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
1064 	if (!ha->iface_ipv4)
1065 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1066 			   "iface0.\n");
1067 }
1068 
1069 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1070 {
1071 	if (!ha->iface_ipv6_0)
1072 		/* IPv6 iface-0 */
1073 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1074 						      &qla4xxx_iscsi_transport,
1075 						      ISCSI_IFACE_TYPE_IPV6, 0,
1076 						      0);
1077 	if (!ha->iface_ipv6_0)
1078 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1079 			   "iface0.\n");
1080 
1081 	if (!ha->iface_ipv6_1)
1082 		/* IPv6 iface-1 */
1083 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1084 						      &qla4xxx_iscsi_transport,
1085 						      ISCSI_IFACE_TYPE_IPV6, 1,
1086 						      0);
1087 	if (!ha->iface_ipv6_1)
1088 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1089 			   "iface1.\n");
1090 }
1091 
1092 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1093 {
1094 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1095 		qla4xxx_create_ipv4_iface(ha);
1096 
1097 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1098 		qla4xxx_create_ipv6_iface(ha);
1099 }
1100 
1101 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1102 {
1103 	if (ha->iface_ipv4) {
1104 		iscsi_destroy_iface(ha->iface_ipv4);
1105 		ha->iface_ipv4 = NULL;
1106 	}
1107 }
1108 
1109 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1110 {
1111 	if (ha->iface_ipv6_0) {
1112 		iscsi_destroy_iface(ha->iface_ipv6_0);
1113 		ha->iface_ipv6_0 = NULL;
1114 	}
1115 	if (ha->iface_ipv6_1) {
1116 		iscsi_destroy_iface(ha->iface_ipv6_1);
1117 		ha->iface_ipv6_1 = NULL;
1118 	}
1119 }
1120 
1121 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1122 {
1123 	qla4xxx_destroy_ipv4_iface(ha);
1124 	qla4xxx_destroy_ipv6_iface(ha);
1125 }
1126 
1127 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1128 			     struct iscsi_iface_param_info *iface_param,
1129 			     struct addr_ctrl_blk *init_fw_cb)
1130 {
1131 	/*
1132 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1133 	 * iface_num 1 is valid only for IPv6 Addr.
1134 	 */
1135 	switch (iface_param->param) {
1136 	case ISCSI_NET_PARAM_IPV6_ADDR:
1137 		if (iface_param->iface_num & 0x1)
1138 			/* IPv6 Addr 1 */
1139 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1140 			       sizeof(init_fw_cb->ipv6_addr1));
1141 		else
1142 			/* IPv6 Addr 0 */
1143 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1144 			       sizeof(init_fw_cb->ipv6_addr0));
1145 		break;
1146 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1147 		if (iface_param->iface_num & 0x1)
1148 			break;
1149 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1150 		       sizeof(init_fw_cb->ipv6_if_id));
1151 		break;
1152 	case ISCSI_NET_PARAM_IPV6_ROUTER:
1153 		if (iface_param->iface_num & 0x1)
1154 			break;
1155 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1156 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1157 		break;
1158 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1159 		/* Autocfg applies to even interface */
1160 		if (iface_param->iface_num & 0x1)
1161 			break;
1162 
1163 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1164 			init_fw_cb->ipv6_addtl_opts &=
1165 				cpu_to_le16(
1166 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1167 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1168 			init_fw_cb->ipv6_addtl_opts |=
1169 				cpu_to_le16(
1170 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1171 		else
1172 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1173 				   "IPv6 addr\n");
1174 		break;
1175 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1176 		/* Autocfg applies to even interface */
1177 		if (iface_param->iface_num & 0x1)
1178 			break;
1179 
1180 		if (iface_param->value[0] ==
1181 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1182 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1183 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1184 		else if (iface_param->value[0] ==
1185 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1186 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1187 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1188 		else
1189 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1190 				   "IPv6 linklocal addr\n");
1191 		break;
1192 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1193 		/* Autocfg applies to even interface */
1194 		if (iface_param->iface_num & 0x1)
1195 			break;
1196 
1197 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1198 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1199 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1200 		break;
1201 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1202 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1203 			init_fw_cb->ipv6_opts |=
1204 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1205 			qla4xxx_create_ipv6_iface(ha);
1206 		} else {
1207 			init_fw_cb->ipv6_opts &=
1208 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1209 					    0xFFFF);
1210 			qla4xxx_destroy_ipv6_iface(ha);
1211 		}
1212 		break;
1213 	case ISCSI_NET_PARAM_VLAN_TAG:
1214 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1215 			break;
1216 		init_fw_cb->ipv6_vlan_tag =
1217 				cpu_to_be16(*(uint16_t *)iface_param->value);
1218 		break;
1219 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1220 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1221 			init_fw_cb->ipv6_opts |=
1222 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1223 		else
1224 			init_fw_cb->ipv6_opts &=
1225 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1226 		break;
1227 	case ISCSI_NET_PARAM_MTU:
1228 		init_fw_cb->eth_mtu_size =
1229 				cpu_to_le16(*(uint16_t *)iface_param->value);
1230 		break;
1231 	case ISCSI_NET_PARAM_PORT:
1232 		/* Autocfg applies to even interface */
1233 		if (iface_param->iface_num & 0x1)
1234 			break;
1235 
1236 		init_fw_cb->ipv6_port =
1237 				cpu_to_le16(*(uint16_t *)iface_param->value);
1238 		break;
1239 	default:
1240 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1241 			   iface_param->param);
1242 		break;
1243 	}
1244 }
1245 
1246 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1247 			     struct iscsi_iface_param_info *iface_param,
1248 			     struct addr_ctrl_blk *init_fw_cb)
1249 {
1250 	switch (iface_param->param) {
1251 	case ISCSI_NET_PARAM_IPV4_ADDR:
1252 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1253 		       sizeof(init_fw_cb->ipv4_addr));
1254 		break;
1255 	case ISCSI_NET_PARAM_IPV4_SUBNET:
1256 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
1257 		       sizeof(init_fw_cb->ipv4_subnet));
1258 		break;
1259 	case ISCSI_NET_PARAM_IPV4_GW:
1260 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1261 		       sizeof(init_fw_cb->ipv4_gw_addr));
1262 		break;
1263 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1264 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1265 			init_fw_cb->ipv4_tcp_opts |=
1266 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
1267 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1268 			init_fw_cb->ipv4_tcp_opts &=
1269 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1270 		else
1271 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1272 		break;
1273 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1274 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1275 			init_fw_cb->ipv4_ip_opts |=
1276 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1277 			qla4xxx_create_ipv4_iface(ha);
1278 		} else {
1279 			init_fw_cb->ipv4_ip_opts &=
1280 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1281 					    0xFFFF);
1282 			qla4xxx_destroy_ipv4_iface(ha);
1283 		}
1284 		break;
1285 	case ISCSI_NET_PARAM_VLAN_TAG:
1286 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1287 			break;
1288 		init_fw_cb->ipv4_vlan_tag =
1289 				cpu_to_be16(*(uint16_t *)iface_param->value);
1290 		break;
1291 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1292 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1293 			init_fw_cb->ipv4_ip_opts |=
1294 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1295 		else
1296 			init_fw_cb->ipv4_ip_opts &=
1297 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1298 		break;
1299 	case ISCSI_NET_PARAM_MTU:
1300 		init_fw_cb->eth_mtu_size =
1301 				cpu_to_le16(*(uint16_t *)iface_param->value);
1302 		break;
1303 	case ISCSI_NET_PARAM_PORT:
1304 		init_fw_cb->ipv4_port =
1305 				cpu_to_le16(*(uint16_t *)iface_param->value);
1306 		break;
1307 	default:
1308 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1309 			   iface_param->param);
1310 		break;
1311 	}
1312 }
1313 
1314 static void
1315 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1316 {
1317 	struct addr_ctrl_blk_def *acb;
1318 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1319 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
1320 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
1321 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
1322 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
1323 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
1324 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
1325 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
1326 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
1327 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
1328 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
1329 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
1330 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
1331 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
1332 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
1333 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
1334 }
1335 
1336 static int
1337 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1338 {
1339 	struct scsi_qla_host *ha = to_qla_host(shost);
1340 	int rval = 0;
1341 	struct iscsi_iface_param_info *iface_param = NULL;
1342 	struct addr_ctrl_blk *init_fw_cb = NULL;
1343 	dma_addr_t init_fw_cb_dma;
1344 	uint32_t mbox_cmd[MBOX_REG_COUNT];
1345 	uint32_t mbox_sts[MBOX_REG_COUNT];
1346 	uint32_t rem = len;
1347 	struct nlattr *attr;
1348 
1349 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1350 					sizeof(struct addr_ctrl_blk),
1351 					&init_fw_cb_dma, GFP_KERNEL);
1352 	if (!init_fw_cb) {
1353 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1354 			   __func__);
1355 		return -ENOMEM;
1356 	}
1357 
1358 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1359 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1360 	memset(&mbox_sts, 0, sizeof(mbox_sts));
1361 
1362 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1363 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1364 		rval = -EIO;
1365 		goto exit_init_fw_cb;
1366 	}
1367 
1368 	nla_for_each_attr(attr, data, len, rem) {
1369 		iface_param = nla_data(attr);
1370 
1371 		if (iface_param->param_type != ISCSI_NET_PARAM)
1372 			continue;
1373 
1374 		switch (iface_param->iface_type) {
1375 		case ISCSI_IFACE_TYPE_IPV4:
1376 			switch (iface_param->iface_num) {
1377 			case 0:
1378 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1379 				break;
1380 			default:
1381 				/* Cannot have more than one IPv4 interface */
1382 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1383 					   "number = %d\n",
1384 					   iface_param->iface_num);
1385 				break;
1386 			}
1387 			break;
1388 		case ISCSI_IFACE_TYPE_IPV6:
1389 			switch (iface_param->iface_num) {
1390 			case 0:
1391 			case 1:
1392 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1393 				break;
1394 			default:
1395 				/* Cannot have more than two IPv6 interface */
1396 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1397 					   "number = %d\n",
1398 					   iface_param->iface_num);
1399 				break;
1400 			}
1401 			break;
1402 		default:
1403 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1404 			break;
1405 		}
1406 	}
1407 
1408 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1409 
1410 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1411 				 sizeof(struct addr_ctrl_blk),
1412 				 FLASH_OPT_RMW_COMMIT);
1413 	if (rval != QLA_SUCCESS) {
1414 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1415 			   __func__);
1416 		rval = -EIO;
1417 		goto exit_init_fw_cb;
1418 	}
1419 
1420 	rval = qla4xxx_disable_acb(ha);
1421 	if (rval != QLA_SUCCESS) {
1422 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1423 			   __func__);
1424 		rval = -EIO;
1425 		goto exit_init_fw_cb;
1426 	}
1427 
1428 	wait_for_completion_timeout(&ha->disable_acb_comp,
1429 				    DISABLE_ACB_TOV * HZ);
1430 
1431 	qla4xxx_initcb_to_acb(init_fw_cb);
1432 
1433 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1434 	if (rval != QLA_SUCCESS) {
1435 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1436 			   __func__);
1437 		rval = -EIO;
1438 		goto exit_init_fw_cb;
1439 	}
1440 
1441 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1442 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1443 				  init_fw_cb_dma);
1444 
1445 exit_init_fw_cb:
1446 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1447 			  init_fw_cb, init_fw_cb_dma);
1448 
1449 	return rval;
1450 }
1451 
1452 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1453 				     enum iscsi_param param, char *buf)
1454 {
1455 	struct iscsi_session *sess = cls_sess->dd_data;
1456 	struct ddb_entry *ddb_entry = sess->dd_data;
1457 	struct scsi_qla_host *ha = ddb_entry->ha;
1458 	int rval, len;
1459 	uint16_t idx;
1460 
1461 	switch (param) {
1462 	case ISCSI_PARAM_CHAP_IN_IDX:
1463 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
1464 					      sess->password_in, BIDI_CHAP,
1465 					      &idx);
1466 		if (rval)
1467 			len = sprintf(buf, "\n");
1468 		else
1469 			len = sprintf(buf, "%hu\n", idx);
1470 		break;
1471 	case ISCSI_PARAM_CHAP_OUT_IDX:
1472 		rval = qla4xxx_get_chap_index(ha, sess->username,
1473 					      sess->password, LOCAL_CHAP,
1474 					      &idx);
1475 		if (rval)
1476 			len = sprintf(buf, "\n");
1477 		else
1478 			len = sprintf(buf, "%hu\n", idx);
1479 		break;
1480 	default:
1481 		return iscsi_session_get_param(cls_sess, param, buf);
1482 	}
1483 
1484 	return len;
1485 }
1486 
1487 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1488 				  enum iscsi_param param, char *buf)
1489 {
1490 	struct iscsi_conn *conn;
1491 	struct qla_conn *qla_conn;
1492 	struct sockaddr *dst_addr;
1493 	int len = 0;
1494 
1495 	conn = cls_conn->dd_data;
1496 	qla_conn = conn->dd_data;
1497 	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1498 
1499 	switch (param) {
1500 	case ISCSI_PARAM_CONN_PORT:
1501 	case ISCSI_PARAM_CONN_ADDRESS:
1502 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1503 						 dst_addr, param, buf);
1504 	default:
1505 		return iscsi_conn_get_param(cls_conn, param, buf);
1506 	}
1507 
1508 	return len;
1509 
1510 }
1511 
1512 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1513 {
1514 	uint32_t mbx_sts = 0;
1515 	uint16_t tmp_ddb_index;
1516 	int ret;
1517 
1518 get_ddb_index:
1519 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1520 
1521 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1522 		DEBUG2(ql4_printk(KERN_INFO, ha,
1523 				  "Free DDB index not available\n"));
1524 		ret = QLA_ERROR;
1525 		goto exit_get_ddb_index;
1526 	}
1527 
1528 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1529 		goto get_ddb_index;
1530 
1531 	DEBUG2(ql4_printk(KERN_INFO, ha,
1532 			  "Found a free DDB index at %d\n", tmp_ddb_index));
1533 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1534 	if (ret == QLA_ERROR) {
1535 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1536 			ql4_printk(KERN_INFO, ha,
1537 				   "DDB index = %d not available trying next\n",
1538 				   tmp_ddb_index);
1539 			goto get_ddb_index;
1540 		}
1541 		DEBUG2(ql4_printk(KERN_INFO, ha,
1542 				  "Free FW DDB not available\n"));
1543 	}
1544 
1545 	*ddb_index = tmp_ddb_index;
1546 
1547 exit_get_ddb_index:
1548 	return ret;
1549 }
1550 
1551 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1552 				   struct ddb_entry *ddb_entry,
1553 				   char *existing_ipaddr,
1554 				   char *user_ipaddr)
1555 {
1556 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1557 	char formatted_ipaddr[DDB_IPADDR_LEN];
1558 	int status = QLA_SUCCESS, ret = 0;
1559 
1560 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1561 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1562 			       '\0', NULL);
1563 		if (ret == 0) {
1564 			status = QLA_ERROR;
1565 			goto out_match;
1566 		}
1567 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1568 	} else {
1569 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1570 			       '\0', NULL);
1571 		if (ret == 0) {
1572 			status = QLA_ERROR;
1573 			goto out_match;
1574 		}
1575 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1576 	}
1577 
1578 	if (strcmp(existing_ipaddr, formatted_ipaddr))
1579 		status = QLA_ERROR;
1580 
1581 out_match:
1582 	return status;
1583 }
1584 
1585 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1586 				      struct iscsi_cls_conn *cls_conn)
1587 {
1588 	int idx = 0, max_ddbs, rval;
1589 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1590 	struct iscsi_session *sess, *existing_sess;
1591 	struct iscsi_conn *conn, *existing_conn;
1592 	struct ddb_entry *ddb_entry;
1593 
1594 	sess = cls_sess->dd_data;
1595 	conn = cls_conn->dd_data;
1596 
1597 	if (sess->targetname == NULL ||
1598 	    conn->persistent_address == NULL ||
1599 	    conn->persistent_port == 0)
1600 		return QLA_ERROR;
1601 
1602 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1603 				     MAX_DEV_DB_ENTRIES;
1604 
1605 	for (idx = 0; idx < max_ddbs; idx++) {
1606 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1607 		if (ddb_entry == NULL)
1608 			continue;
1609 
1610 		if (ddb_entry->ddb_type != FLASH_DDB)
1611 			continue;
1612 
1613 		existing_sess = ddb_entry->sess->dd_data;
1614 		existing_conn = ddb_entry->conn->dd_data;
1615 
1616 		if (existing_sess->targetname == NULL ||
1617 		    existing_conn->persistent_address == NULL ||
1618 		    existing_conn->persistent_port == 0)
1619 			continue;
1620 
1621 		DEBUG2(ql4_printk(KERN_INFO, ha,
1622 				  "IQN = %s User IQN = %s\n",
1623 				  existing_sess->targetname,
1624 				  sess->targetname));
1625 
1626 		DEBUG2(ql4_printk(KERN_INFO, ha,
1627 				  "IP = %s User IP = %s\n",
1628 				  existing_conn->persistent_address,
1629 				  conn->persistent_address));
1630 
1631 		DEBUG2(ql4_printk(KERN_INFO, ha,
1632 				  "Port = %d User Port = %d\n",
1633 				  existing_conn->persistent_port,
1634 				  conn->persistent_port));
1635 
1636 		if (strcmp(existing_sess->targetname, sess->targetname))
1637 			continue;
1638 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1639 					existing_conn->persistent_address,
1640 					conn->persistent_address);
1641 		if (rval == QLA_ERROR)
1642 			continue;
1643 		if (existing_conn->persistent_port != conn->persistent_port)
1644 			continue;
1645 		break;
1646 	}
1647 
1648 	if (idx == max_ddbs)
1649 		return QLA_ERROR;
1650 
1651 	DEBUG2(ql4_printk(KERN_INFO, ha,
1652 			  "Match found in fwdb sessions\n"));
1653 	return QLA_SUCCESS;
1654 }
1655 
1656 static struct iscsi_cls_session *
1657 qla4xxx_session_create(struct iscsi_endpoint *ep,
1658 			uint16_t cmds_max, uint16_t qdepth,
1659 			uint32_t initial_cmdsn)
1660 {
1661 	struct iscsi_cls_session *cls_sess;
1662 	struct scsi_qla_host *ha;
1663 	struct qla_endpoint *qla_ep;
1664 	struct ddb_entry *ddb_entry;
1665 	uint16_t ddb_index;
1666 	struct iscsi_session *sess;
1667 	struct sockaddr *dst_addr;
1668 	int ret;
1669 
1670 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1671 	if (!ep) {
1672 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1673 		return NULL;
1674 	}
1675 
1676 	qla_ep = ep->dd_data;
1677 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1678 	ha = to_qla_host(qla_ep->host);
1679 
1680 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1681 	if (ret == QLA_ERROR)
1682 		return NULL;
1683 
1684 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1685 				       cmds_max, sizeof(struct ddb_entry),
1686 				       sizeof(struct ql4_task_data),
1687 				       initial_cmdsn, ddb_index);
1688 	if (!cls_sess)
1689 		return NULL;
1690 
1691 	sess = cls_sess->dd_data;
1692 	ddb_entry = sess->dd_data;
1693 	ddb_entry->fw_ddb_index = ddb_index;
1694 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1695 	ddb_entry->ha = ha;
1696 	ddb_entry->sess = cls_sess;
1697 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1698 	ddb_entry->ddb_change = qla4xxx_ddb_change;
1699 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1700 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1701 	ha->tot_ddbs++;
1702 
1703 	return cls_sess;
1704 }
1705 
1706 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1707 {
1708 	struct iscsi_session *sess;
1709 	struct ddb_entry *ddb_entry;
1710 	struct scsi_qla_host *ha;
1711 	unsigned long flags, wtime;
1712 	struct dev_db_entry *fw_ddb_entry = NULL;
1713 	dma_addr_t fw_ddb_entry_dma;
1714 	uint32_t ddb_state;
1715 	int ret;
1716 
1717 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1718 	sess = cls_sess->dd_data;
1719 	ddb_entry = sess->dd_data;
1720 	ha = ddb_entry->ha;
1721 
1722 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1723 					  &fw_ddb_entry_dma, GFP_KERNEL);
1724 	if (!fw_ddb_entry) {
1725 		ql4_printk(KERN_ERR, ha,
1726 			   "%s: Unable to allocate dma buffer\n", __func__);
1727 		goto destroy_session;
1728 	}
1729 
1730 	wtime = jiffies + (HZ * LOGOUT_TOV);
1731 	do {
1732 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1733 					      fw_ddb_entry, fw_ddb_entry_dma,
1734 					      NULL, NULL, &ddb_state, NULL,
1735 					      NULL, NULL);
1736 		if (ret == QLA_ERROR)
1737 			goto destroy_session;
1738 
1739 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1740 		    (ddb_state == DDB_DS_SESSION_FAILED))
1741 			goto destroy_session;
1742 
1743 		schedule_timeout_uninterruptible(HZ);
1744 	} while ((time_after(wtime, jiffies)));
1745 
1746 destroy_session:
1747 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1748 
1749 	spin_lock_irqsave(&ha->hardware_lock, flags);
1750 	qla4xxx_free_ddb(ha, ddb_entry);
1751 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1752 
1753 	iscsi_session_teardown(cls_sess);
1754 
1755 	if (fw_ddb_entry)
1756 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1757 				  fw_ddb_entry, fw_ddb_entry_dma);
1758 }
1759 
1760 static struct iscsi_cls_conn *
1761 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1762 {
1763 	struct iscsi_cls_conn *cls_conn;
1764 	struct iscsi_session *sess;
1765 	struct ddb_entry *ddb_entry;
1766 
1767 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1768 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1769 				    conn_idx);
1770 	if (!cls_conn)
1771 		return NULL;
1772 
1773 	sess = cls_sess->dd_data;
1774 	ddb_entry = sess->dd_data;
1775 	ddb_entry->conn = cls_conn;
1776 
1777 	return cls_conn;
1778 }
1779 
1780 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1781 			     struct iscsi_cls_conn *cls_conn,
1782 			     uint64_t transport_fd, int is_leading)
1783 {
1784 	struct iscsi_conn *conn;
1785 	struct qla_conn *qla_conn;
1786 	struct iscsi_endpoint *ep;
1787 
1788 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1789 
1790 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1791 		return -EINVAL;
1792 	ep = iscsi_lookup_endpoint(transport_fd);
1793 	conn = cls_conn->dd_data;
1794 	qla_conn = conn->dd_data;
1795 	qla_conn->qla_ep = ep->dd_data;
1796 	return 0;
1797 }
1798 
1799 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1800 {
1801 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1802 	struct iscsi_session *sess;
1803 	struct ddb_entry *ddb_entry;
1804 	struct scsi_qla_host *ha;
1805 	struct dev_db_entry *fw_ddb_entry = NULL;
1806 	dma_addr_t fw_ddb_entry_dma;
1807 	uint32_t mbx_sts = 0;
1808 	int ret = 0;
1809 	int status = QLA_SUCCESS;
1810 
1811 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1812 	sess = cls_sess->dd_data;
1813 	ddb_entry = sess->dd_data;
1814 	ha = ddb_entry->ha;
1815 
1816 	/* Check if we have  matching FW DDB, if yes then do not
1817 	 * login to this target. This could cause target to logout previous
1818 	 * connection
1819 	 */
1820 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1821 	if (ret == QLA_SUCCESS) {
1822 		ql4_printk(KERN_INFO, ha,
1823 			   "Session already exist in FW.\n");
1824 		ret = -EEXIST;
1825 		goto exit_conn_start;
1826 	}
1827 
1828 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1829 					  &fw_ddb_entry_dma, GFP_KERNEL);
1830 	if (!fw_ddb_entry) {
1831 		ql4_printk(KERN_ERR, ha,
1832 			   "%s: Unable to allocate dma buffer\n", __func__);
1833 		ret = -ENOMEM;
1834 		goto exit_conn_start;
1835 	}
1836 
1837 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1838 	if (ret) {
1839 		/* If iscsid is stopped and started then no need to do
1840 		* set param again since ddb state will be already
1841 		* active and FW does not allow set ddb to an
1842 		* active session.
1843 		*/
1844 		if (mbx_sts)
1845 			if (ddb_entry->fw_ddb_device_state ==
1846 						DDB_DS_SESSION_ACTIVE) {
1847 				ddb_entry->unblock_sess(ddb_entry->sess);
1848 				goto exit_set_param;
1849 			}
1850 
1851 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1852 			   __func__, ddb_entry->fw_ddb_index);
1853 		goto exit_conn_start;
1854 	}
1855 
1856 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1857 	if (status == QLA_ERROR) {
1858 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1859 			   sess->targetname);
1860 		ret = -EINVAL;
1861 		goto exit_conn_start;
1862 	}
1863 
1864 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1865 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1866 
1867 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1868 		      ddb_entry->fw_ddb_device_state));
1869 
1870 exit_set_param:
1871 	ret = 0;
1872 
1873 exit_conn_start:
1874 	if (fw_ddb_entry)
1875 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1876 				  fw_ddb_entry, fw_ddb_entry_dma);
1877 	return ret;
1878 }
1879 
1880 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1881 {
1882 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1883 	struct iscsi_session *sess;
1884 	struct scsi_qla_host *ha;
1885 	struct ddb_entry *ddb_entry;
1886 	int options;
1887 
1888 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1889 	sess = cls_sess->dd_data;
1890 	ddb_entry = sess->dd_data;
1891 	ha = ddb_entry->ha;
1892 
1893 	options = LOGOUT_OPTION_CLOSE_SESSION;
1894 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1895 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1896 }
1897 
1898 static void qla4xxx_task_work(struct work_struct *wdata)
1899 {
1900 	struct ql4_task_data *task_data;
1901 	struct scsi_qla_host *ha;
1902 	struct passthru_status *sts;
1903 	struct iscsi_task *task;
1904 	struct iscsi_hdr *hdr;
1905 	uint8_t *data;
1906 	uint32_t data_len;
1907 	struct iscsi_conn *conn;
1908 	int hdr_len;
1909 	itt_t itt;
1910 
1911 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1912 	ha = task_data->ha;
1913 	task = task_data->task;
1914 	sts = &task_data->sts;
1915 	hdr_len = sizeof(struct iscsi_hdr);
1916 
1917 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1918 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1919 	DEBUG3(printk(KERN_INFO "Response buffer"));
1920 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1921 
1922 	conn = task->conn;
1923 
1924 	switch (sts->completionStatus) {
1925 	case PASSTHRU_STATUS_COMPLETE:
1926 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1927 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1928 		itt = sts->handle;
1929 		hdr->itt = itt;
1930 		data = task_data->resp_buffer + hdr_len;
1931 		data_len = task_data->resp_len - hdr_len;
1932 		iscsi_complete_pdu(conn, hdr, data, data_len);
1933 		break;
1934 	default:
1935 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1936 			   sts->completionStatus);
1937 		break;
1938 	}
1939 	return;
1940 }
1941 
1942 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1943 {
1944 	struct ql4_task_data *task_data;
1945 	struct iscsi_session *sess;
1946 	struct ddb_entry *ddb_entry;
1947 	struct scsi_qla_host *ha;
1948 	int hdr_len;
1949 
1950 	sess = task->conn->session;
1951 	ddb_entry = sess->dd_data;
1952 	ha = ddb_entry->ha;
1953 	task_data = task->dd_data;
1954 	memset(task_data, 0, sizeof(struct ql4_task_data));
1955 
1956 	if (task->sc) {
1957 		ql4_printk(KERN_INFO, ha,
1958 			   "%s: SCSI Commands not implemented\n", __func__);
1959 		return -EINVAL;
1960 	}
1961 
1962 	hdr_len = sizeof(struct iscsi_hdr);
1963 	task_data->ha = ha;
1964 	task_data->task = task;
1965 
1966 	if (task->data_count) {
1967 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1968 						     task->data_count,
1969 						     PCI_DMA_TODEVICE);
1970 	}
1971 
1972 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1973 		      __func__, task->conn->max_recv_dlength, hdr_len));
1974 
1975 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1976 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1977 						    task_data->resp_len,
1978 						    &task_data->resp_dma,
1979 						    GFP_ATOMIC);
1980 	if (!task_data->resp_buffer)
1981 		goto exit_alloc_pdu;
1982 
1983 	task_data->req_len = task->data_count + hdr_len;
1984 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1985 						   task_data->req_len,
1986 						   &task_data->req_dma,
1987 						   GFP_ATOMIC);
1988 	if (!task_data->req_buffer)
1989 		goto exit_alloc_pdu;
1990 
1991 	task->hdr = task_data->req_buffer;
1992 
1993 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1994 
1995 	return 0;
1996 
1997 exit_alloc_pdu:
1998 	if (task_data->resp_buffer)
1999 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2000 				  task_data->resp_buffer, task_data->resp_dma);
2001 
2002 	if (task_data->req_buffer)
2003 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2004 				  task_data->req_buffer, task_data->req_dma);
2005 	return -ENOMEM;
2006 }
2007 
2008 static void qla4xxx_task_cleanup(struct iscsi_task *task)
2009 {
2010 	struct ql4_task_data *task_data;
2011 	struct iscsi_session *sess;
2012 	struct ddb_entry *ddb_entry;
2013 	struct scsi_qla_host *ha;
2014 	int hdr_len;
2015 
2016 	hdr_len = sizeof(struct iscsi_hdr);
2017 	sess = task->conn->session;
2018 	ddb_entry = sess->dd_data;
2019 	ha = ddb_entry->ha;
2020 	task_data = task->dd_data;
2021 
2022 	if (task->data_count) {
2023 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
2024 				 task->data_count, PCI_DMA_TODEVICE);
2025 	}
2026 
2027 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2028 		      __func__, task->conn->max_recv_dlength, hdr_len));
2029 
2030 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2031 			  task_data->resp_buffer, task_data->resp_dma);
2032 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2033 			  task_data->req_buffer, task_data->req_dma);
2034 	return;
2035 }
2036 
2037 static int qla4xxx_task_xmit(struct iscsi_task *task)
2038 {
2039 	struct scsi_cmnd *sc = task->sc;
2040 	struct iscsi_session *sess = task->conn->session;
2041 	struct ddb_entry *ddb_entry = sess->dd_data;
2042 	struct scsi_qla_host *ha = ddb_entry->ha;
2043 
2044 	if (!sc)
2045 		return qla4xxx_send_passthru0(task);
2046 
2047 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2048 		   __func__);
2049 	return -ENOSYS;
2050 }
2051 
2052 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2053 					 struct iscsi_bus_flash_conn *conn,
2054 					 struct dev_db_entry *fw_ddb_entry)
2055 {
2056 	unsigned long options = 0;
2057 	int rc = 0;
2058 
2059 	options = le16_to_cpu(fw_ddb_entry->options);
2060 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2061 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2062 		rc = iscsi_switch_str_param(&sess->portal_type,
2063 					    PORTAL_TYPE_IPV6);
2064 		if (rc)
2065 			goto exit_copy;
2066 	} else {
2067 		rc = iscsi_switch_str_param(&sess->portal_type,
2068 					    PORTAL_TYPE_IPV4);
2069 		if (rc)
2070 			goto exit_copy;
2071 	}
2072 
2073 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2074 					      &options);
2075 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2076 	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2077 
2078 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2079 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2080 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2081 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2082 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2083 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2084 					    &options);
2085 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2086 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2087 	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2088 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2089 					     &options);
2090 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2091 	sess->discovery_auth_optional =
2092 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2093 	if (test_bit(ISCSIOPT_ERL1, &options))
2094 		sess->erl |= BIT_1;
2095 	if (test_bit(ISCSIOPT_ERL0, &options))
2096 		sess->erl |= BIT_0;
2097 
2098 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2099 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2100 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2101 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2102 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2103 		conn->tcp_timer_scale |= BIT_3;
2104 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2105 		conn->tcp_timer_scale |= BIT_2;
2106 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2107 		conn->tcp_timer_scale |= BIT_1;
2108 
2109 	conn->tcp_timer_scale >>= 1;
2110 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2111 
2112 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2113 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2114 
2115 	conn->max_recv_dlength = BYTE_UNITS *
2116 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2117 	conn->max_xmit_dlength = BYTE_UNITS *
2118 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2119 	sess->first_burst = BYTE_UNITS *
2120 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2121 	sess->max_burst = BYTE_UNITS *
2122 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2123 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2124 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2125 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2126 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2127 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2128 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2129 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2130 	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2131 	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2132 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2133 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2134 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2135 	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2136 	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2137 	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2138 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2139 
2140 	sess->default_taskmgmt_timeout =
2141 				le16_to_cpu(fw_ddb_entry->def_timeout);
2142 	conn->port = le16_to_cpu(fw_ddb_entry->port);
2143 
2144 	options = le16_to_cpu(fw_ddb_entry->options);
2145 	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2146 	if (!conn->ipaddress) {
2147 		rc = -ENOMEM;
2148 		goto exit_copy;
2149 	}
2150 
2151 	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2152 	if (!conn->redirect_ipaddr) {
2153 		rc = -ENOMEM;
2154 		goto exit_copy;
2155 	}
2156 
2157 	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2158 	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2159 
2160 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2161 		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2162 
2163 		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2164 		if (!conn->link_local_ipv6_addr) {
2165 			rc = -ENOMEM;
2166 			goto exit_copy;
2167 		}
2168 
2169 		memcpy(conn->link_local_ipv6_addr,
2170 		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2171 	} else {
2172 		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2173 	}
2174 
2175 	if (fw_ddb_entry->iscsi_name[0]) {
2176 		rc = iscsi_switch_str_param(&sess->targetname,
2177 					    (char *)fw_ddb_entry->iscsi_name);
2178 		if (rc)
2179 			goto exit_copy;
2180 	}
2181 
2182 	if (fw_ddb_entry->iscsi_alias[0]) {
2183 		rc = iscsi_switch_str_param(&sess->targetalias,
2184 					    (char *)fw_ddb_entry->iscsi_alias);
2185 		if (rc)
2186 			goto exit_copy;
2187 	}
2188 
2189 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
2190 
2191 exit_copy:
2192 	return rc;
2193 }
2194 
2195 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2196 				       struct iscsi_bus_flash_conn *conn,
2197 				       struct dev_db_entry *fw_ddb_entry)
2198 {
2199 	uint16_t options;
2200 	int rc = 0;
2201 
2202 	options = le16_to_cpu(fw_ddb_entry->options);
2203 	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
2204 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2205 		options |= BIT_8;
2206 	else
2207 		options &= ~BIT_8;
2208 
2209 	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2210 	SET_BITVAL(sess->discovery_sess, options, BIT_4);
2211 	SET_BITVAL(sess->entry_state, options, BIT_3);
2212 	fw_ddb_entry->options = cpu_to_le16(options);
2213 
2214 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2215 	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2216 	SET_BITVAL(conn->datadgst_en, options, BIT_12);
2217 	SET_BITVAL(sess->imm_data_en, options, BIT_11);
2218 	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2219 	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2220 	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2221 	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2222 	SET_BITVAL(conn->snack_req_en, options, BIT_6);
2223 	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2224 	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2225 	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2226 	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2227 	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2228 	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2229 
2230 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2231 	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2232 	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2233 	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2234 	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2235 	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2236 	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2237 	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2238 	fw_ddb_entry->tcp_options = cpu_to_le16(options);
2239 
2240 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2241 	SET_BITVAL(conn->fragment_disable, options, BIT_4);
2242 	fw_ddb_entry->ip_options = cpu_to_le16(options);
2243 
2244 	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2245 	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2246 			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2247 	fw_ddb_entry->iscsi_max_snd_data_seg_len =
2248 			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2249 	fw_ddb_entry->iscsi_first_burst_len =
2250 				cpu_to_le16(sess->first_burst / BYTE_UNITS);
2251 	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2252 					    BYTE_UNITS);
2253 	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2254 	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2255 	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2256 	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2257 	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2258 	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2259 	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2260 	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2261 	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2262 	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2263 	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2264 	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
2265 	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2266 	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2267 	fw_ddb_entry->port = cpu_to_le16(conn->port);
2268 	fw_ddb_entry->def_timeout =
2269 				cpu_to_le16(sess->default_taskmgmt_timeout);
2270 
2271 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2272 		fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
2273 	else
2274 		fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2275 
2276 	if (conn->ipaddress)
2277 		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2278 		       sizeof(fw_ddb_entry->ip_addr));
2279 
2280 	if (conn->redirect_ipaddr)
2281 		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2282 		       sizeof(fw_ddb_entry->tgt_addr));
2283 
2284 	if (conn->link_local_ipv6_addr)
2285 		memcpy(fw_ddb_entry->link_local_ipv6_addr,
2286 		       conn->link_local_ipv6_addr,
2287 		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
2288 
2289 	if (sess->targetname)
2290 		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2291 		       sizeof(fw_ddb_entry->iscsi_name));
2292 
2293 	if (sess->targetalias)
2294 		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2295 		       sizeof(fw_ddb_entry->iscsi_alias));
2296 
2297 	COPY_ISID(fw_ddb_entry->isid, sess->isid);
2298 
2299 	return rc;
2300 }
2301 
2302 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
2303 					     struct iscsi_session *sess,
2304 					     struct dev_db_entry *fw_ddb_entry)
2305 {
2306 	unsigned long options = 0;
2307 	uint16_t ddb_link;
2308 	uint16_t disc_parent;
2309 
2310 	options = le16_to_cpu(fw_ddb_entry->options);
2311 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2312 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2313 					      &options);
2314 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2315 
2316 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2317 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2318 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2319 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2320 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2321 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2322 					    &options);
2323 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2324 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2325 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2326 					     &options);
2327 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2328 	sess->discovery_auth_optional =
2329 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2330 	if (test_bit(ISCSIOPT_ERL1, &options))
2331 		sess->erl |= BIT_1;
2332 	if (test_bit(ISCSIOPT_ERL0, &options))
2333 		sess->erl |= BIT_0;
2334 
2335 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2336 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2337 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2338 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2339 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2340 		conn->tcp_timer_scale |= BIT_3;
2341 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2342 		conn->tcp_timer_scale |= BIT_2;
2343 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2344 		conn->tcp_timer_scale |= BIT_1;
2345 
2346 	conn->tcp_timer_scale >>= 1;
2347 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2348 
2349 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2350 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2351 
2352 	conn->max_recv_dlength = BYTE_UNITS *
2353 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2354 	conn->max_xmit_dlength = BYTE_UNITS *
2355 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2356 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2357 	sess->first_burst = BYTE_UNITS *
2358 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2359 	sess->max_burst = BYTE_UNITS *
2360 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2361 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2362 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2363 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2364 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2365 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2366 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2367 	conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2368 	conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
2369 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2370 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2371 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2372 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2373 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
2374 
2375 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
2376 	if (ddb_link < MAX_DDB_ENTRIES)
2377 		sess->discovery_parent_idx = ddb_link;
2378 	else
2379 		sess->discovery_parent_idx = DDB_NO_LINK;
2380 
2381 	if (ddb_link == DDB_ISNS)
2382 		disc_parent = ISCSI_DISC_PARENT_ISNS;
2383 	else if (ddb_link == DDB_NO_LINK)
2384 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2385 	else if (ddb_link < MAX_DDB_ENTRIES)
2386 		disc_parent = ISCSI_DISC_PARENT_SENDTGT;
2387 	else
2388 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2389 
2390 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
2391 			iscsi_get_discovery_parent_name(disc_parent), 0);
2392 
2393 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2394 			(char *)fw_ddb_entry->iscsi_alias, 0);
2395 }
2396 
2397 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2398 				     struct dev_db_entry *fw_ddb_entry,
2399 				     struct iscsi_cls_session *cls_sess,
2400 				     struct iscsi_cls_conn *cls_conn)
2401 {
2402 	int buflen = 0;
2403 	struct iscsi_session *sess;
2404 	struct ddb_entry *ddb_entry;
2405 	struct iscsi_conn *conn;
2406 	char ip_addr[DDB_IPADDR_LEN];
2407 	uint16_t options = 0;
2408 
2409 	sess = cls_sess->dd_data;
2410 	ddb_entry = sess->dd_data;
2411 	conn = cls_conn->dd_data;
2412 
2413 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2414 
2415 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2416 
2417 	sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
2418 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2419 
2420 	memset(ip_addr, 0, sizeof(ip_addr));
2421 	options = le16_to_cpu(fw_ddb_entry->options);
2422 	if (options & DDB_OPT_IPV6_DEVICE) {
2423 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
2424 
2425 		memset(ip_addr, 0, sizeof(ip_addr));
2426 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2427 	} else {
2428 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
2429 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2430 	}
2431 
2432 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2433 			(char *)ip_addr, buflen);
2434 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2435 			(char *)fw_ddb_entry->iscsi_name, buflen);
2436 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2437 			(char *)ha->name_string, buflen);
2438 }
2439 
2440 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2441 					     struct ddb_entry *ddb_entry)
2442 {
2443 	struct iscsi_cls_session *cls_sess;
2444 	struct iscsi_cls_conn *cls_conn;
2445 	uint32_t ddb_state;
2446 	dma_addr_t fw_ddb_entry_dma;
2447 	struct dev_db_entry *fw_ddb_entry;
2448 
2449 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2450 					  &fw_ddb_entry_dma, GFP_KERNEL);
2451 	if (!fw_ddb_entry) {
2452 		ql4_printk(KERN_ERR, ha,
2453 			   "%s: Unable to allocate dma buffer\n", __func__);
2454 		goto exit_session_conn_fwddb_param;
2455 	}
2456 
2457 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2458 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2459 				    NULL, NULL, NULL) == QLA_ERROR) {
2460 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2461 				  "get_ddb_entry for fw_ddb_index %d\n",
2462 				  ha->host_no, __func__,
2463 				  ddb_entry->fw_ddb_index));
2464 		goto exit_session_conn_fwddb_param;
2465 	}
2466 
2467 	cls_sess = ddb_entry->sess;
2468 
2469 	cls_conn = ddb_entry->conn;
2470 
2471 	/* Update params */
2472 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2473 
2474 exit_session_conn_fwddb_param:
2475 	if (fw_ddb_entry)
2476 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2477 				  fw_ddb_entry, fw_ddb_entry_dma);
2478 }
2479 
2480 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2481 				       struct ddb_entry *ddb_entry)
2482 {
2483 	struct iscsi_cls_session *cls_sess;
2484 	struct iscsi_cls_conn *cls_conn;
2485 	struct iscsi_session *sess;
2486 	struct iscsi_conn *conn;
2487 	uint32_t ddb_state;
2488 	dma_addr_t fw_ddb_entry_dma;
2489 	struct dev_db_entry *fw_ddb_entry;
2490 
2491 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2492 					  &fw_ddb_entry_dma, GFP_KERNEL);
2493 	if (!fw_ddb_entry) {
2494 		ql4_printk(KERN_ERR, ha,
2495 			   "%s: Unable to allocate dma buffer\n", __func__);
2496 		goto exit_session_conn_param;
2497 	}
2498 
2499 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2500 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2501 				    NULL, NULL, NULL) == QLA_ERROR) {
2502 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2503 				  "get_ddb_entry for fw_ddb_index %d\n",
2504 				  ha->host_no, __func__,
2505 				  ddb_entry->fw_ddb_index));
2506 		goto exit_session_conn_param;
2507 	}
2508 
2509 	cls_sess = ddb_entry->sess;
2510 	sess = cls_sess->dd_data;
2511 
2512 	cls_conn = ddb_entry->conn;
2513 	conn = cls_conn->dd_data;
2514 
2515 	/* Update timers after login */
2516 	ddb_entry->default_relogin_timeout =
2517 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2518 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2519 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2520 	ddb_entry->default_time2wait =
2521 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2522 
2523 	/* Update params */
2524 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2525 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2526 
2527 	memcpy(sess->initiatorname, ha->name_string,
2528 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2529 
2530 exit_session_conn_param:
2531 	if (fw_ddb_entry)
2532 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2533 				  fw_ddb_entry, fw_ddb_entry_dma);
2534 }
2535 
2536 /*
2537  * Timer routines
2538  */
2539 
2540 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2541 				unsigned long interval)
2542 {
2543 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2544 		     __func__, ha->host->host_no));
2545 	init_timer(&ha->timer);
2546 	ha->timer.expires = jiffies + interval * HZ;
2547 	ha->timer.data = (unsigned long)ha;
2548 	ha->timer.function = (void (*)(unsigned long))func;
2549 	add_timer(&ha->timer);
2550 	ha->timer_active = 1;
2551 }
2552 
2553 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2554 {
2555 	del_timer_sync(&ha->timer);
2556 	ha->timer_active = 0;
2557 }
2558 
2559 /***
2560  * qla4xxx_mark_device_missing - blocks the session
2561  * @cls_session: Pointer to the session to be blocked
2562  * @ddb_entry: Pointer to device database entry
2563  *
2564  * This routine marks a device missing and close connection.
2565  **/
2566 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2567 {
2568 	iscsi_block_session(cls_session);
2569 }
2570 
2571 /**
2572  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2573  * @ha: Pointer to host adapter structure.
2574  *
2575  * This routine marks a device missing and resets the relogin retry count.
2576  **/
2577 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2578 {
2579 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2580 }
2581 
2582 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2583 				       struct ddb_entry *ddb_entry,
2584 				       struct scsi_cmnd *cmd)
2585 {
2586 	struct srb *srb;
2587 
2588 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2589 	if (!srb)
2590 		return srb;
2591 
2592 	kref_init(&srb->srb_ref);
2593 	srb->ha = ha;
2594 	srb->ddb = ddb_entry;
2595 	srb->cmd = cmd;
2596 	srb->flags = 0;
2597 	CMD_SP(cmd) = (void *)srb;
2598 
2599 	return srb;
2600 }
2601 
2602 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2603 {
2604 	struct scsi_cmnd *cmd = srb->cmd;
2605 
2606 	if (srb->flags & SRB_DMA_VALID) {
2607 		scsi_dma_unmap(cmd);
2608 		srb->flags &= ~SRB_DMA_VALID;
2609 	}
2610 	CMD_SP(cmd) = NULL;
2611 }
2612 
2613 void qla4xxx_srb_compl(struct kref *ref)
2614 {
2615 	struct srb *srb = container_of(ref, struct srb, srb_ref);
2616 	struct scsi_cmnd *cmd = srb->cmd;
2617 	struct scsi_qla_host *ha = srb->ha;
2618 
2619 	qla4xxx_srb_free_dma(ha, srb);
2620 
2621 	mempool_free(srb, ha->srb_mempool);
2622 
2623 	cmd->scsi_done(cmd);
2624 }
2625 
2626 /**
2627  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2628  * @host: scsi host
2629  * @cmd: Pointer to Linux's SCSI command structure
2630  *
2631  * Remarks:
2632  * This routine is invoked by Linux to send a SCSI command to the driver.
2633  * The mid-level driver tries to ensure that queuecommand never gets
2634  * invoked concurrently with itself or the interrupt handler (although
2635  * the interrupt handler may call this routine as part of request-
2636  * completion handling).   Unfortunely, it sometimes calls the scheduler
2637  * in interrupt context which is a big NO! NO!.
2638  **/
2639 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2640 {
2641 	struct scsi_qla_host *ha = to_qla_host(host);
2642 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
2643 	struct iscsi_cls_session *sess = ddb_entry->sess;
2644 	struct srb *srb;
2645 	int rval;
2646 
2647 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2648 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2649 			cmd->result = DID_NO_CONNECT << 16;
2650 		else
2651 			cmd->result = DID_REQUEUE << 16;
2652 		goto qc_fail_command;
2653 	}
2654 
2655 	if (!sess) {
2656 		cmd->result = DID_IMM_RETRY << 16;
2657 		goto qc_fail_command;
2658 	}
2659 
2660 	rval = iscsi_session_chkready(sess);
2661 	if (rval) {
2662 		cmd->result = rval;
2663 		goto qc_fail_command;
2664 	}
2665 
2666 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2667 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2668 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2669 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2670 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2671 	    !test_bit(AF_ONLINE, &ha->flags) ||
2672 	    !test_bit(AF_LINK_UP, &ha->flags) ||
2673 	    test_bit(AF_LOOPBACK, &ha->flags) ||
2674 	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
2675 	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
2676 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2677 		goto qc_host_busy;
2678 
2679 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2680 	if (!srb)
2681 		goto qc_host_busy;
2682 
2683 	rval = qla4xxx_send_command_to_isp(ha, srb);
2684 	if (rval != QLA_SUCCESS)
2685 		goto qc_host_busy_free_sp;
2686 
2687 	return 0;
2688 
2689 qc_host_busy_free_sp:
2690 	qla4xxx_srb_free_dma(ha, srb);
2691 	mempool_free(srb, ha->srb_mempool);
2692 
2693 qc_host_busy:
2694 	return SCSI_MLQUEUE_HOST_BUSY;
2695 
2696 qc_fail_command:
2697 	cmd->scsi_done(cmd);
2698 
2699 	return 0;
2700 }
2701 
2702 /**
2703  * qla4xxx_mem_free - frees memory allocated to adapter
2704  * @ha: Pointer to host adapter structure.
2705  *
2706  * Frees memory previously allocated by qla4xxx_mem_alloc
2707  **/
2708 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2709 {
2710 	if (ha->queues)
2711 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2712 				  ha->queues_dma);
2713 
2714 	 if (ha->fw_dump)
2715 		vfree(ha->fw_dump);
2716 
2717 	ha->queues_len = 0;
2718 	ha->queues = NULL;
2719 	ha->queues_dma = 0;
2720 	ha->request_ring = NULL;
2721 	ha->request_dma = 0;
2722 	ha->response_ring = NULL;
2723 	ha->response_dma = 0;
2724 	ha->shadow_regs = NULL;
2725 	ha->shadow_regs_dma = 0;
2726 	ha->fw_dump = NULL;
2727 	ha->fw_dump_size = 0;
2728 
2729 	/* Free srb pool. */
2730 	if (ha->srb_mempool)
2731 		mempool_destroy(ha->srb_mempool);
2732 
2733 	ha->srb_mempool = NULL;
2734 
2735 	if (ha->chap_dma_pool)
2736 		dma_pool_destroy(ha->chap_dma_pool);
2737 
2738 	if (ha->chap_list)
2739 		vfree(ha->chap_list);
2740 	ha->chap_list = NULL;
2741 
2742 	if (ha->fw_ddb_dma_pool)
2743 		dma_pool_destroy(ha->fw_ddb_dma_pool);
2744 
2745 	/* release io space registers  */
2746 	if (is_qla8022(ha)) {
2747 		if (ha->nx_pcibase)
2748 			iounmap(
2749 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2750 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
2751 		if (ha->nx_pcibase)
2752 			iounmap(
2753 			    (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2754 	} else if (ha->reg) {
2755 		iounmap(ha->reg);
2756 	}
2757 
2758 	if (ha->reset_tmplt.buff)
2759 		vfree(ha->reset_tmplt.buff);
2760 
2761 	pci_release_regions(ha->pdev);
2762 }
2763 
2764 /**
2765  * qla4xxx_mem_alloc - allocates memory for use by adapter.
2766  * @ha: Pointer to host adapter structure
2767  *
2768  * Allocates DMA memory for request and response queues. Also allocates memory
2769  * for srbs.
2770  **/
2771 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2772 {
2773 	unsigned long align;
2774 
2775 	/* Allocate contiguous block of DMA memory for queues. */
2776 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2777 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2778 			  sizeof(struct shadow_regs) +
2779 			  MEM_ALIGN_VALUE +
2780 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2781 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2782 					&ha->queues_dma, GFP_KERNEL);
2783 	if (ha->queues == NULL) {
2784 		ql4_printk(KERN_WARNING, ha,
2785 		    "Memory Allocation failed - queues.\n");
2786 
2787 		goto mem_alloc_error_exit;
2788 	}
2789 	memset(ha->queues, 0, ha->queues_len);
2790 
2791 	/*
2792 	 * As per RISC alignment requirements -- the bus-address must be a
2793 	 * multiple of the request-ring size (in bytes).
2794 	 */
2795 	align = 0;
2796 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2797 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2798 					   (MEM_ALIGN_VALUE - 1));
2799 
2800 	/* Update request and response queue pointers. */
2801 	ha->request_dma = ha->queues_dma + align;
2802 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
2803 	ha->response_dma = ha->queues_dma + align +
2804 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2805 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
2806 						    (REQUEST_QUEUE_DEPTH *
2807 						     QUEUE_SIZE));
2808 	ha->shadow_regs_dma = ha->queues_dma + align +
2809 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2810 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2811 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2812 						  (REQUEST_QUEUE_DEPTH *
2813 						   QUEUE_SIZE) +
2814 						  (RESPONSE_QUEUE_DEPTH *
2815 						   QUEUE_SIZE));
2816 
2817 	/* Allocate memory for srb pool. */
2818 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2819 					 mempool_free_slab, srb_cachep);
2820 	if (ha->srb_mempool == NULL) {
2821 		ql4_printk(KERN_WARNING, ha,
2822 		    "Memory Allocation failed - SRB Pool.\n");
2823 
2824 		goto mem_alloc_error_exit;
2825 	}
2826 
2827 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2828 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
2829 
2830 	if (ha->chap_dma_pool == NULL) {
2831 		ql4_printk(KERN_WARNING, ha,
2832 		    "%s: chap_dma_pool allocation failed..\n", __func__);
2833 		goto mem_alloc_error_exit;
2834 	}
2835 
2836 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2837 					      DDB_DMA_BLOCK_SIZE, 8, 0);
2838 
2839 	if (ha->fw_ddb_dma_pool == NULL) {
2840 		ql4_printk(KERN_WARNING, ha,
2841 			   "%s: fw_ddb_dma_pool allocation failed..\n",
2842 			   __func__);
2843 		goto mem_alloc_error_exit;
2844 	}
2845 
2846 	return QLA_SUCCESS;
2847 
2848 mem_alloc_error_exit:
2849 	qla4xxx_mem_free(ha);
2850 	return QLA_ERROR;
2851 }
2852 
2853 /**
2854  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2855  * @ha: adapter block pointer.
2856  *
2857  * Note: The caller should not hold the idc lock.
2858  **/
2859 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2860 {
2861 	uint32_t temp, temp_state, temp_val;
2862 	int status = QLA_SUCCESS;
2863 
2864 	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2865 
2866 	temp_state = qla82xx_get_temp_state(temp);
2867 	temp_val = qla82xx_get_temp_val(temp);
2868 
2869 	if (temp_state == QLA82XX_TEMP_PANIC) {
2870 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2871 			   " exceeds maximum allowed. Hardware has been shut"
2872 			   " down.\n", temp_val);
2873 		status = QLA_ERROR;
2874 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2875 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
2876 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2877 				   " degrees C exceeds operating range."
2878 				   " Immediate action needed.\n", temp_val);
2879 	} else {
2880 		if (ha->temperature == QLA82XX_TEMP_WARN)
2881 			ql4_printk(KERN_INFO, ha, "Device temperature is"
2882 				   " now %d degrees C in normal range.\n",
2883 				   temp_val);
2884 	}
2885 	ha->temperature = temp_state;
2886 	return status;
2887 }
2888 
2889 /**
2890  * qla4_8xxx_check_fw_alive  - Check firmware health
2891  * @ha: Pointer to host adapter structure.
2892  *
2893  * Context: Interrupt
2894  **/
2895 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2896 {
2897 	uint32_t fw_heartbeat_counter;
2898 	int status = QLA_SUCCESS;
2899 
2900 	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2901 						   QLA8XXX_PEG_ALIVE_COUNTER);
2902 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2903 	if (fw_heartbeat_counter == 0xffffffff) {
2904 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2905 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2906 		    ha->host_no, __func__));
2907 		return status;
2908 	}
2909 
2910 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2911 		ha->seconds_since_last_heartbeat++;
2912 		/* FW not alive after 2 seconds */
2913 		if (ha->seconds_since_last_heartbeat == 2) {
2914 			ha->seconds_since_last_heartbeat = 0;
2915 			qla4_8xxx_dump_peg_reg(ha);
2916 			status = QLA_ERROR;
2917 		}
2918 	} else
2919 		ha->seconds_since_last_heartbeat = 0;
2920 
2921 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
2922 	return status;
2923 }
2924 
2925 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2926 {
2927 	uint32_t halt_status;
2928 	int halt_status_unrecoverable = 0;
2929 
2930 	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2931 
2932 	if (is_qla8022(ha)) {
2933 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2934 			   __func__);
2935 		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2936 				CRB_NIU_XG_PAUSE_CTL_P0 |
2937 				CRB_NIU_XG_PAUSE_CTL_P1);
2938 
2939 		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2940 			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2941 				   __func__);
2942 		if (halt_status & HALT_STATUS_UNRECOVERABLE)
2943 			halt_status_unrecoverable = 1;
2944 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
2945 		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2946 			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2947 				   __func__);
2948 		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2949 			halt_status_unrecoverable = 1;
2950 	}
2951 
2952 	/*
2953 	 * Since we cannot change dev_state in interrupt context,
2954 	 * set appropriate DPC flag then wakeup DPC
2955 	 */
2956 	if (halt_status_unrecoverable) {
2957 		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2958 	} else {
2959 		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2960 			   __func__);
2961 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
2962 	}
2963 	qla4xxx_mailbox_premature_completion(ha);
2964 	qla4xxx_wake_dpc(ha);
2965 }
2966 
2967 /**
2968  * qla4_8xxx_watchdog - Poll dev state
2969  * @ha: Pointer to host adapter structure.
2970  *
2971  * Context: Interrupt
2972  **/
2973 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2974 {
2975 	uint32_t dev_state;
2976 	uint32_t idc_ctrl;
2977 
2978 	/* don't poll if reset is going on */
2979 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2980 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2981 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2982 		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2983 
2984 		if (qla4_8xxx_check_temp(ha)) {
2985 			if (is_qla8022(ha)) {
2986 				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2987 				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2988 						CRB_NIU_XG_PAUSE_CTL_P0 |
2989 						CRB_NIU_XG_PAUSE_CTL_P1);
2990 			}
2991 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2992 			qla4xxx_wake_dpc(ha);
2993 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2994 			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2995 
2996 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
2997 				   __func__);
2998 
2999 			if (is_qla8032(ha) || is_qla8042(ha)) {
3000 				idc_ctrl = qla4_83xx_rd_reg(ha,
3001 							QLA83XX_IDC_DRV_CTRL);
3002 				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
3003 					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
3004 						   __func__);
3005 					qla4xxx_mailbox_premature_completion(
3006 									    ha);
3007 				}
3008 			}
3009 
3010 			if ((is_qla8032(ha) || is_qla8042(ha)) ||
3011 			    (is_qla8022(ha) && !ql4xdontresethba)) {
3012 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
3013 				qla4xxx_wake_dpc(ha);
3014 			}
3015 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3016 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3017 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
3018 			    __func__);
3019 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
3020 			qla4xxx_wake_dpc(ha);
3021 		} else  {
3022 			/* Check firmware health */
3023 			if (qla4_8xxx_check_fw_alive(ha))
3024 				qla4_8xxx_process_fw_error(ha);
3025 		}
3026 	}
3027 }
3028 
3029 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3030 {
3031 	struct iscsi_session *sess;
3032 	struct ddb_entry *ddb_entry;
3033 	struct scsi_qla_host *ha;
3034 
3035 	sess = cls_sess->dd_data;
3036 	ddb_entry = sess->dd_data;
3037 	ha = ddb_entry->ha;
3038 
3039 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3040 		return;
3041 
3042 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
3043 	    !iscsi_is_session_online(cls_sess)) {
3044 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
3045 		    INVALID_ENTRY) {
3046 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
3047 					0) {
3048 				atomic_set(&ddb_entry->retry_relogin_timer,
3049 					   INVALID_ENTRY);
3050 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3051 				set_bit(DF_RELOGIN, &ddb_entry->flags);
3052 				DEBUG2(ql4_printk(KERN_INFO, ha,
3053 				       "%s: index [%d] login device\n",
3054 					__func__, ddb_entry->fw_ddb_index));
3055 			} else
3056 				atomic_dec(&ddb_entry->retry_relogin_timer);
3057 		}
3058 	}
3059 
3060 	/* Wait for relogin to timeout */
3061 	if (atomic_read(&ddb_entry->relogin_timer) &&
3062 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
3063 		/*
3064 		 * If the relogin times out and the device is
3065 		 * still NOT ONLINE then try and relogin again.
3066 		 */
3067 		if (!iscsi_is_session_online(cls_sess)) {
3068 			/* Reset retry relogin timer */
3069 			atomic_inc(&ddb_entry->relogin_retry_count);
3070 			DEBUG2(ql4_printk(KERN_INFO, ha,
3071 				"%s: index[%d] relogin timed out-retrying"
3072 				" relogin (%d), retry (%d)\n", __func__,
3073 				ddb_entry->fw_ddb_index,
3074 				atomic_read(&ddb_entry->relogin_retry_count),
3075 				ddb_entry->default_time2wait + 4));
3076 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3077 			atomic_set(&ddb_entry->retry_relogin_timer,
3078 				   ddb_entry->default_time2wait + 4);
3079 		}
3080 	}
3081 }
3082 
3083 /**
3084  * qla4xxx_timer - checks every second for work to do.
3085  * @ha: Pointer to host adapter structure.
3086  **/
3087 static void qla4xxx_timer(struct scsi_qla_host *ha)
3088 {
3089 	int start_dpc = 0;
3090 	uint16_t w;
3091 
3092 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
3093 
3094 	/* If we are in the middle of AER/EEH processing
3095 	 * skip any processing and reschedule the timer
3096 	 */
3097 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3098 		mod_timer(&ha->timer, jiffies + HZ);
3099 		return;
3100 	}
3101 
3102 	/* Hardware read to trigger an EEH error during mailbox waits. */
3103 	if (!pci_channel_offline(ha->pdev))
3104 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3105 
3106 	if (is_qla80XX(ha))
3107 		qla4_8xxx_watchdog(ha);
3108 
3109 	if (is_qla40XX(ha)) {
3110 		/* Check for heartbeat interval. */
3111 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
3112 		    ha->heartbeat_interval != 0) {
3113 			ha->seconds_since_last_heartbeat++;
3114 			if (ha->seconds_since_last_heartbeat >
3115 			    ha->heartbeat_interval + 2)
3116 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
3117 		}
3118 	}
3119 
3120 	/* Process any deferred work. */
3121 	if (!list_empty(&ha->work_list))
3122 		start_dpc++;
3123 
3124 	/* Wakeup the dpc routine for this adapter, if needed. */
3125 	if (start_dpc ||
3126 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3127 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
3128 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
3129 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3130 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3131 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
3132 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
3133 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
3134 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
3135 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
3136 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
3137 			      " - dpc flags = 0x%lx\n",
3138 			      ha->host_no, __func__, ha->dpc_flags));
3139 		qla4xxx_wake_dpc(ha);
3140 	}
3141 
3142 	/* Reschedule timer thread to call us back in one second */
3143 	mod_timer(&ha->timer, jiffies + HZ);
3144 
3145 	DEBUG2(ha->seconds_since_last_intr++);
3146 }
3147 
3148 /**
3149  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3150  * @ha: Pointer to host adapter structure.
3151  *
3152  * This routine stalls the driver until all outstanding commands are returned.
3153  * Caller must release the Hardware Lock prior to calling this routine.
3154  **/
3155 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3156 {
3157 	uint32_t index = 0;
3158 	unsigned long flags;
3159 	struct scsi_cmnd *cmd;
3160 
3161 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3162 
3163 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3164 	    "complete\n", WAIT_CMD_TOV));
3165 
3166 	while (!time_after_eq(jiffies, wtime)) {
3167 		spin_lock_irqsave(&ha->hardware_lock, flags);
3168 		/* Find a command that hasn't completed. */
3169 		for (index = 0; index < ha->host->can_queue; index++) {
3170 			cmd = scsi_host_find_tag(ha->host, index);
3171 			/*
3172 			 * We cannot just check if the index is valid,
3173 			 * becase if we are run from the scsi eh, then
3174 			 * the scsi/block layer is going to prevent
3175 			 * the tag from being released.
3176 			 */
3177 			if (cmd != NULL && CMD_SP(cmd))
3178 				break;
3179 		}
3180 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3181 
3182 		/* If No Commands are pending, wait is complete */
3183 		if (index == ha->host->can_queue)
3184 			return QLA_SUCCESS;
3185 
3186 		msleep(1000);
3187 	}
3188 	/* If we timed out on waiting for commands to come back
3189 	 * return ERROR. */
3190 	return QLA_ERROR;
3191 }
3192 
3193 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
3194 {
3195 	uint32_t ctrl_status;
3196 	unsigned long flags = 0;
3197 
3198 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
3199 
3200 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
3201 		return QLA_ERROR;
3202 
3203 	spin_lock_irqsave(&ha->hardware_lock, flags);
3204 
3205 	/*
3206 	 * If the SCSI Reset Interrupt bit is set, clear it.
3207 	 * Otherwise, the Soft Reset won't work.
3208 	 */
3209 	ctrl_status = readw(&ha->reg->ctrl_status);
3210 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
3211 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3212 
3213 	/* Issue Soft Reset */
3214 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
3215 	readl(&ha->reg->ctrl_status);
3216 
3217 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3218 	return QLA_SUCCESS;
3219 }
3220 
3221 /**
3222  * qla4xxx_soft_reset - performs soft reset.
3223  * @ha: Pointer to host adapter structure.
3224  **/
3225 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
3226 {
3227 	uint32_t max_wait_time;
3228 	unsigned long flags = 0;
3229 	int status;
3230 	uint32_t ctrl_status;
3231 
3232 	status = qla4xxx_hw_reset(ha);
3233 	if (status != QLA_SUCCESS)
3234 		return status;
3235 
3236 	status = QLA_ERROR;
3237 	/* Wait until the Network Reset Intr bit is cleared */
3238 	max_wait_time = RESET_INTR_TOV;
3239 	do {
3240 		spin_lock_irqsave(&ha->hardware_lock, flags);
3241 		ctrl_status = readw(&ha->reg->ctrl_status);
3242 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3243 
3244 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
3245 			break;
3246 
3247 		msleep(1000);
3248 	} while ((--max_wait_time));
3249 
3250 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
3251 		DEBUG2(printk(KERN_WARNING
3252 			      "scsi%ld: Network Reset Intr not cleared by "
3253 			      "Network function, clearing it now!\n",
3254 			      ha->host_no));
3255 		spin_lock_irqsave(&ha->hardware_lock, flags);
3256 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
3257 		readl(&ha->reg->ctrl_status);
3258 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3259 	}
3260 
3261 	/* Wait until the firmware tells us the Soft Reset is done */
3262 	max_wait_time = SOFT_RESET_TOV;
3263 	do {
3264 		spin_lock_irqsave(&ha->hardware_lock, flags);
3265 		ctrl_status = readw(&ha->reg->ctrl_status);
3266 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3267 
3268 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
3269 			status = QLA_SUCCESS;
3270 			break;
3271 		}
3272 
3273 		msleep(1000);
3274 	} while ((--max_wait_time));
3275 
3276 	/*
3277 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
3278 	 * after the soft reset has taken place.
3279 	 */
3280 	spin_lock_irqsave(&ha->hardware_lock, flags);
3281 	ctrl_status = readw(&ha->reg->ctrl_status);
3282 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
3283 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3284 		readl(&ha->reg->ctrl_status);
3285 	}
3286 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3287 
3288 	/* If soft reset fails then most probably the bios on other
3289 	 * function is also enabled.
3290 	 * Since the initialization is sequential the other fn
3291 	 * wont be able to acknowledge the soft reset.
3292 	 * Issue a force soft reset to workaround this scenario.
3293 	 */
3294 	if (max_wait_time == 0) {
3295 		/* Issue Force Soft Reset */
3296 		spin_lock_irqsave(&ha->hardware_lock, flags);
3297 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
3298 		readl(&ha->reg->ctrl_status);
3299 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3300 		/* Wait until the firmware tells us the Soft Reset is done */
3301 		max_wait_time = SOFT_RESET_TOV;
3302 		do {
3303 			spin_lock_irqsave(&ha->hardware_lock, flags);
3304 			ctrl_status = readw(&ha->reg->ctrl_status);
3305 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
3306 
3307 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
3308 				status = QLA_SUCCESS;
3309 				break;
3310 			}
3311 
3312 			msleep(1000);
3313 		} while ((--max_wait_time));
3314 	}
3315 
3316 	return status;
3317 }
3318 
3319 /**
3320  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3321  * @ha: Pointer to host adapter structure.
3322  * @res: returned scsi status
3323  *
3324  * This routine is called just prior to a HARD RESET to return all
3325  * outstanding commands back to the Operating System.
3326  * Caller should make sure that the following locks are released
3327  * before this calling routine: Hardware lock, and io_request_lock.
3328  **/
3329 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3330 {
3331 	struct srb *srb;
3332 	int i;
3333 	unsigned long flags;
3334 
3335 	spin_lock_irqsave(&ha->hardware_lock, flags);
3336 	for (i = 0; i < ha->host->can_queue; i++) {
3337 		srb = qla4xxx_del_from_active_array(ha, i);
3338 		if (srb != NULL) {
3339 			srb->cmd->result = res;
3340 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3341 		}
3342 	}
3343 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3344 }
3345 
3346 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
3347 {
3348 	clear_bit(AF_ONLINE, &ha->flags);
3349 
3350 	/* Disable the board */
3351 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
3352 
3353 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3354 	qla4xxx_mark_all_devices_missing(ha);
3355 	clear_bit(AF_INIT_DONE, &ha->flags);
3356 }
3357 
3358 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3359 {
3360 	struct iscsi_session *sess;
3361 	struct ddb_entry *ddb_entry;
3362 
3363 	sess = cls_session->dd_data;
3364 	ddb_entry = sess->dd_data;
3365 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3366 
3367 	if (ddb_entry->ddb_type == FLASH_DDB)
3368 		iscsi_block_session(ddb_entry->sess);
3369 	else
3370 		iscsi_session_failure(cls_session->dd_data,
3371 				      ISCSI_ERR_CONN_FAILED);
3372 }
3373 
3374 /**
3375  * qla4xxx_recover_adapter - recovers adapter after a fatal error
3376  * @ha: Pointer to host adapter structure.
3377  **/
3378 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3379 {
3380 	int status = QLA_ERROR;
3381 	uint8_t reset_chip = 0;
3382 	uint32_t dev_state;
3383 	unsigned long wait;
3384 
3385 	/* Stall incoming I/O until we are done */
3386 	scsi_block_requests(ha->host);
3387 	clear_bit(AF_ONLINE, &ha->flags);
3388 	clear_bit(AF_LINK_UP, &ha->flags);
3389 
3390 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
3391 
3392 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3393 
3394 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
3395 	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3396 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3397 			   __func__);
3398 		/* disable pause frame for ISP83xx */
3399 		qla4_83xx_disable_pause(ha);
3400 	}
3401 
3402 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
3403 
3404 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3405 		reset_chip = 1;
3406 
3407 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
3408 	 * do not reset adapter, jump to initialize_adapter */
3409 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3410 		status = QLA_SUCCESS;
3411 		goto recover_ha_init_adapter;
3412 	}
3413 
3414 	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
3415 	 * from eh_host_reset or ioctl module */
3416 	if (is_qla80XX(ha) && !reset_chip &&
3417 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3418 
3419 		DEBUG2(ql4_printk(KERN_INFO, ha,
3420 		    "scsi%ld: %s - Performing stop_firmware...\n",
3421 		    ha->host_no, __func__));
3422 		status = ha->isp_ops->reset_firmware(ha);
3423 		if (status == QLA_SUCCESS) {
3424 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3425 				qla4xxx_cmd_wait(ha);
3426 
3427 			ha->isp_ops->disable_intrs(ha);
3428 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3429 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3430 		} else {
3431 			/* If the stop_firmware fails then
3432 			 * reset the entire chip */
3433 			reset_chip = 1;
3434 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3435 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
3436 		}
3437 	}
3438 
3439 	/* Issue full chip reset if recovering from a catastrophic error,
3440 	 * or if stop_firmware fails for ISP-8xxx.
3441 	 * This is the default case for ISP-4xxx */
3442 	if (is_qla40XX(ha) || reset_chip) {
3443 		if (is_qla40XX(ha))
3444 			goto chip_reset;
3445 
3446 		/* Check if 8XXX firmware is alive or not
3447 		 * We may have arrived here from NEED_RESET
3448 		 * detection only */
3449 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
3450 			goto chip_reset;
3451 
3452 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
3453 		while (time_before(jiffies, wait)) {
3454 			if (qla4_8xxx_check_fw_alive(ha)) {
3455 				qla4xxx_mailbox_premature_completion(ha);
3456 				break;
3457 			}
3458 
3459 			set_current_state(TASK_UNINTERRUPTIBLE);
3460 			schedule_timeout(HZ);
3461 		}
3462 chip_reset:
3463 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3464 			qla4xxx_cmd_wait(ha);
3465 
3466 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3467 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3468 		DEBUG2(ql4_printk(KERN_INFO, ha,
3469 		    "scsi%ld: %s - Performing chip reset..\n",
3470 		    ha->host_no, __func__));
3471 		status = ha->isp_ops->reset_chip(ha);
3472 	}
3473 
3474 	/* Flush any pending ddb changed AENs */
3475 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3476 
3477 recover_ha_init_adapter:
3478 	/* Upon successful firmware/chip reset, re-initialize the adapter */
3479 	if (status == QLA_SUCCESS) {
3480 		/* For ISP-4xxx, force function 1 to always initialize
3481 		 * before function 3 to prevent both funcions from
3482 		 * stepping on top of the other */
3483 		if (is_qla40XX(ha) && (ha->mac_index == 3))
3484 			ssleep(6);
3485 
3486 		/* NOTE: AF_ONLINE flag set upon successful completion of
3487 		 *       qla4xxx_initialize_adapter */
3488 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3489 	}
3490 
3491 	/* Retry failed adapter initialization, if necessary
3492 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3493 	 * case to prevent ping-pong resets between functions */
3494 	if (!test_bit(AF_ONLINE, &ha->flags) &&
3495 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3496 		/* Adapter initialization failed, see if we can retry
3497 		 * resetting the ha.
3498 		 * Since we don't want to block the DPC for too long
3499 		 * with multiple resets in the same thread,
3500 		 * utilize DPC to retry */
3501 		if (is_qla80XX(ha)) {
3502 			ha->isp_ops->idc_lock(ha);
3503 			dev_state = qla4_8xxx_rd_direct(ha,
3504 							QLA8XXX_CRB_DEV_STATE);
3505 			ha->isp_ops->idc_unlock(ha);
3506 			if (dev_state == QLA8XXX_DEV_FAILED) {
3507 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
3508 					   "recover adapter. H/W is in Failed "
3509 					   "state\n", __func__);
3510 				qla4xxx_dead_adapter_cleanup(ha);
3511 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3512 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3513 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3514 						&ha->dpc_flags);
3515 				status = QLA_ERROR;
3516 
3517 				goto exit_recover;
3518 			}
3519 		}
3520 
3521 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3522 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3523 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
3524 				      "(%d) more times\n", ha->host_no,
3525 				      ha->retry_reset_ha_cnt));
3526 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3527 			status = QLA_ERROR;
3528 		} else {
3529 			if (ha->retry_reset_ha_cnt > 0) {
3530 				/* Schedule another Reset HA--DPC will retry */
3531 				ha->retry_reset_ha_cnt--;
3532 				DEBUG2(printk("scsi%ld: recover adapter - "
3533 					      "retry remaining %d\n",
3534 					      ha->host_no,
3535 					      ha->retry_reset_ha_cnt));
3536 				status = QLA_ERROR;
3537 			}
3538 
3539 			if (ha->retry_reset_ha_cnt == 0) {
3540 				/* Recover adapter retries have been exhausted.
3541 				 * Adapter DEAD */
3542 				DEBUG2(printk("scsi%ld: recover adapter "
3543 					      "failed - board disabled\n",
3544 					      ha->host_no));
3545 				qla4xxx_dead_adapter_cleanup(ha);
3546 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3547 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3548 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3549 					  &ha->dpc_flags);
3550 				status = QLA_ERROR;
3551 			}
3552 		}
3553 	} else {
3554 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3555 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3556 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3557 	}
3558 
3559 exit_recover:
3560 	ha->adapter_error_count++;
3561 
3562 	if (test_bit(AF_ONLINE, &ha->flags))
3563 		ha->isp_ops->enable_intrs(ha);
3564 
3565 	scsi_unblock_requests(ha->host);
3566 
3567 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3568 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3569 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3570 
3571 	return status;
3572 }
3573 
3574 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3575 {
3576 	struct iscsi_session *sess;
3577 	struct ddb_entry *ddb_entry;
3578 	struct scsi_qla_host *ha;
3579 
3580 	sess = cls_session->dd_data;
3581 	ddb_entry = sess->dd_data;
3582 	ha = ddb_entry->ha;
3583 	if (!iscsi_is_session_online(cls_session)) {
3584 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3585 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3586 				   " unblock session\n", ha->host_no, __func__,
3587 				   ddb_entry->fw_ddb_index);
3588 			iscsi_unblock_session(ddb_entry->sess);
3589 		} else {
3590 			/* Trigger relogin */
3591 			if (ddb_entry->ddb_type == FLASH_DDB) {
3592 				if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3593 				      test_bit(DF_DISABLE_RELOGIN,
3594 					       &ddb_entry->flags)))
3595 					qla4xxx_arm_relogin_timer(ddb_entry);
3596 			} else
3597 				iscsi_session_failure(cls_session->dd_data,
3598 						      ISCSI_ERR_CONN_FAILED);
3599 		}
3600 	}
3601 }
3602 
3603 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3604 {
3605 	struct iscsi_session *sess;
3606 	struct ddb_entry *ddb_entry;
3607 	struct scsi_qla_host *ha;
3608 
3609 	sess = cls_session->dd_data;
3610 	ddb_entry = sess->dd_data;
3611 	ha = ddb_entry->ha;
3612 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3613 		   " unblock session\n", ha->host_no, __func__,
3614 		   ddb_entry->fw_ddb_index);
3615 
3616 	iscsi_unblock_session(ddb_entry->sess);
3617 
3618 	/* Start scan target */
3619 	if (test_bit(AF_ONLINE, &ha->flags)) {
3620 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3621 			   " start scan\n", ha->host_no, __func__,
3622 			   ddb_entry->fw_ddb_index);
3623 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3624 	}
3625 	return QLA_SUCCESS;
3626 }
3627 
3628 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3629 {
3630 	struct iscsi_session *sess;
3631 	struct ddb_entry *ddb_entry;
3632 	struct scsi_qla_host *ha;
3633 	int status = QLA_SUCCESS;
3634 
3635 	sess = cls_session->dd_data;
3636 	ddb_entry = sess->dd_data;
3637 	ha = ddb_entry->ha;
3638 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3639 		   " unblock user space session\n", ha->host_no, __func__,
3640 		   ddb_entry->fw_ddb_index);
3641 
3642 	if (!iscsi_is_session_online(cls_session)) {
3643 		iscsi_conn_start(ddb_entry->conn);
3644 		iscsi_conn_login_event(ddb_entry->conn,
3645 				       ISCSI_CONN_STATE_LOGGED_IN);
3646 	} else {
3647 		ql4_printk(KERN_INFO, ha,
3648 			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3649 			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
3650 			   cls_session->sid);
3651 		status = QLA_ERROR;
3652 	}
3653 
3654 	return status;
3655 }
3656 
3657 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3658 {
3659 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3660 }
3661 
3662 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3663 {
3664 	uint16_t relogin_timer;
3665 	struct iscsi_session *sess;
3666 	struct ddb_entry *ddb_entry;
3667 	struct scsi_qla_host *ha;
3668 
3669 	sess = cls_sess->dd_data;
3670 	ddb_entry = sess->dd_data;
3671 	ha = ddb_entry->ha;
3672 
3673 	relogin_timer = max(ddb_entry->default_relogin_timeout,
3674 			    (uint16_t)RELOGIN_TOV);
3675 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3676 
3677 	DEBUG2(ql4_printk(KERN_INFO, ha,
3678 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3679 			  ddb_entry->fw_ddb_index, relogin_timer));
3680 
3681 	qla4xxx_login_flash_ddb(cls_sess);
3682 }
3683 
3684 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3685 {
3686 	struct iscsi_session *sess;
3687 	struct ddb_entry *ddb_entry;
3688 	struct scsi_qla_host *ha;
3689 
3690 	sess = cls_sess->dd_data;
3691 	ddb_entry = sess->dd_data;
3692 	ha = ddb_entry->ha;
3693 
3694 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3695 		return;
3696 
3697 	if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3698 		return;
3699 
3700 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3701 	    !iscsi_is_session_online(cls_sess)) {
3702 		DEBUG2(ql4_printk(KERN_INFO, ha,
3703 				  "relogin issued\n"));
3704 		qla4xxx_relogin_flash_ddb(cls_sess);
3705 	}
3706 }
3707 
3708 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3709 {
3710 	if (ha->dpc_thread)
3711 		queue_work(ha->dpc_thread, &ha->dpc_work);
3712 }
3713 
3714 static struct qla4_work_evt *
3715 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3716 		   enum qla4_work_type type)
3717 {
3718 	struct qla4_work_evt *e;
3719 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3720 
3721 	e = kzalloc(size, GFP_ATOMIC);
3722 	if (!e)
3723 		return NULL;
3724 
3725 	INIT_LIST_HEAD(&e->list);
3726 	e->type = type;
3727 	return e;
3728 }
3729 
3730 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3731 			     struct qla4_work_evt *e)
3732 {
3733 	unsigned long flags;
3734 
3735 	spin_lock_irqsave(&ha->work_lock, flags);
3736 	list_add_tail(&e->list, &ha->work_list);
3737 	spin_unlock_irqrestore(&ha->work_lock, flags);
3738 	qla4xxx_wake_dpc(ha);
3739 }
3740 
3741 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3742 			  enum iscsi_host_event_code aen_code,
3743 			  uint32_t data_size, uint8_t *data)
3744 {
3745 	struct qla4_work_evt *e;
3746 
3747 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3748 	if (!e)
3749 		return QLA_ERROR;
3750 
3751 	e->u.aen.code = aen_code;
3752 	e->u.aen.data_size = data_size;
3753 	memcpy(e->u.aen.data, data, data_size);
3754 
3755 	qla4xxx_post_work(ha, e);
3756 
3757 	return QLA_SUCCESS;
3758 }
3759 
3760 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3761 			       uint32_t status, uint32_t pid,
3762 			       uint32_t data_size, uint8_t *data)
3763 {
3764 	struct qla4_work_evt *e;
3765 
3766 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3767 	if (!e)
3768 		return QLA_ERROR;
3769 
3770 	e->u.ping.status = status;
3771 	e->u.ping.pid = pid;
3772 	e->u.ping.data_size = data_size;
3773 	memcpy(e->u.ping.data, data, data_size);
3774 
3775 	qla4xxx_post_work(ha, e);
3776 
3777 	return QLA_SUCCESS;
3778 }
3779 
3780 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3781 {
3782 	struct qla4_work_evt *e, *tmp;
3783 	unsigned long flags;
3784 	LIST_HEAD(work);
3785 
3786 	spin_lock_irqsave(&ha->work_lock, flags);
3787 	list_splice_init(&ha->work_list, &work);
3788 	spin_unlock_irqrestore(&ha->work_lock, flags);
3789 
3790 	list_for_each_entry_safe(e, tmp, &work, list) {
3791 		list_del_init(&e->list);
3792 
3793 		switch (e->type) {
3794 		case QLA4_EVENT_AEN:
3795 			iscsi_post_host_event(ha->host_no,
3796 					      &qla4xxx_iscsi_transport,
3797 					      e->u.aen.code,
3798 					      e->u.aen.data_size,
3799 					      e->u.aen.data);
3800 			break;
3801 		case QLA4_EVENT_PING_STATUS:
3802 			iscsi_ping_comp_event(ha->host_no,
3803 					      &qla4xxx_iscsi_transport,
3804 					      e->u.ping.status,
3805 					      e->u.ping.pid,
3806 					      e->u.ping.data_size,
3807 					      e->u.ping.data);
3808 			break;
3809 		default:
3810 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3811 				   "supported", e->type);
3812 		}
3813 		kfree(e);
3814 	}
3815 }
3816 
3817 /**
3818  * qla4xxx_do_dpc - dpc routine
3819  * @data: in our case pointer to adapter structure
3820  *
3821  * This routine is a task that is schedule by the interrupt handler
3822  * to perform the background processing for interrupts.  We put it
3823  * on a task queue that is consumed whenever the scheduler runs; that's
3824  * so you can do anything (i.e. put the process to sleep etc).  In fact,
3825  * the mid-level tries to sleep when it reaches the driver threshold
3826  * "host->can_queue". This can cause a panic if we were in our interrupt code.
3827  **/
3828 static void qla4xxx_do_dpc(struct work_struct *work)
3829 {
3830 	struct scsi_qla_host *ha =
3831 		container_of(work, struct scsi_qla_host, dpc_work);
3832 	int status = QLA_ERROR;
3833 
3834 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
3835 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3836 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
3837 
3838 	/* Initialization not yet finished. Don't do anything yet. */
3839 	if (!test_bit(AF_INIT_DONE, &ha->flags))
3840 		return;
3841 
3842 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3843 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3844 		    ha->host_no, __func__, ha->flags));
3845 		return;
3846 	}
3847 
3848 	/* post events to application */
3849 	qla4xxx_do_work(ha);
3850 
3851 	if (is_qla80XX(ha)) {
3852 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3853 			if (is_qla8032(ha) || is_qla8042(ha)) {
3854 				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3855 					   __func__);
3856 				/* disable pause frame for ISP83xx */
3857 				qla4_83xx_disable_pause(ha);
3858 			}
3859 
3860 			ha->isp_ops->idc_lock(ha);
3861 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3862 					    QLA8XXX_DEV_FAILED);
3863 			ha->isp_ops->idc_unlock(ha);
3864 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3865 			qla4_8xxx_device_state_handler(ha);
3866 		}
3867 
3868 		if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
3869 			if (is_qla8042(ha)) {
3870 				if (ha->idc_info.info2 &
3871 				    ENABLE_INTERNAL_LOOPBACK) {
3872 					ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
3873 						   __func__);
3874 					status = qla4_84xx_config_acb(ha,
3875 							    ACB_CONFIG_DISABLE);
3876 					if (status != QLA_SUCCESS) {
3877 						ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
3878 							   __func__);
3879 					}
3880 				}
3881 			}
3882 			qla4_83xx_post_idc_ack(ha);
3883 			clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
3884 		}
3885 
3886 		if (is_qla8042(ha) &&
3887 		    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
3888 			ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
3889 				   __func__);
3890 			if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
3891 			    QLA_SUCCESS) {
3892 				ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
3893 					   __func__);
3894 			}
3895 			clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
3896 		}
3897 
3898 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3899 			qla4_8xxx_need_qsnt_handler(ha);
3900 		}
3901 	}
3902 
3903 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3904 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3905 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3906 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3907 		if ((is_qla8022(ha) && ql4xdontresethba) ||
3908 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
3909 		     qla4_83xx_idc_dontreset(ha))) {
3910 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3911 			    ha->host_no, __func__));
3912 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3913 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3914 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3915 			goto dpc_post_reset_ha;
3916 		}
3917 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3918 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
3919 			qla4xxx_recover_adapter(ha);
3920 
3921 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3922 			uint8_t wait_time = RESET_INTR_TOV;
3923 
3924 			while ((readw(&ha->reg->ctrl_status) &
3925 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3926 				if (--wait_time == 0)
3927 					break;
3928 				msleep(1000);
3929 			}
3930 			if (wait_time == 0)
3931 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
3932 					      "bit not cleared-- resetting\n",
3933 					      ha->host_no, __func__));
3934 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3935 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3936 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3937 				status = qla4xxx_recover_adapter(ha);
3938 			}
3939 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3940 			if (status == QLA_SUCCESS)
3941 				ha->isp_ops->enable_intrs(ha);
3942 		}
3943 	}
3944 
3945 dpc_post_reset_ha:
3946 	/* ---- process AEN? --- */
3947 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3948 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3949 
3950 	/* ---- Get DHCP IP Address? --- */
3951 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3952 		qla4xxx_get_dhcp_ip_address(ha);
3953 
3954 	/* ---- relogin device? --- */
3955 	if (adapter_up(ha) &&
3956 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3957 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3958 	}
3959 
3960 	/* ---- link change? --- */
3961 	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
3962 	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3963 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
3964 			/* ---- link down? --- */
3965 			qla4xxx_mark_all_devices_missing(ha);
3966 		} else {
3967 			/* ---- link up? --- *
3968 			 * F/W will auto login to all devices ONLY ONCE after
3969 			 * link up during driver initialization and runtime
3970 			 * fatal error recovery.  Therefore, the driver must
3971 			 * manually relogin to devices when recovering from
3972 			 * connection failures, logouts, expired KATO, etc. */
3973 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3974 				qla4xxx_build_ddb_list(ha, ha->is_reset);
3975 				iscsi_host_for_each_session(ha->host,
3976 						qla4xxx_login_flash_ddb);
3977 			} else
3978 				qla4xxx_relogin_all_devices(ha);
3979 		}
3980 	}
3981 }
3982 
3983 /**
3984  * qla4xxx_free_adapter - release the adapter
3985  * @ha: pointer to adapter structure
3986  **/
3987 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3988 {
3989 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3990 
3991 	/* Turn-off interrupts on the card. */
3992 	ha->isp_ops->disable_intrs(ha);
3993 
3994 	if (is_qla40XX(ha)) {
3995 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3996 		       &ha->reg->ctrl_status);
3997 		readl(&ha->reg->ctrl_status);
3998 	} else if (is_qla8022(ha)) {
3999 		writel(0, &ha->qla4_82xx_reg->host_int);
4000 		readl(&ha->qla4_82xx_reg->host_int);
4001 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4002 		writel(0, &ha->qla4_83xx_reg->risc_intr);
4003 		readl(&ha->qla4_83xx_reg->risc_intr);
4004 	}
4005 
4006 	/* Remove timer thread, if present */
4007 	if (ha->timer_active)
4008 		qla4xxx_stop_timer(ha);
4009 
4010 	/* Kill the kernel thread for this host */
4011 	if (ha->dpc_thread)
4012 		destroy_workqueue(ha->dpc_thread);
4013 
4014 	/* Kill the kernel thread for this host */
4015 	if (ha->task_wq)
4016 		destroy_workqueue(ha->task_wq);
4017 
4018 	/* Put firmware in known state */
4019 	ha->isp_ops->reset_firmware(ha);
4020 
4021 	if (is_qla80XX(ha)) {
4022 		ha->isp_ops->idc_lock(ha);
4023 		qla4_8xxx_clear_drv_active(ha);
4024 		ha->isp_ops->idc_unlock(ha);
4025 	}
4026 
4027 	/* Detach interrupts */
4028 	qla4xxx_free_irqs(ha);
4029 
4030 	/* free extra memory */
4031 	qla4xxx_mem_free(ha);
4032 }
4033 
4034 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
4035 {
4036 	int status = 0;
4037 	unsigned long mem_base, mem_len, db_base, db_len;
4038 	struct pci_dev *pdev = ha->pdev;
4039 
4040 	status = pci_request_regions(pdev, DRIVER_NAME);
4041 	if (status) {
4042 		printk(KERN_WARNING
4043 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
4044 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
4045 		goto iospace_error_exit;
4046 	}
4047 
4048 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
4049 	    __func__, pdev->revision));
4050 	ha->revision_id = pdev->revision;
4051 
4052 	/* remap phys address */
4053 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
4054 	mem_len = pci_resource_len(pdev, 0);
4055 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
4056 	    __func__, mem_base, mem_len));
4057 
4058 	/* mapping of pcibase pointer */
4059 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
4060 	if (!ha->nx_pcibase) {
4061 		printk(KERN_ERR
4062 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
4063 		pci_release_regions(ha->pdev);
4064 		goto iospace_error_exit;
4065 	}
4066 
4067 	/* Mapping of IO base pointer, door bell read and write pointer */
4068 
4069 	/* mapping of IO base pointer */
4070 	if (is_qla8022(ha)) {
4071 		ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
4072 				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
4073 				     (ha->pdev->devfn << 11));
4074 		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
4075 				    QLA82XX_CAM_RAM_DB2);
4076 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4077 		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
4078 				    ((uint8_t *)ha->nx_pcibase);
4079 	}
4080 
4081 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
4082 	db_len = pci_resource_len(pdev, 4);
4083 
4084 	return 0;
4085 iospace_error_exit:
4086 	return -ENOMEM;
4087 }
4088 
4089 /***
4090  * qla4xxx_iospace_config - maps registers
4091  * @ha: pointer to adapter structure
4092  *
4093  * This routines maps HBA's registers from the pci address space
4094  * into the kernel virtual address space for memory mapped i/o.
4095  **/
4096 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
4097 {
4098 	unsigned long pio, pio_len, pio_flags;
4099 	unsigned long mmio, mmio_len, mmio_flags;
4100 
4101 	pio = pci_resource_start(ha->pdev, 0);
4102 	pio_len = pci_resource_len(ha->pdev, 0);
4103 	pio_flags = pci_resource_flags(ha->pdev, 0);
4104 	if (pio_flags & IORESOURCE_IO) {
4105 		if (pio_len < MIN_IOBASE_LEN) {
4106 			ql4_printk(KERN_WARNING, ha,
4107 				"Invalid PCI I/O region size\n");
4108 			pio = 0;
4109 		}
4110 	} else {
4111 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
4112 		pio = 0;
4113 	}
4114 
4115 	/* Use MMIO operations for all accesses. */
4116 	mmio = pci_resource_start(ha->pdev, 1);
4117 	mmio_len = pci_resource_len(ha->pdev, 1);
4118 	mmio_flags = pci_resource_flags(ha->pdev, 1);
4119 
4120 	if (!(mmio_flags & IORESOURCE_MEM)) {
4121 		ql4_printk(KERN_ERR, ha,
4122 		    "region #0 not an MMIO resource, aborting\n");
4123 
4124 		goto iospace_error_exit;
4125 	}
4126 
4127 	if (mmio_len < MIN_IOBASE_LEN) {
4128 		ql4_printk(KERN_ERR, ha,
4129 		    "Invalid PCI mem region size, aborting\n");
4130 		goto iospace_error_exit;
4131 	}
4132 
4133 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4134 		ql4_printk(KERN_WARNING, ha,
4135 		    "Failed to reserve PIO/MMIO regions\n");
4136 
4137 		goto iospace_error_exit;
4138 	}
4139 
4140 	ha->pio_address = pio;
4141 	ha->pio_length = pio_len;
4142 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4143 	if (!ha->reg) {
4144 		ql4_printk(KERN_ERR, ha,
4145 		    "cannot remap MMIO, aborting\n");
4146 
4147 		goto iospace_error_exit;
4148 	}
4149 
4150 	return 0;
4151 
4152 iospace_error_exit:
4153 	return -ENOMEM;
4154 }
4155 
4156 static struct isp_operations qla4xxx_isp_ops = {
4157 	.iospace_config         = qla4xxx_iospace_config,
4158 	.pci_config             = qla4xxx_pci_config,
4159 	.disable_intrs          = qla4xxx_disable_intrs,
4160 	.enable_intrs           = qla4xxx_enable_intrs,
4161 	.start_firmware         = qla4xxx_start_firmware,
4162 	.intr_handler           = qla4xxx_intr_handler,
4163 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
4164 	.reset_chip             = qla4xxx_soft_reset,
4165 	.reset_firmware         = qla4xxx_hw_reset,
4166 	.queue_iocb             = qla4xxx_queue_iocb,
4167 	.complete_iocb          = qla4xxx_complete_iocb,
4168 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
4169 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
4170 	.get_sys_info           = qla4xxx_get_sys_info,
4171 	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
4172 	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
4173 };
4174 
4175 static struct isp_operations qla4_82xx_isp_ops = {
4176 	.iospace_config         = qla4_8xxx_iospace_config,
4177 	.pci_config             = qla4_8xxx_pci_config,
4178 	.disable_intrs          = qla4_82xx_disable_intrs,
4179 	.enable_intrs           = qla4_82xx_enable_intrs,
4180 	.start_firmware         = qla4_8xxx_load_risc,
4181 	.restart_firmware	= qla4_82xx_try_start_fw,
4182 	.intr_handler           = qla4_82xx_intr_handler,
4183 	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
4184 	.need_reset		= qla4_8xxx_need_reset,
4185 	.reset_chip             = qla4_82xx_isp_reset,
4186 	.reset_firmware         = qla4_8xxx_stop_firmware,
4187 	.queue_iocb             = qla4_82xx_queue_iocb,
4188 	.complete_iocb          = qla4_82xx_complete_iocb,
4189 	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
4190 	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
4191 	.get_sys_info           = qla4_8xxx_get_sys_info,
4192 	.rd_reg_direct		= qla4_82xx_rd_32,
4193 	.wr_reg_direct		= qla4_82xx_wr_32,
4194 	.rd_reg_indirect	= qla4_82xx_md_rd_32,
4195 	.wr_reg_indirect	= qla4_82xx_md_wr_32,
4196 	.idc_lock		= qla4_82xx_idc_lock,
4197 	.idc_unlock		= qla4_82xx_idc_unlock,
4198 	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
4199 	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
4200 	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
4201 };
4202 
4203 static struct isp_operations qla4_83xx_isp_ops = {
4204 	.iospace_config		= qla4_8xxx_iospace_config,
4205 	.pci_config		= qla4_8xxx_pci_config,
4206 	.disable_intrs		= qla4_83xx_disable_intrs,
4207 	.enable_intrs		= qla4_83xx_enable_intrs,
4208 	.start_firmware		= qla4_8xxx_load_risc,
4209 	.restart_firmware	= qla4_83xx_start_firmware,
4210 	.intr_handler		= qla4_83xx_intr_handler,
4211 	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
4212 	.need_reset		= qla4_8xxx_need_reset,
4213 	.reset_chip		= qla4_83xx_isp_reset,
4214 	.reset_firmware		= qla4_8xxx_stop_firmware,
4215 	.queue_iocb		= qla4_83xx_queue_iocb,
4216 	.complete_iocb		= qla4_83xx_complete_iocb,
4217 	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
4218 	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
4219 	.get_sys_info		= qla4_8xxx_get_sys_info,
4220 	.rd_reg_direct		= qla4_83xx_rd_reg,
4221 	.wr_reg_direct		= qla4_83xx_wr_reg,
4222 	.rd_reg_indirect	= qla4_83xx_rd_reg_indirect,
4223 	.wr_reg_indirect	= qla4_83xx_wr_reg_indirect,
4224 	.idc_lock		= qla4_83xx_drv_lock,
4225 	.idc_unlock		= qla4_83xx_drv_unlock,
4226 	.rom_lock_recovery	= qla4_83xx_rom_lock_recovery,
4227 	.queue_mailbox_command	= qla4_83xx_queue_mbox_cmd,
4228 	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
4229 };
4230 
4231 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4232 {
4233 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
4234 }
4235 
4236 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4237 {
4238 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
4239 }
4240 
4241 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4242 {
4243 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
4244 }
4245 
4246 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4247 {
4248 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
4249 }
4250 
4251 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4252 {
4253 	struct scsi_qla_host *ha = data;
4254 	char *str = buf;
4255 	int rc;
4256 
4257 	switch (type) {
4258 	case ISCSI_BOOT_ETH_FLAGS:
4259 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4260 		break;
4261 	case ISCSI_BOOT_ETH_INDEX:
4262 		rc = sprintf(str, "0\n");
4263 		break;
4264 	case ISCSI_BOOT_ETH_MAC:
4265 		rc = sysfs_format_mac(str, ha->my_mac,
4266 				      MAC_ADDR_LEN);
4267 		break;
4268 	default:
4269 		rc = -ENOSYS;
4270 		break;
4271 	}
4272 	return rc;
4273 }
4274 
4275 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4276 {
4277 	int rc;
4278 
4279 	switch (type) {
4280 	case ISCSI_BOOT_ETH_FLAGS:
4281 	case ISCSI_BOOT_ETH_MAC:
4282 	case ISCSI_BOOT_ETH_INDEX:
4283 		rc = S_IRUGO;
4284 		break;
4285 	default:
4286 		rc = 0;
4287 		break;
4288 	}
4289 	return rc;
4290 }
4291 
4292 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4293 {
4294 	struct scsi_qla_host *ha = data;
4295 	char *str = buf;
4296 	int rc;
4297 
4298 	switch (type) {
4299 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4300 		rc = sprintf(str, "%s\n", ha->name_string);
4301 		break;
4302 	default:
4303 		rc = -ENOSYS;
4304 		break;
4305 	}
4306 	return rc;
4307 }
4308 
4309 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4310 {
4311 	int rc;
4312 
4313 	switch (type) {
4314 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4315 		rc = S_IRUGO;
4316 		break;
4317 	default:
4318 		rc = 0;
4319 		break;
4320 	}
4321 	return rc;
4322 }
4323 
4324 static ssize_t
4325 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4326 			   char *buf)
4327 {
4328 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4329 	char *str = buf;
4330 	int rc;
4331 
4332 	switch (type) {
4333 	case ISCSI_BOOT_TGT_NAME:
4334 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
4335 		break;
4336 	case ISCSI_BOOT_TGT_IP_ADDR:
4337 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4338 			rc = sprintf(buf, "%pI4\n",
4339 				     &boot_conn->dest_ipaddr.ip_address);
4340 		else
4341 			rc = sprintf(str, "%pI6\n",
4342 				     &boot_conn->dest_ipaddr.ip_address);
4343 		break;
4344 	case ISCSI_BOOT_TGT_PORT:
4345 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
4346 		break;
4347 	case ISCSI_BOOT_TGT_CHAP_NAME:
4348 		rc = sprintf(str,  "%.*s\n",
4349 			     boot_conn->chap.target_chap_name_length,
4350 			     (char *)&boot_conn->chap.target_chap_name);
4351 		break;
4352 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4353 		rc = sprintf(str,  "%.*s\n",
4354 			     boot_conn->chap.target_secret_length,
4355 			     (char *)&boot_conn->chap.target_secret);
4356 		break;
4357 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4358 		rc = sprintf(str,  "%.*s\n",
4359 			     boot_conn->chap.intr_chap_name_length,
4360 			     (char *)&boot_conn->chap.intr_chap_name);
4361 		break;
4362 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4363 		rc = sprintf(str,  "%.*s\n",
4364 			     boot_conn->chap.intr_secret_length,
4365 			     (char *)&boot_conn->chap.intr_secret);
4366 		break;
4367 	case ISCSI_BOOT_TGT_FLAGS:
4368 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4369 		break;
4370 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4371 		rc = sprintf(str, "0\n");
4372 		break;
4373 	default:
4374 		rc = -ENOSYS;
4375 		break;
4376 	}
4377 	return rc;
4378 }
4379 
4380 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
4381 {
4382 	struct scsi_qla_host *ha = data;
4383 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
4384 
4385 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4386 }
4387 
4388 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
4389 {
4390 	struct scsi_qla_host *ha = data;
4391 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
4392 
4393 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4394 }
4395 
4396 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4397 {
4398 	int rc;
4399 
4400 	switch (type) {
4401 	case ISCSI_BOOT_TGT_NAME:
4402 	case ISCSI_BOOT_TGT_IP_ADDR:
4403 	case ISCSI_BOOT_TGT_PORT:
4404 	case ISCSI_BOOT_TGT_CHAP_NAME:
4405 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4406 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4407 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4408 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4409 	case ISCSI_BOOT_TGT_FLAGS:
4410 		rc = S_IRUGO;
4411 		break;
4412 	default:
4413 		rc = 0;
4414 		break;
4415 	}
4416 	return rc;
4417 }
4418 
4419 static void qla4xxx_boot_release(void *data)
4420 {
4421 	struct scsi_qla_host *ha = data;
4422 
4423 	scsi_host_put(ha->host);
4424 }
4425 
4426 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4427 {
4428 	dma_addr_t buf_dma;
4429 	uint32_t addr, pri_addr, sec_addr;
4430 	uint32_t offset;
4431 	uint16_t func_num;
4432 	uint8_t val;
4433 	uint8_t *buf = NULL;
4434 	size_t size = 13 * sizeof(uint8_t);
4435 	int ret = QLA_SUCCESS;
4436 
4437 	func_num = PCI_FUNC(ha->pdev->devfn);
4438 
4439 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4440 		   __func__, ha->pdev->device, func_num);
4441 
4442 	if (is_qla40XX(ha)) {
4443 		if (func_num == 1) {
4444 			addr = NVRAM_PORT0_BOOT_MODE;
4445 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4446 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4447 		} else if (func_num == 3) {
4448 			addr = NVRAM_PORT1_BOOT_MODE;
4449 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4450 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4451 		} else {
4452 			ret = QLA_ERROR;
4453 			goto exit_boot_info;
4454 		}
4455 
4456 		/* Check Boot Mode */
4457 		val = rd_nvram_byte(ha, addr);
4458 		if (!(val & 0x07)) {
4459 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4460 					  "options : 0x%x\n", __func__, val));
4461 			ret = QLA_ERROR;
4462 			goto exit_boot_info;
4463 		}
4464 
4465 		/* get primary valid target index */
4466 		val = rd_nvram_byte(ha, pri_addr);
4467 		if (val & BIT_7)
4468 			ddb_index[0] = (val & 0x7f);
4469 
4470 		/* get secondary valid target index */
4471 		val = rd_nvram_byte(ha, sec_addr);
4472 		if (val & BIT_7)
4473 			ddb_index[1] = (val & 0x7f);
4474 
4475 	} else if (is_qla80XX(ha)) {
4476 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
4477 					 &buf_dma, GFP_KERNEL);
4478 		if (!buf) {
4479 			DEBUG2(ql4_printk(KERN_ERR, ha,
4480 					  "%s: Unable to allocate dma buffer\n",
4481 					   __func__));
4482 			ret = QLA_ERROR;
4483 			goto exit_boot_info;
4484 		}
4485 
4486 		if (ha->port_num == 0)
4487 			offset = BOOT_PARAM_OFFSET_PORT0;
4488 		else if (ha->port_num == 1)
4489 			offset = BOOT_PARAM_OFFSET_PORT1;
4490 		else {
4491 			ret = QLA_ERROR;
4492 			goto exit_boot_info_free;
4493 		}
4494 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4495 		       offset;
4496 		if (qla4xxx_get_flash(ha, buf_dma, addr,
4497 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4498 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4499 					  " failed\n", ha->host_no, __func__));
4500 			ret = QLA_ERROR;
4501 			goto exit_boot_info_free;
4502 		}
4503 		/* Check Boot Mode */
4504 		if (!(buf[1] & 0x07)) {
4505 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4506 					  " : 0x%x\n", buf[1]));
4507 			ret = QLA_ERROR;
4508 			goto exit_boot_info_free;
4509 		}
4510 
4511 		/* get primary valid target index */
4512 		if (buf[2] & BIT_7)
4513 			ddb_index[0] = buf[2] & 0x7f;
4514 
4515 		/* get secondary valid target index */
4516 		if (buf[11] & BIT_7)
4517 			ddb_index[1] = buf[11] & 0x7f;
4518 	} else {
4519 		ret = QLA_ERROR;
4520 		goto exit_boot_info;
4521 	}
4522 
4523 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4524 			  " target ID %d\n", __func__, ddb_index[0],
4525 			  ddb_index[1]));
4526 
4527 exit_boot_info_free:
4528 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4529 exit_boot_info:
4530 	ha->pri_ddb_idx = ddb_index[0];
4531 	ha->sec_ddb_idx = ddb_index[1];
4532 	return ret;
4533 }
4534 
4535 /**
4536  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4537  * @ha: pointer to adapter structure
4538  * @username: CHAP username to be returned
4539  * @password: CHAP password to be returned
4540  *
4541  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4542  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4543  * So from the CHAP cache find the first BIDI CHAP entry and set it
4544  * to the boot record in sysfs.
4545  **/
4546 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4547 			    char *password)
4548 {
4549 	int i, ret = -EINVAL;
4550 	int max_chap_entries = 0;
4551 	struct ql4_chap_table *chap_table;
4552 
4553 	if (is_qla80XX(ha))
4554 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
4555 						sizeof(struct ql4_chap_table);
4556 	else
4557 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4558 
4559 	if (!ha->chap_list) {
4560 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4561 		return ret;
4562 	}
4563 
4564 	mutex_lock(&ha->chap_sem);
4565 	for (i = 0; i < max_chap_entries; i++) {
4566 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4567 		if (chap_table->cookie !=
4568 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4569 			continue;
4570 		}
4571 
4572 		if (chap_table->flags & BIT_7) /* local */
4573 			continue;
4574 
4575 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4576 			continue;
4577 
4578 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4579 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4580 		ret = 0;
4581 		break;
4582 	}
4583 	mutex_unlock(&ha->chap_sem);
4584 
4585 	return ret;
4586 }
4587 
4588 
4589 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4590 				   struct ql4_boot_session_info *boot_sess,
4591 				   uint16_t ddb_index)
4592 {
4593 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4594 	struct dev_db_entry *fw_ddb_entry;
4595 	dma_addr_t fw_ddb_entry_dma;
4596 	uint16_t idx;
4597 	uint16_t options;
4598 	int ret = QLA_SUCCESS;
4599 
4600 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4601 					  &fw_ddb_entry_dma, GFP_KERNEL);
4602 	if (!fw_ddb_entry) {
4603 		DEBUG2(ql4_printk(KERN_ERR, ha,
4604 				  "%s: Unable to allocate dma buffer.\n",
4605 				  __func__));
4606 		ret = QLA_ERROR;
4607 		return ret;
4608 	}
4609 
4610 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4611 				   fw_ddb_entry_dma, ddb_index)) {
4612 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4613 				  "index [%d]\n", __func__, ddb_index));
4614 		ret = QLA_ERROR;
4615 		goto exit_boot_target;
4616 	}
4617 
4618 	/* Update target name and IP from DDB */
4619 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4620 	       min(sizeof(boot_sess->target_name),
4621 		   sizeof(fw_ddb_entry->iscsi_name)));
4622 
4623 	options = le16_to_cpu(fw_ddb_entry->options);
4624 	if (options & DDB_OPT_IPV6_DEVICE) {
4625 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4626 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4627 	} else {
4628 		boot_conn->dest_ipaddr.ip_type = 0x1;
4629 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4630 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4631 	}
4632 
4633 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4634 
4635 	/* update chap information */
4636 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4637 
4638 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
4639 
4640 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4641 
4642 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4643 				       target_chap_name,
4644 				       (char *)&boot_conn->chap.target_secret,
4645 				       idx);
4646 		if (ret) {
4647 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4648 			ret = QLA_ERROR;
4649 			goto exit_boot_target;
4650 		}
4651 
4652 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4653 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4654 	}
4655 
4656 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4657 
4658 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4659 
4660 		ret = qla4xxx_get_bidi_chap(ha,
4661 				    (char *)&boot_conn->chap.intr_chap_name,
4662 				    (char *)&boot_conn->chap.intr_secret);
4663 
4664 		if (ret) {
4665 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4666 			ret = QLA_ERROR;
4667 			goto exit_boot_target;
4668 		}
4669 
4670 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4671 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4672 	}
4673 
4674 exit_boot_target:
4675 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4676 			  fw_ddb_entry, fw_ddb_entry_dma);
4677 	return ret;
4678 }
4679 
4680 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4681 {
4682 	uint16_t ddb_index[2];
4683 	int ret = QLA_ERROR;
4684 	int rval;
4685 
4686 	memset(ddb_index, 0, sizeof(ddb_index));
4687 	ddb_index[0] = 0xffff;
4688 	ddb_index[1] = 0xffff;
4689 	ret = get_fw_boot_info(ha, ddb_index);
4690 	if (ret != QLA_SUCCESS) {
4691 		DEBUG2(ql4_printk(KERN_INFO, ha,
4692 				"%s: No boot target configured.\n", __func__));
4693 		return ret;
4694 	}
4695 
4696 	if (ql4xdisablesysfsboot)
4697 		return QLA_SUCCESS;
4698 
4699 	if (ddb_index[0] == 0xffff)
4700 		goto sec_target;
4701 
4702 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4703 				      ddb_index[0]);
4704 	if (rval != QLA_SUCCESS) {
4705 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4706 				  "configured\n", __func__));
4707 	} else
4708 		ret = QLA_SUCCESS;
4709 
4710 sec_target:
4711 	if (ddb_index[1] == 0xffff)
4712 		goto exit_get_boot_info;
4713 
4714 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4715 				      ddb_index[1]);
4716 	if (rval != QLA_SUCCESS) {
4717 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4718 				  " configured\n", __func__));
4719 	} else
4720 		ret = QLA_SUCCESS;
4721 
4722 exit_get_boot_info:
4723 	return ret;
4724 }
4725 
4726 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4727 {
4728 	struct iscsi_boot_kobj *boot_kobj;
4729 
4730 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4731 		return QLA_ERROR;
4732 
4733 	if (ql4xdisablesysfsboot) {
4734 		ql4_printk(KERN_INFO, ha,
4735 			   "%s: syfsboot disabled - driver will trigger login "
4736 			   "and publish session for discovery .\n", __func__);
4737 		return QLA_SUCCESS;
4738 	}
4739 
4740 
4741 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4742 	if (!ha->boot_kset)
4743 		goto kset_free;
4744 
4745 	if (!scsi_host_get(ha->host))
4746 		goto kset_free;
4747 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4748 					     qla4xxx_show_boot_tgt_pri_info,
4749 					     qla4xxx_tgt_get_attr_visibility,
4750 					     qla4xxx_boot_release);
4751 	if (!boot_kobj)
4752 		goto put_host;
4753 
4754 	if (!scsi_host_get(ha->host))
4755 		goto kset_free;
4756 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4757 					     qla4xxx_show_boot_tgt_sec_info,
4758 					     qla4xxx_tgt_get_attr_visibility,
4759 					     qla4xxx_boot_release);
4760 	if (!boot_kobj)
4761 		goto put_host;
4762 
4763 	if (!scsi_host_get(ha->host))
4764 		goto kset_free;
4765 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4766 					       qla4xxx_show_boot_ini_info,
4767 					       qla4xxx_ini_get_attr_visibility,
4768 					       qla4xxx_boot_release);
4769 	if (!boot_kobj)
4770 		goto put_host;
4771 
4772 	if (!scsi_host_get(ha->host))
4773 		goto kset_free;
4774 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4775 					       qla4xxx_show_boot_eth_info,
4776 					       qla4xxx_eth_get_attr_visibility,
4777 					       qla4xxx_boot_release);
4778 	if (!boot_kobj)
4779 		goto put_host;
4780 
4781 	return QLA_SUCCESS;
4782 
4783 put_host:
4784 	scsi_host_put(ha->host);
4785 kset_free:
4786 	iscsi_boot_destroy_kset(ha->boot_kset);
4787 	return -ENOMEM;
4788 }
4789 
4790 
4791 /**
4792  * qla4xxx_create chap_list - Create CHAP list from FLASH
4793  * @ha: pointer to adapter structure
4794  *
4795  * Read flash and make a list of CHAP entries, during login when a CHAP entry
4796  * is received, it will be checked in this list. If entry exist then the CHAP
4797  * entry index is set in the DDB. If CHAP entry does not exist in this list
4798  * then a new entry is added in FLASH in CHAP table and the index obtained is
4799  * used in the DDB.
4800  **/
4801 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4802 {
4803 	int rval = 0;
4804 	uint8_t *chap_flash_data = NULL;
4805 	uint32_t offset;
4806 	dma_addr_t chap_dma;
4807 	uint32_t chap_size = 0;
4808 
4809 	if (is_qla40XX(ha))
4810 		chap_size = MAX_CHAP_ENTRIES_40XX  *
4811 					sizeof(struct ql4_chap_table);
4812 	else	/* Single region contains CHAP info for both
4813 		 * ports which is divided into half for each port.
4814 		 */
4815 		chap_size = ha->hw.flt_chap_size / 2;
4816 
4817 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4818 					  &chap_dma, GFP_KERNEL);
4819 	if (!chap_flash_data) {
4820 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4821 		return;
4822 	}
4823 	if (is_qla40XX(ha))
4824 		offset = FLASH_CHAP_OFFSET;
4825 	else {
4826 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4827 		if (ha->port_num == 1)
4828 			offset += chap_size;
4829 	}
4830 
4831 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4832 	if (rval != QLA_SUCCESS)
4833 		goto exit_chap_list;
4834 
4835 	if (ha->chap_list == NULL)
4836 		ha->chap_list = vmalloc(chap_size);
4837 	if (ha->chap_list == NULL) {
4838 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4839 		goto exit_chap_list;
4840 	}
4841 
4842 	memcpy(ha->chap_list, chap_flash_data, chap_size);
4843 
4844 exit_chap_list:
4845 	dma_free_coherent(&ha->pdev->dev, chap_size,
4846 			chap_flash_data, chap_dma);
4847 }
4848 
4849 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4850 				  struct ql4_tuple_ddb *tddb)
4851 {
4852 	struct scsi_qla_host *ha;
4853 	struct iscsi_cls_session *cls_sess;
4854 	struct iscsi_cls_conn *cls_conn;
4855 	struct iscsi_session *sess;
4856 	struct iscsi_conn *conn;
4857 
4858 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4859 	ha = ddb_entry->ha;
4860 	cls_sess = ddb_entry->sess;
4861 	sess = cls_sess->dd_data;
4862 	cls_conn = ddb_entry->conn;
4863 	conn = cls_conn->dd_data;
4864 
4865 	tddb->tpgt = sess->tpgt;
4866 	tddb->port = conn->persistent_port;
4867 	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4868 	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4869 }
4870 
4871 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4872 				      struct ql4_tuple_ddb *tddb,
4873 				      uint8_t *flash_isid)
4874 {
4875 	uint16_t options = 0;
4876 
4877 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4878 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4879 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4880 
4881 	options = le16_to_cpu(fw_ddb_entry->options);
4882 	if (options & DDB_OPT_IPV6_DEVICE)
4883 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4884 	else
4885 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4886 
4887 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
4888 
4889 	if (flash_isid == NULL)
4890 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4891 		       sizeof(tddb->isid));
4892 	else
4893 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4894 }
4895 
4896 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4897 				     struct ql4_tuple_ddb *old_tddb,
4898 				     struct ql4_tuple_ddb *new_tddb,
4899 				     uint8_t is_isid_compare)
4900 {
4901 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4902 		return QLA_ERROR;
4903 
4904 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4905 		return QLA_ERROR;
4906 
4907 	if (old_tddb->port != new_tddb->port)
4908 		return QLA_ERROR;
4909 
4910 	/* For multi sessions, driver generates the ISID, so do not compare
4911 	 * ISID in reset path since it would be a comparison between the
4912 	 * driver generated ISID and firmware generated ISID. This could
4913 	 * lead to adding duplicated DDBs in the list as driver generated
4914 	 * ISID would not match firmware generated ISID.
4915 	 */
4916 	if (is_isid_compare) {
4917 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4918 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4919 			__func__, old_tddb->isid[5], old_tddb->isid[4],
4920 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4921 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4922 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4923 			new_tddb->isid[0]));
4924 
4925 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4926 			   sizeof(old_tddb->isid)))
4927 			return QLA_ERROR;
4928 	}
4929 
4930 	DEBUG2(ql4_printk(KERN_INFO, ha,
4931 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4932 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4933 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4934 			  new_tddb->ip_addr, new_tddb->iscsi_name));
4935 
4936 	return QLA_SUCCESS;
4937 }
4938 
4939 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4940 				     struct dev_db_entry *fw_ddb_entry)
4941 {
4942 	struct ddb_entry *ddb_entry;
4943 	struct ql4_tuple_ddb *fw_tddb = NULL;
4944 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4945 	int idx;
4946 	int ret = QLA_ERROR;
4947 
4948 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4949 	if (!fw_tddb) {
4950 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4951 				  "Memory Allocation failed.\n"));
4952 		ret = QLA_SUCCESS;
4953 		goto exit_check;
4954 	}
4955 
4956 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4957 	if (!tmp_tddb) {
4958 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4959 				  "Memory Allocation failed.\n"));
4960 		ret = QLA_SUCCESS;
4961 		goto exit_check;
4962 	}
4963 
4964 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4965 
4966 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4967 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4968 		if (ddb_entry == NULL)
4969 			continue;
4970 
4971 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4972 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4973 			ret = QLA_SUCCESS; /* found */
4974 			goto exit_check;
4975 		}
4976 	}
4977 
4978 exit_check:
4979 	if (fw_tddb)
4980 		vfree(fw_tddb);
4981 	if (tmp_tddb)
4982 		vfree(tmp_tddb);
4983 	return ret;
4984 }
4985 
4986 /**
4987  * qla4xxx_check_existing_isid - check if target with same isid exist
4988  *				 in target list
4989  * @list_nt: list of target
4990  * @isid: isid to check
4991  *
4992  * This routine return QLA_SUCCESS if target with same isid exist
4993  **/
4994 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4995 {
4996 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4997 	struct dev_db_entry *fw_ddb_entry;
4998 
4999 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5000 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
5001 
5002 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
5003 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
5004 			return QLA_SUCCESS;
5005 		}
5006 	}
5007 	return QLA_ERROR;
5008 }
5009 
5010 /**
5011  * qla4xxx_update_isid - compare ddbs and updated isid
5012  * @ha: Pointer to host adapter structure.
5013  * @list_nt: list of nt target
5014  * @fw_ddb_entry: firmware ddb entry
5015  *
5016  * This routine update isid if ddbs have same iqn, same isid and
5017  * different IP addr.
5018  * Return QLA_SUCCESS if isid is updated.
5019  **/
5020 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
5021 			       struct list_head *list_nt,
5022 			       struct dev_db_entry *fw_ddb_entry)
5023 {
5024 	uint8_t base_value, i;
5025 
5026 	base_value = fw_ddb_entry->isid[1] & 0x1f;
5027 	for (i = 0; i < 8; i++) {
5028 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
5029 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5030 			break;
5031 	}
5032 
5033 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5034 		return QLA_ERROR;
5035 
5036 	return QLA_SUCCESS;
5037 }
5038 
5039 /**
5040  * qla4xxx_should_update_isid - check if isid need to update
5041  * @ha: Pointer to host adapter structure.
5042  * @old_tddb: ddb tuple
5043  * @new_tddb: ddb tuple
5044  *
5045  * Return QLA_SUCCESS if different IP, different PORT, same iqn,
5046  * same isid
5047  **/
5048 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
5049 				      struct ql4_tuple_ddb *old_tddb,
5050 				      struct ql4_tuple_ddb *new_tddb)
5051 {
5052 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
5053 		/* Same ip */
5054 		if (old_tddb->port == new_tddb->port)
5055 			return QLA_ERROR;
5056 	}
5057 
5058 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5059 		/* different iqn */
5060 		return QLA_ERROR;
5061 
5062 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5063 		   sizeof(old_tddb->isid)))
5064 		/* different isid */
5065 		return QLA_ERROR;
5066 
5067 	return QLA_SUCCESS;
5068 }
5069 
5070 /**
5071  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
5072  * @ha: Pointer to host adapter structure.
5073  * @list_nt: list of nt target.
5074  * @fw_ddb_entry: firmware ddb entry.
5075  *
5076  * This routine check if fw_ddb_entry already exists in list_nt to avoid
5077  * duplicate ddb in list_nt.
5078  * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
5079  * Note: This function also update isid of DDB if required.
5080  **/
5081 
5082 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
5083 				       struct list_head *list_nt,
5084 				       struct dev_db_entry *fw_ddb_entry)
5085 {
5086 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
5087 	struct ql4_tuple_ddb *fw_tddb = NULL;
5088 	struct ql4_tuple_ddb *tmp_tddb = NULL;
5089 	int rval, ret = QLA_ERROR;
5090 
5091 	fw_tddb = vzalloc(sizeof(*fw_tddb));
5092 	if (!fw_tddb) {
5093 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5094 				  "Memory Allocation failed.\n"));
5095 		ret = QLA_SUCCESS;
5096 		goto exit_check;
5097 	}
5098 
5099 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5100 	if (!tmp_tddb) {
5101 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5102 				  "Memory Allocation failed.\n"));
5103 		ret = QLA_SUCCESS;
5104 		goto exit_check;
5105 	}
5106 
5107 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5108 
5109 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5110 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
5111 					  nt_ddb_idx->flash_isid);
5112 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
5113 		/* found duplicate ddb */
5114 		if (ret == QLA_SUCCESS)
5115 			goto exit_check;
5116 	}
5117 
5118 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5119 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
5120 
5121 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
5122 		if (ret == QLA_SUCCESS) {
5123 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
5124 			if (rval == QLA_SUCCESS)
5125 				ret = QLA_ERROR;
5126 			else
5127 				ret = QLA_SUCCESS;
5128 
5129 			goto exit_check;
5130 		}
5131 	}
5132 
5133 exit_check:
5134 	if (fw_tddb)
5135 		vfree(fw_tddb);
5136 	if (tmp_tddb)
5137 		vfree(tmp_tddb);
5138 	return ret;
5139 }
5140 
5141 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5142 {
5143 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5144 
5145 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5146 		list_del_init(&ddb_idx->list);
5147 		vfree(ddb_idx);
5148 	}
5149 }
5150 
5151 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5152 					struct dev_db_entry *fw_ddb_entry)
5153 {
5154 	struct iscsi_endpoint *ep;
5155 	struct sockaddr_in *addr;
5156 	struct sockaddr_in6 *addr6;
5157 	struct sockaddr *t_addr;
5158 	struct sockaddr_storage *dst_addr;
5159 	char *ip;
5160 
5161 	/* TODO: need to destroy on unload iscsi_endpoint*/
5162 	dst_addr = vmalloc(sizeof(*dst_addr));
5163 	if (!dst_addr)
5164 		return NULL;
5165 
5166 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5167 		t_addr = (struct sockaddr *)dst_addr;
5168 		t_addr->sa_family = AF_INET6;
5169 		addr6 = (struct sockaddr_in6 *)dst_addr;
5170 		ip = (char *)&addr6->sin6_addr;
5171 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5172 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5173 
5174 	} else {
5175 		t_addr = (struct sockaddr *)dst_addr;
5176 		t_addr->sa_family = AF_INET;
5177 		addr = (struct sockaddr_in *)dst_addr;
5178 		ip = (char *)&addr->sin_addr;
5179 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5180 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5181 	}
5182 
5183 	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5184 	vfree(dst_addr);
5185 	return ep;
5186 }
5187 
5188 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5189 {
5190 	if (ql4xdisablesysfsboot)
5191 		return QLA_SUCCESS;
5192 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5193 		return QLA_ERROR;
5194 	return QLA_SUCCESS;
5195 }
5196 
5197 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
5198 					  struct ddb_entry *ddb_entry,
5199 					  uint16_t idx)
5200 {
5201 	uint16_t def_timeout;
5202 
5203 	ddb_entry->ddb_type = FLASH_DDB;
5204 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
5205 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
5206 	ddb_entry->ha = ha;
5207 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
5208 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
5209 
5210 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
5211 	atomic_set(&ddb_entry->relogin_timer, 0);
5212 	atomic_set(&ddb_entry->relogin_retry_count, 0);
5213 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
5214 	ddb_entry->default_relogin_timeout =
5215 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
5216 		def_timeout : LOGIN_TOV;
5217 	ddb_entry->default_time2wait =
5218 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
5219 
5220 	if (ql4xdisablesysfsboot &&
5221 	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
5222 		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
5223 }
5224 
5225 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
5226 {
5227 	uint32_t idx = 0;
5228 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
5229 	uint32_t sts[MBOX_REG_COUNT];
5230 	uint32_t ip_state;
5231 	unsigned long wtime;
5232 	int ret;
5233 
5234 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
5235 	do {
5236 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
5237 			if (ip_idx[idx] == -1)
5238 				continue;
5239 
5240 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
5241 
5242 			if (ret == QLA_ERROR) {
5243 				ip_idx[idx] = -1;
5244 				continue;
5245 			}
5246 
5247 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
5248 
5249 			DEBUG2(ql4_printk(KERN_INFO, ha,
5250 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
5251 					  ip_idx[idx], ip_state));
5252 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
5253 			    ip_state == IP_ADDRSTATE_INVALID ||
5254 			    ip_state == IP_ADDRSTATE_PREFERRED ||
5255 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
5256 			    ip_state == IP_ADDRSTATE_DISABLING)
5257 				ip_idx[idx] = -1;
5258 		}
5259 
5260 		/* Break if all IP states checked */
5261 		if ((ip_idx[0] == -1) &&
5262 		    (ip_idx[1] == -1) &&
5263 		    (ip_idx[2] == -1) &&
5264 		    (ip_idx[3] == -1))
5265 			break;
5266 		schedule_timeout_uninterruptible(HZ);
5267 	} while (time_after(wtime, jiffies));
5268 }
5269 
5270 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
5271 				  struct list_head *list_st)
5272 {
5273 	struct qla_ddb_index  *st_ddb_idx;
5274 	int max_ddbs;
5275 	int fw_idx_size;
5276 	struct dev_db_entry *fw_ddb_entry;
5277 	dma_addr_t fw_ddb_dma;
5278 	int ret;
5279 	uint32_t idx = 0, next_idx = 0;
5280 	uint32_t state = 0, conn_err = 0;
5281 	uint16_t conn_id = 0;
5282 
5283 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5284 				      &fw_ddb_dma);
5285 	if (fw_ddb_entry == NULL) {
5286 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5287 		goto exit_st_list;
5288 	}
5289 
5290 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5291 				     MAX_DEV_DB_ENTRIES;
5292 	fw_idx_size = sizeof(struct qla_ddb_index);
5293 
5294 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5295 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5296 					      NULL, &next_idx, &state,
5297 					      &conn_err, NULL, &conn_id);
5298 		if (ret == QLA_ERROR)
5299 			break;
5300 
5301 		/* Ignore DDB if invalid state (unassigned) */
5302 		if (state == DDB_DS_UNASSIGNED)
5303 			goto continue_next_st;
5304 
5305 		/* Check if ST, add to the list_st */
5306 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
5307 			goto continue_next_st;
5308 
5309 		st_ddb_idx = vzalloc(fw_idx_size);
5310 		if (!st_ddb_idx)
5311 			break;
5312 
5313 		st_ddb_idx->fw_ddb_idx = idx;
5314 
5315 		list_add_tail(&st_ddb_idx->list, list_st);
5316 continue_next_st:
5317 		if (next_idx == 0)
5318 			break;
5319 	}
5320 
5321 exit_st_list:
5322 	if (fw_ddb_entry)
5323 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5324 }
5325 
5326 /**
5327  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5328  * @ha: pointer to adapter structure
5329  * @list_ddb: List from which failed ddb to be removed
5330  *
5331  * Iterate over the list of DDBs and find and remove DDBs that are either in
5332  * no connection active state or failed state
5333  **/
5334 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5335 				      struct list_head *list_ddb)
5336 {
5337 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5338 	uint32_t next_idx = 0;
5339 	uint32_t state = 0, conn_err = 0;
5340 	int ret;
5341 
5342 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5343 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5344 					      NULL, 0, NULL, &next_idx, &state,
5345 					      &conn_err, NULL, NULL);
5346 		if (ret == QLA_ERROR)
5347 			continue;
5348 
5349 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5350 		    state == DDB_DS_SESSION_FAILED) {
5351 			list_del_init(&ddb_idx->list);
5352 			vfree(ddb_idx);
5353 		}
5354 	}
5355 }
5356 
5357 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
5358 				   struct dev_db_entry *fw_ddb_entry,
5359 				   int is_reset, uint16_t idx)
5360 {
5361 	struct iscsi_cls_session *cls_sess;
5362 	struct iscsi_session *sess;
5363 	struct iscsi_cls_conn *cls_conn;
5364 	struct iscsi_endpoint *ep;
5365 	uint16_t cmds_max = 32;
5366 	uint16_t conn_id = 0;
5367 	uint32_t initial_cmdsn = 0;
5368 	int ret = QLA_SUCCESS;
5369 
5370 	struct ddb_entry *ddb_entry = NULL;
5371 
5372 	/* Create session object, with INVALID_ENTRY,
5373 	 * the targer_id would get set when we issue the login
5374 	 */
5375 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
5376 				       cmds_max, sizeof(struct ddb_entry),
5377 				       sizeof(struct ql4_task_data),
5378 				       initial_cmdsn, INVALID_ENTRY);
5379 	if (!cls_sess) {
5380 		ret = QLA_ERROR;
5381 		goto exit_setup;
5382 	}
5383 
5384 	/*
5385 	 * so calling module_put function to decrement the
5386 	 * reference count.
5387 	 **/
5388 	module_put(qla4xxx_iscsi_transport.owner);
5389 	sess = cls_sess->dd_data;
5390 	ddb_entry = sess->dd_data;
5391 	ddb_entry->sess = cls_sess;
5392 
5393 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
5394 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
5395 	       sizeof(struct dev_db_entry));
5396 
5397 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
5398 
5399 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
5400 
5401 	if (!cls_conn) {
5402 		ret = QLA_ERROR;
5403 		goto exit_setup;
5404 	}
5405 
5406 	ddb_entry->conn = cls_conn;
5407 
5408 	/* Setup ep, for displaying attributes in sysfs */
5409 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
5410 	if (ep) {
5411 		ep->conn = cls_conn;
5412 		cls_conn->ep = ep;
5413 	} else {
5414 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
5415 		ret = QLA_ERROR;
5416 		goto exit_setup;
5417 	}
5418 
5419 	/* Update sess/conn params */
5420 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
5421 
5422 	if (is_reset == RESET_ADAPTER) {
5423 		iscsi_block_session(cls_sess);
5424 		/* Use the relogin path to discover new devices
5425 		 *  by short-circuting the logic of setting
5426 		 *  timer to relogin - instead set the flags
5427 		 *  to initiate login right away.
5428 		 */
5429 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
5430 		set_bit(DF_RELOGIN, &ddb_entry->flags);
5431 	}
5432 
5433 exit_setup:
5434 	return ret;
5435 }
5436 
5437 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5438 				  struct list_head *list_nt, int is_reset)
5439 {
5440 	struct dev_db_entry *fw_ddb_entry;
5441 	dma_addr_t fw_ddb_dma;
5442 	int max_ddbs;
5443 	int fw_idx_size;
5444 	int ret;
5445 	uint32_t idx = 0, next_idx = 0;
5446 	uint32_t state = 0, conn_err = 0;
5447 	uint16_t conn_id = 0;
5448 	struct qla_ddb_index  *nt_ddb_idx;
5449 
5450 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5451 				      &fw_ddb_dma);
5452 	if (fw_ddb_entry == NULL) {
5453 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5454 		goto exit_nt_list;
5455 	}
5456 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5457 				     MAX_DEV_DB_ENTRIES;
5458 	fw_idx_size = sizeof(struct qla_ddb_index);
5459 
5460 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5461 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5462 					      NULL, &next_idx, &state,
5463 					      &conn_err, NULL, &conn_id);
5464 		if (ret == QLA_ERROR)
5465 			break;
5466 
5467 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
5468 			goto continue_next_nt;
5469 
5470 		/* Check if NT, then add to list it */
5471 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
5472 			goto continue_next_nt;
5473 
5474 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
5475 		    state == DDB_DS_SESSION_FAILED))
5476 			goto continue_next_nt;
5477 
5478 		DEBUG2(ql4_printk(KERN_INFO, ha,
5479 				  "Adding  DDB to session = 0x%x\n", idx));
5480 		if (is_reset == INIT_ADAPTER) {
5481 			nt_ddb_idx = vmalloc(fw_idx_size);
5482 			if (!nt_ddb_idx)
5483 				break;
5484 
5485 			nt_ddb_idx->fw_ddb_idx = idx;
5486 
5487 			/* Copy original isid as it may get updated in function
5488 			 * qla4xxx_update_isid(). We need original isid in
5489 			 * function qla4xxx_compare_tuple_ddb to find duplicate
5490 			 * target */
5491 			memcpy(&nt_ddb_idx->flash_isid[0],
5492 			       &fw_ddb_entry->isid[0],
5493 			       sizeof(nt_ddb_idx->flash_isid));
5494 
5495 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
5496 							  fw_ddb_entry);
5497 			if (ret == QLA_SUCCESS) {
5498 				/* free nt_ddb_idx and do not add to list_nt */
5499 				vfree(nt_ddb_idx);
5500 				goto continue_next_nt;
5501 			}
5502 
5503 			/* Copy updated isid */
5504 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
5505 			       sizeof(struct dev_db_entry));
5506 
5507 			list_add_tail(&nt_ddb_idx->list, list_nt);
5508 		} else if (is_reset == RESET_ADAPTER) {
5509 			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
5510 								QLA_SUCCESS)
5511 				goto continue_next_nt;
5512 		}
5513 
5514 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5515 		if (ret == QLA_ERROR)
5516 			goto exit_nt_list;
5517 
5518 continue_next_nt:
5519 		if (next_idx == 0)
5520 			break;
5521 	}
5522 
5523 exit_nt_list:
5524 	if (fw_ddb_entry)
5525 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5526 }
5527 
5528 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
5529 				      struct list_head *list_nt)
5530 {
5531 	struct dev_db_entry *fw_ddb_entry;
5532 	dma_addr_t fw_ddb_dma;
5533 	int max_ddbs;
5534 	int fw_idx_size;
5535 	int ret;
5536 	uint32_t idx = 0, next_idx = 0;
5537 	uint32_t state = 0, conn_err = 0;
5538 	uint16_t conn_id = 0;
5539 	struct qla_ddb_index  *nt_ddb_idx;
5540 
5541 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5542 				      &fw_ddb_dma);
5543 	if (fw_ddb_entry == NULL) {
5544 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5545 		goto exit_new_nt_list;
5546 	}
5547 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5548 				     MAX_DEV_DB_ENTRIES;
5549 	fw_idx_size = sizeof(struct qla_ddb_index);
5550 
5551 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5552 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5553 					      NULL, &next_idx, &state,
5554 					      &conn_err, NULL, &conn_id);
5555 		if (ret == QLA_ERROR)
5556 			break;
5557 
5558 		/* Check if NT, then add it to list */
5559 		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5560 			goto continue_next_new_nt;
5561 
5562 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
5563 			goto continue_next_new_nt;
5564 
5565 		DEBUG2(ql4_printk(KERN_INFO, ha,
5566 				  "Adding  DDB to session = 0x%x\n", idx));
5567 
5568 		nt_ddb_idx = vmalloc(fw_idx_size);
5569 		if (!nt_ddb_idx)
5570 			break;
5571 
5572 		nt_ddb_idx->fw_ddb_idx = idx;
5573 
5574 		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
5575 		if (ret == QLA_SUCCESS) {
5576 			/* free nt_ddb_idx and do not add to list_nt */
5577 			vfree(nt_ddb_idx);
5578 			goto continue_next_new_nt;
5579 		}
5580 
5581 		list_add_tail(&nt_ddb_idx->list, list_nt);
5582 
5583 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5584 					      idx);
5585 		if (ret == QLA_ERROR)
5586 			goto exit_new_nt_list;
5587 
5588 continue_next_new_nt:
5589 		if (next_idx == 0)
5590 			break;
5591 	}
5592 
5593 exit_new_nt_list:
5594 	if (fw_ddb_entry)
5595 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5596 }
5597 
5598 /**
5599  * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
5600  * @dev: dev associated with the sysfs entry
5601  * @data: pointer to flashnode session object
5602  *
5603  * Returns:
5604  *	1: if flashnode entry is non-persistent
5605  *	0: if flashnode entry is persistent
5606  **/
5607 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5608 {
5609 	struct iscsi_bus_flash_session *fnode_sess;
5610 
5611 	if (!iscsi_flashnode_bus_match(dev, NULL))
5612 		return 0;
5613 
5614 	fnode_sess = iscsi_dev_to_flash_session(dev);
5615 
5616 	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
5617 }
5618 
5619 /**
5620  * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
5621  * @ha: pointer to host
5622  * @fw_ddb_entry: flash ddb data
5623  * @idx: target index
5624  * @user: if set then this call is made from userland else from kernel
5625  *
5626  * Returns:
5627  * On sucess: QLA_SUCCESS
5628  * On failure: QLA_ERROR
5629  *
5630  * This create separate sysfs entries for session and connection attributes of
5631  * the given fw ddb entry.
5632  * If this is invoked as a result of a userspace call then the entry is marked
5633  * as nonpersistent using flash_state field.
5634  **/
5635 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5636 					struct dev_db_entry *fw_ddb_entry,
5637 					uint16_t *idx, int user)
5638 {
5639 	struct iscsi_bus_flash_session *fnode_sess = NULL;
5640 	struct iscsi_bus_flash_conn *fnode_conn = NULL;
5641 	int rc = QLA_ERROR;
5642 
5643 	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
5644 						 &qla4xxx_iscsi_transport, 0);
5645 	if (!fnode_sess) {
5646 		ql4_printk(KERN_ERR, ha,
5647 			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
5648 			   __func__, *idx, ha->host_no);
5649 		goto exit_tgt_create;
5650 	}
5651 
5652 	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
5653 						 &qla4xxx_iscsi_transport, 0);
5654 	if (!fnode_conn) {
5655 		ql4_printk(KERN_ERR, ha,
5656 			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
5657 			   __func__, *idx, ha->host_no);
5658 		goto free_sess;
5659 	}
5660 
5661 	if (user) {
5662 		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
5663 	} else {
5664 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
5665 
5666 		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
5667 			fnode_sess->is_boot_target = 1;
5668 		else
5669 			fnode_sess->is_boot_target = 0;
5670 	}
5671 
5672 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
5673 					   fw_ddb_entry);
5674 
5675 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5676 		   __func__, fnode_sess->dev.kobj.name);
5677 
5678 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5679 		   __func__, fnode_conn->dev.kobj.name);
5680 
5681 	return QLA_SUCCESS;
5682 
5683 free_sess:
5684 	iscsi_destroy_flashnode_sess(fnode_sess);
5685 
5686 exit_tgt_create:
5687 	return QLA_ERROR;
5688 }
5689 
5690 /**
5691  * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
5692  * @shost: pointer to host
5693  * @buf: type of ddb entry (ipv4/ipv6)
5694  * @len: length of buf
5695  *
5696  * This creates new ddb entry in the flash by finding first free index and
5697  * storing default ddb there. And then create sysfs entry for the new ddb entry.
5698  **/
5699 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5700 				 int len)
5701 {
5702 	struct scsi_qla_host *ha = to_qla_host(shost);
5703 	struct dev_db_entry *fw_ddb_entry = NULL;
5704 	dma_addr_t fw_ddb_entry_dma;
5705 	struct device *dev;
5706 	uint16_t idx = 0;
5707 	uint16_t max_ddbs = 0;
5708 	uint32_t options = 0;
5709 	uint32_t rval = QLA_ERROR;
5710 
5711 	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
5712 	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
5713 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
5714 				  __func__));
5715 		goto exit_ddb_add;
5716 	}
5717 
5718 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
5719 				     MAX_DEV_DB_ENTRIES;
5720 
5721 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5722 					  &fw_ddb_entry_dma, GFP_KERNEL);
5723 	if (!fw_ddb_entry) {
5724 		DEBUG2(ql4_printk(KERN_ERR, ha,
5725 				  "%s: Unable to allocate dma buffer\n",
5726 				  __func__));
5727 		goto exit_ddb_add;
5728 	}
5729 
5730 	dev = iscsi_find_flashnode_sess(ha->host, NULL,
5731 					qla4xxx_sysfs_ddb_is_non_persistent);
5732 	if (dev) {
5733 		ql4_printk(KERN_ERR, ha,
5734 			   "%s: A non-persistent entry %s found\n",
5735 			   __func__, dev->kobj.name);
5736 		put_device(dev);
5737 		goto exit_ddb_add;
5738 	}
5739 
5740 	/* Index 0 and 1 are reserved for boot target entries */
5741 	for (idx = 2; idx < max_ddbs; idx++) {
5742 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
5743 					     fw_ddb_entry_dma, idx))
5744 			break;
5745 	}
5746 
5747 	if (idx == max_ddbs)
5748 		goto exit_ddb_add;
5749 
5750 	if (!strncasecmp("ipv6", buf, 4))
5751 		options |= IPV6_DEFAULT_DDB_ENTRY;
5752 
5753 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5754 	if (rval == QLA_ERROR)
5755 		goto exit_ddb_add;
5756 
5757 	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
5758 
5759 exit_ddb_add:
5760 	if (fw_ddb_entry)
5761 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5762 				  fw_ddb_entry, fw_ddb_entry_dma);
5763 	if (rval == QLA_SUCCESS)
5764 		return idx;
5765 	else
5766 		return -EIO;
5767 }
5768 
5769 /**
5770  * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
5771  * @fnode_sess: pointer to session attrs of flash ddb entry
5772  * @fnode_conn: pointer to connection attrs of flash ddb entry
5773  *
5774  * This writes the contents of target ddb buffer to Flash with a valid cookie
5775  * value in order to make the ddb entry persistent.
5776  **/
5777 static int  qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
5778 				    struct iscsi_bus_flash_conn *fnode_conn)
5779 {
5780 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5781 	struct scsi_qla_host *ha = to_qla_host(shost);
5782 	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
5783 	struct dev_db_entry *fw_ddb_entry = NULL;
5784 	dma_addr_t fw_ddb_entry_dma;
5785 	uint32_t options = 0;
5786 	int rval = 0;
5787 
5788 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5789 					  &fw_ddb_entry_dma, GFP_KERNEL);
5790 	if (!fw_ddb_entry) {
5791 		DEBUG2(ql4_printk(KERN_ERR, ha,
5792 				  "%s: Unable to allocate dma buffer\n",
5793 				  __func__));
5794 		rval = -ENOMEM;
5795 		goto exit_ddb_apply;
5796 	}
5797 
5798 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5799 		options |= IPV6_DEFAULT_DDB_ENTRY;
5800 
5801 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5802 	if (rval == QLA_ERROR)
5803 		goto exit_ddb_apply;
5804 
5805 	dev_db_start_offset += (fnode_sess->target_id *
5806 				sizeof(*fw_ddb_entry));
5807 
5808 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
5809 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
5810 
5811 	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
5812 				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
5813 
5814 	if (rval == QLA_SUCCESS) {
5815 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
5816 		ql4_printk(KERN_INFO, ha,
5817 			   "%s: flash node %u of host %lu written to flash\n",
5818 			   __func__, fnode_sess->target_id, ha->host_no);
5819 	} else {
5820 		rval = -EIO;
5821 		ql4_printk(KERN_ERR, ha,
5822 			   "%s: Error while writing flash node %u of host %lu to flash\n",
5823 			   __func__, fnode_sess->target_id, ha->host_no);
5824 	}
5825 
5826 exit_ddb_apply:
5827 	if (fw_ddb_entry)
5828 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5829 				  fw_ddb_entry, fw_ddb_entry_dma);
5830 	return rval;
5831 }
5832 
5833 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
5834 					   struct dev_db_entry *fw_ddb_entry,
5835 					   uint16_t idx)
5836 {
5837 	struct dev_db_entry *ddb_entry = NULL;
5838 	dma_addr_t ddb_entry_dma;
5839 	unsigned long wtime;
5840 	uint32_t mbx_sts = 0;
5841 	uint32_t state = 0, conn_err = 0;
5842 	uint16_t tmo = 0;
5843 	int ret = 0;
5844 
5845 	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
5846 				       &ddb_entry_dma, GFP_KERNEL);
5847 	if (!ddb_entry) {
5848 		DEBUG2(ql4_printk(KERN_ERR, ha,
5849 				  "%s: Unable to allocate dma buffer\n",
5850 				  __func__));
5851 		return QLA_ERROR;
5852 	}
5853 
5854 	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
5855 
5856 	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
5857 	if (ret != QLA_SUCCESS) {
5858 		DEBUG2(ql4_printk(KERN_ERR, ha,
5859 				  "%s: Unable to set ddb entry for index %d\n",
5860 				  __func__, idx));
5861 		goto exit_ddb_conn_open;
5862 	}
5863 
5864 	qla4xxx_conn_open(ha, idx);
5865 
5866 	/* To ensure that sendtargets is done, wait for at least 12 secs */
5867 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
5868 	       (ha->def_timeout < LOGIN_TOV * 10) ?
5869 	       ha->def_timeout : LOGIN_TOV);
5870 
5871 	DEBUG2(ql4_printk(KERN_INFO, ha,
5872 			  "Default time to wait for login to ddb %d\n", tmo));
5873 
5874 	wtime = jiffies + (HZ * tmo);
5875 	do {
5876 		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
5877 					      NULL, &state, &conn_err, NULL,
5878 					      NULL);
5879 		if (ret == QLA_ERROR)
5880 			continue;
5881 
5882 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5883 		    state == DDB_DS_SESSION_FAILED)
5884 			break;
5885 
5886 		schedule_timeout_uninterruptible(HZ / 10);
5887 	} while (time_after(wtime, jiffies));
5888 
5889 exit_ddb_conn_open:
5890 	if (ddb_entry)
5891 		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
5892 				  ddb_entry, ddb_entry_dma);
5893 	return ret;
5894 }
5895 
5896 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
5897 				struct dev_db_entry *fw_ddb_entry)
5898 {
5899 	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5900 	struct list_head list_nt;
5901 	uint16_t ddb_index;
5902 	int ret = 0;
5903 
5904 	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
5905 		ql4_printk(KERN_WARNING, ha,
5906 			   "%s: A discovery already in progress!\n", __func__);
5907 		return QLA_ERROR;
5908 	}
5909 
5910 	INIT_LIST_HEAD(&list_nt);
5911 
5912 	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
5913 
5914 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
5915 	if (ret == QLA_ERROR)
5916 		goto exit_login_st_clr_bit;
5917 
5918 	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
5919 	if (ret == QLA_ERROR)
5920 		goto exit_login_st;
5921 
5922 	qla4xxx_build_new_nt_list(ha, &list_nt);
5923 
5924 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
5925 		list_del_init(&ddb_idx->list);
5926 		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
5927 		vfree(ddb_idx);
5928 	}
5929 
5930 exit_login_st:
5931 	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
5932 		ql4_printk(KERN_ERR, ha,
5933 			   "Unable to clear DDB index = 0x%x\n", ddb_index);
5934 	}
5935 
5936 	clear_bit(ddb_index, ha->ddb_idx_map);
5937 
5938 exit_login_st_clr_bit:
5939 	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
5940 	return ret;
5941 }
5942 
5943 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
5944 				struct dev_db_entry *fw_ddb_entry,
5945 				uint16_t idx)
5946 {
5947 	int ret = QLA_ERROR;
5948 
5949 	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
5950 	if (ret != QLA_SUCCESS)
5951 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5952 					      idx);
5953 	else
5954 		ret = -EPERM;
5955 
5956 	return ret;
5957 }
5958 
5959 /**
5960  * qla4xxx_sysfs_ddb_login - Login to the specified target
5961  * @fnode_sess: pointer to session attrs of flash ddb entry
5962  * @fnode_conn: pointer to connection attrs of flash ddb entry
5963  *
5964  * This logs in to the specified target
5965  **/
5966 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
5967 				   struct iscsi_bus_flash_conn *fnode_conn)
5968 {
5969 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5970 	struct scsi_qla_host *ha = to_qla_host(shost);
5971 	struct dev_db_entry *fw_ddb_entry = NULL;
5972 	dma_addr_t fw_ddb_entry_dma;
5973 	uint32_t options = 0;
5974 	int ret = 0;
5975 
5976 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
5977 		ql4_printk(KERN_ERR, ha,
5978 			   "%s: Target info is not persistent\n", __func__);
5979 		ret = -EIO;
5980 		goto exit_ddb_login;
5981 	}
5982 
5983 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5984 					  &fw_ddb_entry_dma, GFP_KERNEL);
5985 	if (!fw_ddb_entry) {
5986 		DEBUG2(ql4_printk(KERN_ERR, ha,
5987 				  "%s: Unable to allocate dma buffer\n",
5988 				  __func__));
5989 		ret = -ENOMEM;
5990 		goto exit_ddb_login;
5991 	}
5992 
5993 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5994 		options |= IPV6_DEFAULT_DDB_ENTRY;
5995 
5996 	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5997 	if (ret == QLA_ERROR)
5998 		goto exit_ddb_login;
5999 
6000 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6001 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6002 
6003 	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
6004 		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
6005 	else
6006 		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
6007 					   fnode_sess->target_id);
6008 
6009 	if (ret > 0)
6010 		ret = -EIO;
6011 
6012 exit_ddb_login:
6013 	if (fw_ddb_entry)
6014 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6015 				  fw_ddb_entry, fw_ddb_entry_dma);
6016 	return ret;
6017 }
6018 
6019 /**
6020  * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
6021  * @cls_sess: pointer to session to be logged out
6022  *
6023  * This performs session log out from the specified target
6024  **/
6025 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
6026 {
6027 	struct iscsi_session *sess;
6028 	struct ddb_entry *ddb_entry = NULL;
6029 	struct scsi_qla_host *ha;
6030 	struct dev_db_entry *fw_ddb_entry = NULL;
6031 	dma_addr_t fw_ddb_entry_dma;
6032 	unsigned long flags;
6033 	unsigned long wtime;
6034 	uint32_t ddb_state;
6035 	int options;
6036 	int ret = 0;
6037 
6038 	sess = cls_sess->dd_data;
6039 	ddb_entry = sess->dd_data;
6040 	ha = ddb_entry->ha;
6041 
6042 	if (ddb_entry->ddb_type != FLASH_DDB) {
6043 		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
6044 			   __func__);
6045 		ret = -ENXIO;
6046 		goto exit_ddb_logout;
6047 	}
6048 
6049 	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6050 		ql4_printk(KERN_ERR, ha,
6051 			   "%s: Logout from boot target entry is not permitted.\n",
6052 			   __func__);
6053 		ret = -EPERM;
6054 		goto exit_ddb_logout;
6055 	}
6056 
6057 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6058 					  &fw_ddb_entry_dma, GFP_KERNEL);
6059 	if (!fw_ddb_entry) {
6060 		ql4_printk(KERN_ERR, ha,
6061 			   "%s: Unable to allocate dma buffer\n", __func__);
6062 		ret = -ENOMEM;
6063 		goto exit_ddb_logout;
6064 	}
6065 
6066 	if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
6067 		goto ddb_logout_init;
6068 
6069 	ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6070 				      fw_ddb_entry, fw_ddb_entry_dma,
6071 				      NULL, NULL, &ddb_state, NULL,
6072 				      NULL, NULL);
6073 	if (ret == QLA_ERROR)
6074 		goto ddb_logout_init;
6075 
6076 	if (ddb_state == DDB_DS_SESSION_ACTIVE)
6077 		goto ddb_logout_init;
6078 
6079 	/* wait until next relogin is triggered using DF_RELOGIN and
6080 	 * clear DF_RELOGIN to avoid invocation of further relogin
6081 	 */
6082 	wtime = jiffies + (HZ * RELOGIN_TOV);
6083 	do {
6084 		if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
6085 			goto ddb_logout_init;
6086 
6087 		schedule_timeout_uninterruptible(HZ);
6088 	} while ((time_after(wtime, jiffies)));
6089 
6090 ddb_logout_init:
6091 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6092 	atomic_set(&ddb_entry->relogin_timer, 0);
6093 
6094 	options = LOGOUT_OPTION_CLOSE_SESSION;
6095 	qla4xxx_session_logout_ddb(ha, ddb_entry, options);
6096 
6097 	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
6098 	wtime = jiffies + (HZ * LOGOUT_TOV);
6099 	do {
6100 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6101 					      fw_ddb_entry, fw_ddb_entry_dma,
6102 					      NULL, NULL, &ddb_state, NULL,
6103 					      NULL, NULL);
6104 		if (ret == QLA_ERROR)
6105 			goto ddb_logout_clr_sess;
6106 
6107 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
6108 		    (ddb_state == DDB_DS_SESSION_FAILED))
6109 			goto ddb_logout_clr_sess;
6110 
6111 		schedule_timeout_uninterruptible(HZ);
6112 	} while ((time_after(wtime, jiffies)));
6113 
6114 ddb_logout_clr_sess:
6115 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
6116 	/*
6117 	 * we have decremented the reference count of the driver
6118 	 * when we setup the session to have the driver unload
6119 	 * to be seamless without actually destroying the
6120 	 * session
6121 	 **/
6122 	try_module_get(qla4xxx_iscsi_transport.owner);
6123 	iscsi_destroy_endpoint(ddb_entry->conn->ep);
6124 
6125 	spin_lock_irqsave(&ha->hardware_lock, flags);
6126 	qla4xxx_free_ddb(ha, ddb_entry);
6127 	clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
6128 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6129 
6130 	iscsi_session_teardown(ddb_entry->sess);
6131 
6132 	clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
6133 	ret = QLA_SUCCESS;
6134 
6135 exit_ddb_logout:
6136 	if (fw_ddb_entry)
6137 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6138 				  fw_ddb_entry, fw_ddb_entry_dma);
6139 	return ret;
6140 }
6141 
6142 /**
6143  * qla4xxx_sysfs_ddb_logout - Logout from the specified target
6144  * @fnode_sess: pointer to session attrs of flash ddb entry
6145  * @fnode_conn: pointer to connection attrs of flash ddb entry
6146  *
6147  * This performs log out from the specified target
6148  **/
6149 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
6150 				    struct iscsi_bus_flash_conn *fnode_conn)
6151 {
6152 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6153 	struct scsi_qla_host *ha = to_qla_host(shost);
6154 	struct ql4_tuple_ddb *flash_tddb = NULL;
6155 	struct ql4_tuple_ddb *tmp_tddb = NULL;
6156 	struct dev_db_entry *fw_ddb_entry = NULL;
6157 	struct ddb_entry *ddb_entry = NULL;
6158 	dma_addr_t fw_ddb_dma;
6159 	uint32_t next_idx = 0;
6160 	uint32_t state = 0, conn_err = 0;
6161 	uint16_t conn_id = 0;
6162 	int idx, index;
6163 	int status, ret = 0;
6164 
6165 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6166 				      &fw_ddb_dma);
6167 	if (fw_ddb_entry == NULL) {
6168 		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6169 		ret = -ENOMEM;
6170 		goto exit_ddb_logout;
6171 	}
6172 
6173 	flash_tddb = vzalloc(sizeof(*flash_tddb));
6174 	if (!flash_tddb) {
6175 		ql4_printk(KERN_WARNING, ha,
6176 			   "%s:Memory Allocation failed.\n", __func__);
6177 		ret = -ENOMEM;
6178 		goto exit_ddb_logout;
6179 	}
6180 
6181 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6182 	if (!tmp_tddb) {
6183 		ql4_printk(KERN_WARNING, ha,
6184 			   "%s:Memory Allocation failed.\n", __func__);
6185 		ret = -ENOMEM;
6186 		goto exit_ddb_logout;
6187 	}
6188 
6189 	if (!fnode_sess->targetname) {
6190 		ql4_printk(KERN_ERR, ha,
6191 			   "%s:Cannot logout from SendTarget entry\n",
6192 			   __func__);
6193 		ret = -EPERM;
6194 		goto exit_ddb_logout;
6195 	}
6196 
6197 	if (fnode_sess->is_boot_target) {
6198 		ql4_printk(KERN_ERR, ha,
6199 			   "%s: Logout from boot target entry is not permitted.\n",
6200 			   __func__);
6201 		ret = -EPERM;
6202 		goto exit_ddb_logout;
6203 	}
6204 
6205 	strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6206 		ISCSI_NAME_SIZE);
6207 
6208 	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6209 		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6210 	else
6211 		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6212 
6213 	flash_tddb->tpgt = fnode_sess->tpgt;
6214 	flash_tddb->port = fnode_conn->port;
6215 
6216 	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6217 
6218 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6219 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6220 		if (ddb_entry == NULL)
6221 			continue;
6222 
6223 		if (ddb_entry->ddb_type != FLASH_DDB)
6224 			continue;
6225 
6226 		index = ddb_entry->sess->target_id;
6227 		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6228 						 fw_ddb_dma, NULL, &next_idx,
6229 						 &state, &conn_err, NULL,
6230 						 &conn_id);
6231 		if (status == QLA_ERROR) {
6232 			ret = -ENOMEM;
6233 			break;
6234 		}
6235 
6236 		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6237 
6238 		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6239 						   true);
6240 		if (status == QLA_SUCCESS) {
6241 			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6242 			break;
6243 		}
6244 	}
6245 
6246 	if (idx == MAX_DDB_ENTRIES)
6247 		ret = -ESRCH;
6248 
6249 exit_ddb_logout:
6250 	if (flash_tddb)
6251 		vfree(flash_tddb);
6252 	if (tmp_tddb)
6253 		vfree(tmp_tddb);
6254 	if (fw_ddb_entry)
6255 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6256 
6257 	return ret;
6258 }
6259 
6260 static int
6261 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6262 			    int param, char *buf)
6263 {
6264 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6265 	struct scsi_qla_host *ha = to_qla_host(shost);
6266 	struct iscsi_bus_flash_conn *fnode_conn;
6267 	struct ql4_chap_table chap_tbl;
6268 	struct device *dev;
6269 	int parent_type;
6270 	int rc = 0;
6271 
6272 	dev = iscsi_find_flashnode_conn(fnode_sess);
6273 	if (!dev)
6274 		return -EIO;
6275 
6276 	fnode_conn = iscsi_dev_to_flash_conn(dev);
6277 
6278 	switch (param) {
6279 	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6280 		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6281 		break;
6282 	case ISCSI_FLASHNODE_PORTAL_TYPE:
6283 		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6284 		break;
6285 	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6286 		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6287 		break;
6288 	case ISCSI_FLASHNODE_DISCOVERY_SESS:
6289 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6290 		break;
6291 	case ISCSI_FLASHNODE_ENTRY_EN:
6292 		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6293 		break;
6294 	case ISCSI_FLASHNODE_HDR_DGST_EN:
6295 		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6296 		break;
6297 	case ISCSI_FLASHNODE_DATA_DGST_EN:
6298 		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6299 		break;
6300 	case ISCSI_FLASHNODE_IMM_DATA_EN:
6301 		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6302 		break;
6303 	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6304 		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6305 		break;
6306 	case ISCSI_FLASHNODE_DATASEQ_INORDER:
6307 		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6308 		break;
6309 	case ISCSI_FLASHNODE_PDU_INORDER:
6310 		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6311 		break;
6312 	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6313 		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6314 		break;
6315 	case ISCSI_FLASHNODE_SNACK_REQ_EN:
6316 		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6317 		break;
6318 	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6319 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6320 		break;
6321 	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6322 		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6323 		break;
6324 	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6325 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6326 		break;
6327 	case ISCSI_FLASHNODE_ERL:
6328 		rc = sprintf(buf, "%u\n", fnode_sess->erl);
6329 		break;
6330 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6331 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6332 		break;
6333 	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6334 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6335 		break;
6336 	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6337 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6338 		break;
6339 	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6340 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6341 		break;
6342 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6343 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6344 		break;
6345 	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6346 		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6347 		break;
6348 	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6349 		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6350 		break;
6351 	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6352 		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6353 		break;
6354 	case ISCSI_FLASHNODE_FIRST_BURST:
6355 		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6356 		break;
6357 	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6358 		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6359 		break;
6360 	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6361 		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6362 		break;
6363 	case ISCSI_FLASHNODE_MAX_R2T:
6364 		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6365 		break;
6366 	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6367 		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6368 		break;
6369 	case ISCSI_FLASHNODE_ISID:
6370 		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6371 			     fnode_sess->isid[0], fnode_sess->isid[1],
6372 			     fnode_sess->isid[2], fnode_sess->isid[3],
6373 			     fnode_sess->isid[4], fnode_sess->isid[5]);
6374 		break;
6375 	case ISCSI_FLASHNODE_TSID:
6376 		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6377 		break;
6378 	case ISCSI_FLASHNODE_PORT:
6379 		rc = sprintf(buf, "%d\n", fnode_conn->port);
6380 		break;
6381 	case ISCSI_FLASHNODE_MAX_BURST:
6382 		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6383 		break;
6384 	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6385 		rc = sprintf(buf, "%u\n",
6386 			     fnode_sess->default_taskmgmt_timeout);
6387 		break;
6388 	case ISCSI_FLASHNODE_IPADDR:
6389 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6390 			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6391 		else
6392 			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6393 		break;
6394 	case ISCSI_FLASHNODE_ALIAS:
6395 		if (fnode_sess->targetalias)
6396 			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6397 		else
6398 			rc = sprintf(buf, "\n");
6399 		break;
6400 	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6401 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6402 			rc = sprintf(buf, "%pI6\n",
6403 				     fnode_conn->redirect_ipaddr);
6404 		else
6405 			rc = sprintf(buf, "%pI4\n",
6406 				     fnode_conn->redirect_ipaddr);
6407 		break;
6408 	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6409 		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6410 		break;
6411 	case ISCSI_FLASHNODE_LOCAL_PORT:
6412 		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6413 		break;
6414 	case ISCSI_FLASHNODE_IPV4_TOS:
6415 		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6416 		break;
6417 	case ISCSI_FLASHNODE_IPV6_TC:
6418 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6419 			rc = sprintf(buf, "%u\n",
6420 				     fnode_conn->ipv6_traffic_class);
6421 		else
6422 			rc = sprintf(buf, "\n");
6423 		break;
6424 	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6425 		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6426 		break;
6427 	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6428 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6429 			rc = sprintf(buf, "%pI6\n",
6430 				     fnode_conn->link_local_ipv6_addr);
6431 		else
6432 			rc = sprintf(buf, "\n");
6433 		break;
6434 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6435 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6436 		break;
6437 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6438 		if (fnode_sess->discovery_parent_type == DDB_ISNS)
6439 			parent_type = ISCSI_DISC_PARENT_ISNS;
6440 		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6441 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6442 		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6443 			parent_type = ISCSI_DISC_PARENT_SENDTGT;
6444 		else
6445 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6446 
6447 		rc = sprintf(buf, "%s\n",
6448 			     iscsi_get_discovery_parent_name(parent_type));
6449 		break;
6450 	case ISCSI_FLASHNODE_NAME:
6451 		if (fnode_sess->targetname)
6452 			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6453 		else
6454 			rc = sprintf(buf, "\n");
6455 		break;
6456 	case ISCSI_FLASHNODE_TPGT:
6457 		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6458 		break;
6459 	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6460 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6461 		break;
6462 	case ISCSI_FLASHNODE_TCP_RECV_WSF:
6463 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6464 		break;
6465 	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6466 		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6467 		break;
6468 	case ISCSI_FLASHNODE_USERNAME:
6469 		if (fnode_sess->chap_auth_en) {
6470 			qla4xxx_get_uni_chap_at_index(ha,
6471 						      chap_tbl.name,
6472 						      chap_tbl.secret,
6473 						      fnode_sess->chap_out_idx);
6474 			rc = sprintf(buf, "%s\n", chap_tbl.name);
6475 		} else {
6476 			rc = sprintf(buf, "\n");
6477 		}
6478 		break;
6479 	case ISCSI_FLASHNODE_PASSWORD:
6480 		if (fnode_sess->chap_auth_en) {
6481 			qla4xxx_get_uni_chap_at_index(ha,
6482 						      chap_tbl.name,
6483 						      chap_tbl.secret,
6484 						      fnode_sess->chap_out_idx);
6485 			rc = sprintf(buf, "%s\n", chap_tbl.secret);
6486 		} else {
6487 			rc = sprintf(buf, "\n");
6488 		}
6489 		break;
6490 	case ISCSI_FLASHNODE_STATSN:
6491 		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6492 		break;
6493 	case ISCSI_FLASHNODE_EXP_STATSN:
6494 		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6495 		break;
6496 	case ISCSI_FLASHNODE_IS_BOOT_TGT:
6497 		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6498 		break;
6499 	default:
6500 		rc = -ENOSYS;
6501 		break;
6502 	}
6503 
6504 	put_device(dev);
6505 	return rc;
6506 }
6507 
6508 /**
6509  * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6510  * @fnode_sess: pointer to session attrs of flash ddb entry
6511  * @fnode_conn: pointer to connection attrs of flash ddb entry
6512  * @data: Parameters and their values to update
6513  * @len: len of data
6514  *
6515  * This sets the parameter of flash ddb entry and writes them to flash
6516  **/
6517 static int
6518 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6519 			    struct iscsi_bus_flash_conn *fnode_conn,
6520 			    void *data, int len)
6521 {
6522 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6523 	struct scsi_qla_host *ha = to_qla_host(shost);
6524 	struct iscsi_flashnode_param_info *fnode_param;
6525 	struct nlattr *attr;
6526 	int rc = QLA_ERROR;
6527 	uint32_t rem = len;
6528 
6529 	nla_for_each_attr(attr, data, len, rem) {
6530 		fnode_param = nla_data(attr);
6531 
6532 		switch (fnode_param->param) {
6533 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6534 			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6535 			break;
6536 		case ISCSI_FLASHNODE_PORTAL_TYPE:
6537 			memcpy(fnode_sess->portal_type, fnode_param->value,
6538 			       strlen(fnode_sess->portal_type));
6539 			break;
6540 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6541 			fnode_sess->auto_snd_tgt_disable =
6542 							fnode_param->value[0];
6543 			break;
6544 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
6545 			fnode_sess->discovery_sess = fnode_param->value[0];
6546 			break;
6547 		case ISCSI_FLASHNODE_ENTRY_EN:
6548 			fnode_sess->entry_state = fnode_param->value[0];
6549 			break;
6550 		case ISCSI_FLASHNODE_HDR_DGST_EN:
6551 			fnode_conn->hdrdgst_en = fnode_param->value[0];
6552 			break;
6553 		case ISCSI_FLASHNODE_DATA_DGST_EN:
6554 			fnode_conn->datadgst_en = fnode_param->value[0];
6555 			break;
6556 		case ISCSI_FLASHNODE_IMM_DATA_EN:
6557 			fnode_sess->imm_data_en = fnode_param->value[0];
6558 			break;
6559 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6560 			fnode_sess->initial_r2t_en = fnode_param->value[0];
6561 			break;
6562 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
6563 			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6564 			break;
6565 		case ISCSI_FLASHNODE_PDU_INORDER:
6566 			fnode_sess->pdu_inorder_en = fnode_param->value[0];
6567 			break;
6568 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6569 			fnode_sess->chap_auth_en = fnode_param->value[0];
6570 			break;
6571 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
6572 			fnode_conn->snack_req_en = fnode_param->value[0];
6573 			break;
6574 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6575 			fnode_sess->discovery_logout_en = fnode_param->value[0];
6576 			break;
6577 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6578 			fnode_sess->bidi_chap_en = fnode_param->value[0];
6579 			break;
6580 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6581 			fnode_sess->discovery_auth_optional =
6582 							fnode_param->value[0];
6583 			break;
6584 		case ISCSI_FLASHNODE_ERL:
6585 			fnode_sess->erl = fnode_param->value[0];
6586 			break;
6587 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6588 			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6589 			break;
6590 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6591 			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6592 			break;
6593 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6594 			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6595 			break;
6596 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6597 			fnode_conn->tcp_timer_scale = fnode_param->value[0];
6598 			break;
6599 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6600 			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6601 			break;
6602 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6603 			fnode_conn->fragment_disable = fnode_param->value[0];
6604 			break;
6605 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6606 			fnode_conn->max_recv_dlength =
6607 					*(unsigned *)fnode_param->value;
6608 			break;
6609 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6610 			fnode_conn->max_xmit_dlength =
6611 					*(unsigned *)fnode_param->value;
6612 			break;
6613 		case ISCSI_FLASHNODE_FIRST_BURST:
6614 			fnode_sess->first_burst =
6615 					*(unsigned *)fnode_param->value;
6616 			break;
6617 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6618 			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
6619 			break;
6620 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6621 			fnode_sess->time2retain =
6622 						*(uint16_t *)fnode_param->value;
6623 			break;
6624 		case ISCSI_FLASHNODE_MAX_R2T:
6625 			fnode_sess->max_r2t =
6626 					*(uint16_t *)fnode_param->value;
6627 			break;
6628 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6629 			fnode_conn->keepalive_timeout =
6630 				*(uint16_t *)fnode_param->value;
6631 			break;
6632 		case ISCSI_FLASHNODE_ISID:
6633 			memcpy(fnode_sess->isid, fnode_param->value,
6634 			       sizeof(fnode_sess->isid));
6635 			break;
6636 		case ISCSI_FLASHNODE_TSID:
6637 			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
6638 			break;
6639 		case ISCSI_FLASHNODE_PORT:
6640 			fnode_conn->port = *(uint16_t *)fnode_param->value;
6641 			break;
6642 		case ISCSI_FLASHNODE_MAX_BURST:
6643 			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
6644 			break;
6645 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6646 			fnode_sess->default_taskmgmt_timeout =
6647 						*(uint16_t *)fnode_param->value;
6648 			break;
6649 		case ISCSI_FLASHNODE_IPADDR:
6650 			memcpy(fnode_conn->ipaddress, fnode_param->value,
6651 			       IPv6_ADDR_LEN);
6652 			break;
6653 		case ISCSI_FLASHNODE_ALIAS:
6654 			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
6655 						    (char *)fnode_param->value);
6656 			break;
6657 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6658 			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
6659 			       IPv6_ADDR_LEN);
6660 			break;
6661 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6662 			fnode_conn->max_segment_size =
6663 					*(unsigned *)fnode_param->value;
6664 			break;
6665 		case ISCSI_FLASHNODE_LOCAL_PORT:
6666 			fnode_conn->local_port =
6667 						*(uint16_t *)fnode_param->value;
6668 			break;
6669 		case ISCSI_FLASHNODE_IPV4_TOS:
6670 			fnode_conn->ipv4_tos = fnode_param->value[0];
6671 			break;
6672 		case ISCSI_FLASHNODE_IPV6_TC:
6673 			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
6674 			break;
6675 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6676 			fnode_conn->ipv6_flow_label = fnode_param->value[0];
6677 			break;
6678 		case ISCSI_FLASHNODE_NAME:
6679 			rc = iscsi_switch_str_param(&fnode_sess->targetname,
6680 						    (char *)fnode_param->value);
6681 			break;
6682 		case ISCSI_FLASHNODE_TPGT:
6683 			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
6684 			break;
6685 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6686 			memcpy(fnode_conn->link_local_ipv6_addr,
6687 			       fnode_param->value, IPv6_ADDR_LEN);
6688 			break;
6689 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6690 			fnode_sess->discovery_parent_idx =
6691 						*(uint16_t *)fnode_param->value;
6692 			break;
6693 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6694 			fnode_conn->tcp_xmit_wsf =
6695 						*(uint8_t *)fnode_param->value;
6696 			break;
6697 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
6698 			fnode_conn->tcp_recv_wsf =
6699 						*(uint8_t *)fnode_param->value;
6700 			break;
6701 		case ISCSI_FLASHNODE_STATSN:
6702 			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
6703 			break;
6704 		case ISCSI_FLASHNODE_EXP_STATSN:
6705 			fnode_conn->exp_statsn =
6706 						*(uint32_t *)fnode_param->value;
6707 			break;
6708 		default:
6709 			ql4_printk(KERN_ERR, ha,
6710 				   "%s: No such sysfs attribute\n", __func__);
6711 			rc = -ENOSYS;
6712 			goto exit_set_param;
6713 		}
6714 	}
6715 
6716 	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
6717 
6718 exit_set_param:
6719 	return rc;
6720 }
6721 
6722 /**
6723  * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
6724  * @fnode_sess: pointer to session attrs of flash ddb entry
6725  *
6726  * This invalidates the flash ddb entry at the given index
6727  **/
6728 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6729 {
6730 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6731 	struct scsi_qla_host *ha = to_qla_host(shost);
6732 	uint32_t dev_db_start_offset;
6733 	uint32_t dev_db_end_offset;
6734 	struct dev_db_entry *fw_ddb_entry = NULL;
6735 	dma_addr_t fw_ddb_entry_dma;
6736 	uint16_t *ddb_cookie = NULL;
6737 	size_t ddb_size = 0;
6738 	void *pddb = NULL;
6739 	int target_id;
6740 	int rc = 0;
6741 
6742 	if (fnode_sess->is_boot_target) {
6743 		rc = -EPERM;
6744 		DEBUG2(ql4_printk(KERN_ERR, ha,
6745 				  "%s: Deletion of boot target entry is not permitted.\n",
6746 				  __func__));
6747 		goto exit_ddb_del;
6748 	}
6749 
6750 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
6751 		goto sysfs_ddb_del;
6752 
6753 	if (is_qla40XX(ha)) {
6754 		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6755 		dev_db_end_offset = FLASH_OFFSET_DB_END;
6756 		dev_db_start_offset += (fnode_sess->target_id *
6757 				       sizeof(*fw_ddb_entry));
6758 		ddb_size = sizeof(*fw_ddb_entry);
6759 	} else {
6760 		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
6761 				      (ha->hw.flt_region_ddb << 2);
6762 		/* flt_ddb_size is DDB table size for both ports
6763 		 * so divide it by 2 to calculate the offset for second port
6764 		 */
6765 		if (ha->port_num == 1)
6766 			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
6767 
6768 		dev_db_end_offset = dev_db_start_offset +
6769 				    (ha->hw.flt_ddb_size / 2);
6770 
6771 		dev_db_start_offset += (fnode_sess->target_id *
6772 				       sizeof(*fw_ddb_entry));
6773 		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6774 
6775 		ddb_size = sizeof(*ddb_cookie);
6776 	}
6777 
6778 	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
6779 			  __func__, dev_db_start_offset, dev_db_end_offset));
6780 
6781 	if (dev_db_start_offset > dev_db_end_offset) {
6782 		rc = -EIO;
6783 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
6784 				  __func__, fnode_sess->target_id));
6785 		goto exit_ddb_del;
6786 	}
6787 
6788 	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
6789 				  &fw_ddb_entry_dma, GFP_KERNEL);
6790 	if (!pddb) {
6791 		rc = -ENOMEM;
6792 		DEBUG2(ql4_printk(KERN_ERR, ha,
6793 				  "%s: Unable to allocate dma buffer\n",
6794 				  __func__));
6795 		goto exit_ddb_del;
6796 	}
6797 
6798 	if (is_qla40XX(ha)) {
6799 		fw_ddb_entry = pddb;
6800 		memset(fw_ddb_entry, 0, ddb_size);
6801 		ddb_cookie = &fw_ddb_entry->cookie;
6802 	} else {
6803 		ddb_cookie = pddb;
6804 	}
6805 
6806 	/* invalidate the cookie */
6807 	*ddb_cookie = 0xFFEE;
6808 	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6809 			  ddb_size, FLASH_OPT_RMW_COMMIT);
6810 
6811 sysfs_ddb_del:
6812 	target_id = fnode_sess->target_id;
6813 	iscsi_destroy_flashnode_sess(fnode_sess);
6814 	ql4_printk(KERN_INFO, ha,
6815 		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
6816 		   __func__, target_id, ha->host_no);
6817 exit_ddb_del:
6818 	if (pddb)
6819 		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
6820 				  fw_ddb_entry_dma);
6821 	return rc;
6822 }
6823 
6824 /**
6825  * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
6826  * @ha: pointer to adapter structure
6827  *
6828  * Export the firmware DDB for all send targets and normal targets to sysfs.
6829  **/
6830 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
6831 {
6832 	struct dev_db_entry *fw_ddb_entry = NULL;
6833 	dma_addr_t fw_ddb_entry_dma;
6834 	uint16_t max_ddbs;
6835 	uint16_t idx = 0;
6836 	int ret = QLA_SUCCESS;
6837 
6838 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
6839 					  sizeof(*fw_ddb_entry),
6840 					  &fw_ddb_entry_dma, GFP_KERNEL);
6841 	if (!fw_ddb_entry) {
6842 		DEBUG2(ql4_printk(KERN_ERR, ha,
6843 				  "%s: Unable to allocate dma buffer\n",
6844 				  __func__));
6845 		return -ENOMEM;
6846 	}
6847 
6848 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
6849 				     MAX_DEV_DB_ENTRIES;
6850 
6851 	for (idx = 0; idx < max_ddbs; idx++) {
6852 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
6853 					     idx))
6854 			continue;
6855 
6856 		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
6857 		if (ret) {
6858 			ret = -EIO;
6859 			break;
6860 		}
6861 	}
6862 
6863 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
6864 			  fw_ddb_entry_dma);
6865 
6866 	return ret;
6867 }
6868 
6869 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
6870 {
6871 	iscsi_destroy_all_flashnode(ha->host);
6872 }
6873 
6874 /**
6875  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
6876  * @ha: pointer to adapter structure
6877  * @is_reset: Is this init path or reset path
6878  *
6879  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
6880  * using connection open, then create the list of normal targets (nt)
6881  * from firmware DDBs. Based on the list of nt setup session and connection
6882  * objects.
6883  **/
6884 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
6885 {
6886 	uint16_t tmo = 0;
6887 	struct list_head list_st, list_nt;
6888 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
6889 	unsigned long wtime;
6890 
6891 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
6892 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
6893 		ha->is_reset = is_reset;
6894 		return;
6895 	}
6896 
6897 	INIT_LIST_HEAD(&list_st);
6898 	INIT_LIST_HEAD(&list_nt);
6899 
6900 	qla4xxx_build_st_list(ha, &list_st);
6901 
6902 	/* Before issuing conn open mbox, ensure all IPs states are configured
6903 	 * Note, conn open fails if IPs are not configured
6904 	 */
6905 	qla4xxx_wait_for_ip_configuration(ha);
6906 
6907 	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
6908 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
6909 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
6910 	}
6911 
6912 	/* Wait to ensure all sendtargets are done for min 12 sec wait */
6913 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
6914 	       (ha->def_timeout < LOGIN_TOV * 10) ?
6915 	       ha->def_timeout : LOGIN_TOV);
6916 
6917 	DEBUG2(ql4_printk(KERN_INFO, ha,
6918 			  "Default time to wait for build ddb %d\n", tmo));
6919 
6920 	wtime = jiffies + (HZ * tmo);
6921 	do {
6922 		if (list_empty(&list_st))
6923 			break;
6924 
6925 		qla4xxx_remove_failed_ddb(ha, &list_st);
6926 		schedule_timeout_uninterruptible(HZ / 10);
6927 	} while (time_after(wtime, jiffies));
6928 
6929 	/* Free up the sendtargets list */
6930 	qla4xxx_free_ddb_list(&list_st);
6931 
6932 	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
6933 
6934 	qla4xxx_free_ddb_list(&list_nt);
6935 
6936 	qla4xxx_free_ddb_index(ha);
6937 }
6938 
6939 /**
6940  * qla4xxx_wait_login_resp_boot_tgt -  Wait for iSCSI boot target login
6941  * response.
6942  * @ha: pointer to adapter structure
6943  *
6944  * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
6945  * set in DDB and we will wait for login response of boot targets during
6946  * probe.
6947  **/
6948 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
6949 {
6950 	struct ddb_entry *ddb_entry;
6951 	struct dev_db_entry *fw_ddb_entry = NULL;
6952 	dma_addr_t fw_ddb_entry_dma;
6953 	unsigned long wtime;
6954 	uint32_t ddb_state;
6955 	int max_ddbs, idx, ret;
6956 
6957 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6958 				     MAX_DEV_DB_ENTRIES;
6959 
6960 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6961 					  &fw_ddb_entry_dma, GFP_KERNEL);
6962 	if (!fw_ddb_entry) {
6963 		ql4_printk(KERN_ERR, ha,
6964 			   "%s: Unable to allocate dma buffer\n", __func__);
6965 		goto exit_login_resp;
6966 	}
6967 
6968 	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
6969 
6970 	for (idx = 0; idx < max_ddbs; idx++) {
6971 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6972 		if (ddb_entry == NULL)
6973 			continue;
6974 
6975 		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6976 			DEBUG2(ql4_printk(KERN_INFO, ha,
6977 					  "%s: DDB index [%d]\n", __func__,
6978 					  ddb_entry->fw_ddb_index));
6979 			do {
6980 				ret = qla4xxx_get_fwddb_entry(ha,
6981 						ddb_entry->fw_ddb_index,
6982 						fw_ddb_entry, fw_ddb_entry_dma,
6983 						NULL, NULL, &ddb_state, NULL,
6984 						NULL, NULL);
6985 				if (ret == QLA_ERROR)
6986 					goto exit_login_resp;
6987 
6988 				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
6989 				    (ddb_state == DDB_DS_SESSION_FAILED))
6990 					break;
6991 
6992 				schedule_timeout_uninterruptible(HZ);
6993 
6994 			} while ((time_after(wtime, jiffies)));
6995 
6996 			if (!time_after(wtime, jiffies)) {
6997 				DEBUG2(ql4_printk(KERN_INFO, ha,
6998 						  "%s: Login response wait timer expired\n",
6999 						  __func__));
7000 				 goto exit_login_resp;
7001 			}
7002 		}
7003 	}
7004 
7005 exit_login_resp:
7006 	if (fw_ddb_entry)
7007 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7008 				  fw_ddb_entry, fw_ddb_entry_dma);
7009 }
7010 
7011 /**
7012  * qla4xxx_probe_adapter - callback function to probe HBA
7013  * @pdev: pointer to pci_dev structure
7014  * @pci_device_id: pointer to pci_device entry
7015  *
7016  * This routine will probe for Qlogic 4xxx iSCSI host adapters.
7017  * It returns zero if successful. It also initializes all data necessary for
7018  * the driver.
7019  **/
7020 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
7021 				 const struct pci_device_id *ent)
7022 {
7023 	int ret = -ENODEV, status;
7024 	struct Scsi_Host *host;
7025 	struct scsi_qla_host *ha;
7026 	uint8_t init_retry_count = 0;
7027 	char buf[34];
7028 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
7029 	uint32_t dev_state;
7030 
7031 	if (pci_enable_device(pdev))
7032 		return -1;
7033 
7034 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
7035 	if (host == NULL) {
7036 		printk(KERN_WARNING
7037 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
7038 		goto probe_disable_device;
7039 	}
7040 
7041 	/* Clear our data area */
7042 	ha = to_qla_host(host);
7043 	memset(ha, 0, sizeof(*ha));
7044 
7045 	/* Save the information from PCI BIOS.	*/
7046 	ha->pdev = pdev;
7047 	ha->host = host;
7048 	ha->host_no = host->host_no;
7049 	ha->func_num = PCI_FUNC(ha->pdev->devfn);
7050 
7051 	pci_enable_pcie_error_reporting(pdev);
7052 
7053 	/* Setup Runtime configurable options */
7054 	if (is_qla8022(ha)) {
7055 		ha->isp_ops = &qla4_82xx_isp_ops;
7056 		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
7057 		ha->qdr_sn_window = -1;
7058 		ha->ddr_mn_window = -1;
7059 		ha->curr_window = 255;
7060 		nx_legacy_intr = &legacy_intr[ha->func_num];
7061 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
7062 		ha->nx_legacy_intr.tgt_status_reg =
7063 			nx_legacy_intr->tgt_status_reg;
7064 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
7065 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
7066 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
7067 		ha->isp_ops = &qla4_83xx_isp_ops;
7068 		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
7069 	} else {
7070 		ha->isp_ops = &qla4xxx_isp_ops;
7071 	}
7072 
7073 	if (is_qla80XX(ha)) {
7074 		rwlock_init(&ha->hw_lock);
7075 		ha->pf_bit = ha->func_num << 16;
7076 		/* Set EEH reset type to fundamental if required by hba */
7077 		pdev->needs_freset = 1;
7078 	}
7079 
7080 	/* Configure PCI I/O space. */
7081 	ret = ha->isp_ops->iospace_config(ha);
7082 	if (ret)
7083 		goto probe_failed_ioconfig;
7084 
7085 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
7086 		   pdev->device, pdev->irq, ha->reg);
7087 
7088 	qla4xxx_config_dma_addressing(ha);
7089 
7090 	/* Initialize lists and spinlocks. */
7091 	INIT_LIST_HEAD(&ha->free_srb_q);
7092 
7093 	mutex_init(&ha->mbox_sem);
7094 	mutex_init(&ha->chap_sem);
7095 	init_completion(&ha->mbx_intr_comp);
7096 	init_completion(&ha->disable_acb_comp);
7097 
7098 	spin_lock_init(&ha->hardware_lock);
7099 	spin_lock_init(&ha->work_lock);
7100 
7101 	/* Initialize work list */
7102 	INIT_LIST_HEAD(&ha->work_list);
7103 
7104 	/* Allocate dma buffers */
7105 	if (qla4xxx_mem_alloc(ha)) {
7106 		ql4_printk(KERN_WARNING, ha,
7107 		    "[ERROR] Failed to allocate memory for adapter\n");
7108 
7109 		ret = -ENOMEM;
7110 		goto probe_failed;
7111 	}
7112 
7113 	host->cmd_per_lun = 3;
7114 	host->max_channel = 0;
7115 	host->max_lun = MAX_LUNS - 1;
7116 	host->max_id = MAX_TARGETS;
7117 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
7118 	host->can_queue = MAX_SRBS ;
7119 	host->transportt = qla4xxx_scsi_transport;
7120 
7121 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
7122 	if (ret) {
7123 		ql4_printk(KERN_WARNING, ha,
7124 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
7125 		goto probe_failed;
7126 	}
7127 
7128 	pci_set_drvdata(pdev, ha);
7129 
7130 	ret = scsi_add_host(host, &pdev->dev);
7131 	if (ret)
7132 		goto probe_failed;
7133 
7134 	if (is_qla80XX(ha))
7135 		qla4_8xxx_get_flash_info(ha);
7136 
7137 	if (is_qla8032(ha) || is_qla8042(ha)) {
7138 		qla4_83xx_read_reset_template(ha);
7139 		/*
7140 		 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
7141 		 * If DONRESET_BIT0 is set, drivers should not set dev_state
7142 		 * to NEED_RESET. But if NEED_RESET is set, drivers should
7143 		 * should honor the reset.
7144 		 */
7145 		if (ql4xdontresethba == 1)
7146 			qla4_83xx_set_idc_dontreset(ha);
7147 	}
7148 
7149 	/*
7150 	 * Initialize the Host adapter request/response queues and
7151 	 * firmware
7152 	 * NOTE: interrupts enabled upon successful completion
7153 	 */
7154 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7155 
7156 	/* Dont retry adapter initialization if IRQ allocation failed */
7157 	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7158 		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7159 			   __func__);
7160 		goto skip_retry_init;
7161 	}
7162 
7163 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7164 	    init_retry_count++ < MAX_INIT_RETRIES) {
7165 
7166 		if (is_qla80XX(ha)) {
7167 			ha->isp_ops->idc_lock(ha);
7168 			dev_state = qla4_8xxx_rd_direct(ha,
7169 							QLA8XXX_CRB_DEV_STATE);
7170 			ha->isp_ops->idc_unlock(ha);
7171 			if (dev_state == QLA8XXX_DEV_FAILED) {
7172 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7173 				    "initialize adapter. H/W is in failed state\n",
7174 				    __func__);
7175 				break;
7176 			}
7177 		}
7178 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
7179 			      "(%d)\n", __func__, init_retry_count));
7180 
7181 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7182 			continue;
7183 
7184 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7185 	}
7186 
7187 skip_retry_init:
7188 	if (!test_bit(AF_ONLINE, &ha->flags)) {
7189 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7190 
7191 		if ((is_qla8022(ha) && ql4xdontresethba) ||
7192 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
7193 		     qla4_83xx_idc_dontreset(ha))) {
7194 			/* Put the device in failed state. */
7195 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7196 			ha->isp_ops->idc_lock(ha);
7197 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7198 					    QLA8XXX_DEV_FAILED);
7199 			ha->isp_ops->idc_unlock(ha);
7200 		}
7201 		ret = -ENODEV;
7202 		goto remove_host;
7203 	}
7204 
7205 	/* Startup the kernel thread for this host adapter. */
7206 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
7207 		      "qla4xxx_dpc\n", __func__));
7208 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7209 	ha->dpc_thread = create_singlethread_workqueue(buf);
7210 	if (!ha->dpc_thread) {
7211 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7212 		ret = -ENODEV;
7213 		goto remove_host;
7214 	}
7215 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7216 
7217 	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
7218 				      ha->host_no);
7219 	if (!ha->task_wq) {
7220 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7221 		ret = -ENODEV;
7222 		goto remove_host;
7223 	}
7224 
7225 	/*
7226 	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7227 	 * (which is called indirectly by qla4xxx_initialize_adapter),
7228 	 * so that irqs will be registered after crbinit but before
7229 	 * mbx_intr_enable.
7230 	 */
7231 	if (is_qla40XX(ha)) {
7232 		ret = qla4xxx_request_irqs(ha);
7233 		if (ret) {
7234 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7235 			    "interrupt %d already in use.\n", pdev->irq);
7236 			goto remove_host;
7237 		}
7238 	}
7239 
7240 	pci_save_state(ha->pdev);
7241 	ha->isp_ops->enable_intrs(ha);
7242 
7243 	/* Start timer thread. */
7244 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7245 
7246 	set_bit(AF_INIT_DONE, &ha->flags);
7247 
7248 	qla4_8xxx_alloc_sysfs_attr(ha);
7249 
7250 	printk(KERN_INFO
7251 	       " QLogic iSCSI HBA Driver version: %s\n"
7252 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7253 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7254 	       ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7255 	       ha->fw_info.fw_patch, ha->fw_info.fw_build);
7256 
7257 	/* Set the driver version */
7258 	if (is_qla80XX(ha))
7259 		qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7260 
7261 	if (qla4xxx_setup_boot_info(ha))
7262 		ql4_printk(KERN_ERR, ha,
7263 			   "%s: No iSCSI boot target configured\n", __func__);
7264 
7265 	if (qla4xxx_sysfs_ddb_export(ha))
7266 		ql4_printk(KERN_ERR, ha,
7267 			   "%s: Error exporting ddb to sysfs\n", __func__);
7268 
7269 		/* Perform the build ddb list and login to each */
7270 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7271 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7272 	qla4xxx_wait_login_resp_boot_tgt(ha);
7273 
7274 	qla4xxx_create_chap_list(ha);
7275 
7276 	qla4xxx_create_ifaces(ha);
7277 	return 0;
7278 
7279 remove_host:
7280 	scsi_remove_host(ha->host);
7281 
7282 probe_failed:
7283 	qla4xxx_free_adapter(ha);
7284 
7285 probe_failed_ioconfig:
7286 	pci_disable_pcie_error_reporting(pdev);
7287 	scsi_host_put(ha->host);
7288 
7289 probe_disable_device:
7290 	pci_disable_device(pdev);
7291 
7292 	return ret;
7293 }
7294 
7295 /**
7296  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
7297  * @ha: pointer to adapter structure
7298  *
7299  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7300  * so that the other port will not re-initialize while in the process of
7301  * removing the ha due to driver unload or hba hotplug.
7302  **/
7303 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7304 {
7305 	struct scsi_qla_host *other_ha = NULL;
7306 	struct pci_dev *other_pdev = NULL;
7307 	int fn = ISP4XXX_PCI_FN_2;
7308 
7309 	/*iscsi function numbers for ISP4xxx is 1 and 3*/
7310 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7311 		fn = ISP4XXX_PCI_FN_1;
7312 
7313 	other_pdev =
7314 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7315 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7316 		fn));
7317 
7318 	/* Get other_ha if other_pdev is valid and state is enable*/
7319 	if (other_pdev) {
7320 		if (atomic_read(&other_pdev->enable_cnt)) {
7321 			other_ha = pci_get_drvdata(other_pdev);
7322 			if (other_ha) {
7323 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
7324 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7325 				    "Prevent %s reinit\n", __func__,
7326 				    dev_name(&other_ha->pdev->dev)));
7327 			}
7328 		}
7329 		pci_dev_put(other_pdev);
7330 	}
7331 }
7332 
7333 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
7334 {
7335 	struct ddb_entry *ddb_entry;
7336 	int options;
7337 	int idx;
7338 
7339 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7340 
7341 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7342 		if ((ddb_entry != NULL) &&
7343 		    (ddb_entry->ddb_type == FLASH_DDB)) {
7344 
7345 			options = LOGOUT_OPTION_CLOSE_SESSION;
7346 			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
7347 			    == QLA_ERROR)
7348 				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
7349 					   __func__);
7350 
7351 			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7352 			/*
7353 			 * we have decremented the reference count of the driver
7354 			 * when we setup the session to have the driver unload
7355 			 * to be seamless without actually destroying the
7356 			 * session
7357 			 **/
7358 			try_module_get(qla4xxx_iscsi_transport.owner);
7359 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
7360 			qla4xxx_free_ddb(ha, ddb_entry);
7361 			iscsi_session_teardown(ddb_entry->sess);
7362 		}
7363 	}
7364 }
7365 /**
7366  * qla4xxx_remove_adapter - callback function to remove adapter.
7367  * @pci_dev: PCI device pointer
7368  **/
7369 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7370 {
7371 	struct scsi_qla_host *ha;
7372 
7373 	/*
7374 	 * If the PCI device is disabled then it means probe_adapter had
7375 	 * failed and resources already cleaned up on probe_adapter exit.
7376 	 */
7377 	if (!pci_is_enabled(pdev))
7378 		return;
7379 
7380 	ha = pci_get_drvdata(pdev);
7381 
7382 	if (is_qla40XX(ha))
7383 		qla4xxx_prevent_other_port_reinit(ha);
7384 
7385 	/* destroy iface from sysfs */
7386 	qla4xxx_destroy_ifaces(ha);
7387 
7388 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7389 		iscsi_boot_destroy_kset(ha->boot_kset);
7390 
7391 	qla4xxx_destroy_fw_ddb_session(ha);
7392 	qla4_8xxx_free_sysfs_attr(ha);
7393 
7394 	qla4xxx_sysfs_ddb_remove(ha);
7395 	scsi_remove_host(ha->host);
7396 
7397 	qla4xxx_free_adapter(ha);
7398 
7399 	scsi_host_put(ha->host);
7400 
7401 	pci_disable_pcie_error_reporting(pdev);
7402 	pci_disable_device(pdev);
7403 	pci_set_drvdata(pdev, NULL);
7404 }
7405 
7406 /**
7407  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7408  * @ha: HA context
7409  *
7410  * At exit, the @ha's flags.enable_64bit_addressing set to indicated
7411  * supported addressing method.
7412  */
7413 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7414 {
7415 	int retval;
7416 
7417 	/* Update our PCI device dma_mask for full 64 bit mask */
7418 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7419 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7420 			dev_dbg(&ha->pdev->dev,
7421 				  "Failed to set 64 bit PCI consistent mask; "
7422 				   "using 32 bit.\n");
7423 			retval = pci_set_consistent_dma_mask(ha->pdev,
7424 							     DMA_BIT_MASK(32));
7425 		}
7426 	} else
7427 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7428 }
7429 
7430 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7431 {
7432 	struct iscsi_cls_session *cls_sess;
7433 	struct iscsi_session *sess;
7434 	struct ddb_entry *ddb;
7435 	int queue_depth = QL4_DEF_QDEPTH;
7436 
7437 	cls_sess = starget_to_session(sdev->sdev_target);
7438 	sess = cls_sess->dd_data;
7439 	ddb = sess->dd_data;
7440 
7441 	sdev->hostdata = ddb;
7442 	sdev->tagged_supported = 1;
7443 
7444 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7445 		queue_depth = ql4xmaxqdepth;
7446 
7447 	scsi_activate_tcq(sdev, queue_depth);
7448 	return 0;
7449 }
7450 
7451 static int qla4xxx_slave_configure(struct scsi_device *sdev)
7452 {
7453 	sdev->tagged_supported = 1;
7454 	return 0;
7455 }
7456 
7457 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
7458 {
7459 	scsi_deactivate_tcq(sdev, 1);
7460 }
7461 
7462 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7463 				      int reason)
7464 {
7465 	if (!ql4xqfulltracking)
7466 		return -EOPNOTSUPP;
7467 
7468 	return iscsi_change_queue_depth(sdev, qdepth, reason);
7469 }
7470 
7471 /**
7472  * qla4xxx_del_from_active_array - returns an active srb
7473  * @ha: Pointer to host adapter structure.
7474  * @index: index into the active_array
7475  *
7476  * This routine removes and returns the srb at the specified index
7477  **/
7478 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7479     uint32_t index)
7480 {
7481 	struct srb *srb = NULL;
7482 	struct scsi_cmnd *cmd = NULL;
7483 
7484 	cmd = scsi_host_find_tag(ha->host, index);
7485 	if (!cmd)
7486 		return srb;
7487 
7488 	srb = (struct srb *)CMD_SP(cmd);
7489 	if (!srb)
7490 		return srb;
7491 
7492 	/* update counters */
7493 	if (srb->flags & SRB_DMA_VALID) {
7494 		ha->iocb_cnt -= srb->iocb_cnt;
7495 		if (srb->cmd)
7496 			srb->cmd->host_scribble =
7497 				(unsigned char *)(unsigned long) MAX_SRBS;
7498 	}
7499 	return srb;
7500 }
7501 
7502 /**
7503  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7504  * @ha: Pointer to host adapter structure.
7505  * @cmd: Scsi Command to wait on.
7506  *
7507  * This routine waits for the command to be returned by the Firmware
7508  * for some max time.
7509  **/
7510 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7511 				      struct scsi_cmnd *cmd)
7512 {
7513 	int done = 0;
7514 	struct srb *rp;
7515 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7516 	int ret = SUCCESS;
7517 
7518 	/* Dont wait on command if PCI error is being handled
7519 	 * by PCI AER driver
7520 	 */
7521 	if (unlikely(pci_channel_offline(ha->pdev)) ||
7522 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
7523 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7524 		    ha->host_no, __func__);
7525 		return ret;
7526 	}
7527 
7528 	do {
7529 		/* Checking to see if its returned to OS */
7530 		rp = (struct srb *) CMD_SP(cmd);
7531 		if (rp == NULL) {
7532 			done++;
7533 			break;
7534 		}
7535 
7536 		msleep(2000);
7537 	} while (max_wait_time--);
7538 
7539 	return done;
7540 }
7541 
7542 /**
7543  * qla4xxx_wait_for_hba_online - waits for HBA to come online
7544  * @ha: Pointer to host adapter structure
7545  **/
7546 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7547 {
7548 	unsigned long wait_online;
7549 
7550 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7551 	while (time_before(jiffies, wait_online)) {
7552 
7553 		if (adapter_up(ha))
7554 			return QLA_SUCCESS;
7555 
7556 		msleep(2000);
7557 	}
7558 
7559 	return QLA_ERROR;
7560 }
7561 
7562 /**
7563  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7564  * @ha: pointer to HBA
7565  * @t: target id
7566  * @l: lun id
7567  *
7568  * This function waits for all outstanding commands to a lun to complete. It
7569  * returns 0 if all pending commands are returned and 1 otherwise.
7570  **/
7571 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7572 					struct scsi_target *stgt,
7573 					struct scsi_device *sdev)
7574 {
7575 	int cnt;
7576 	int status = 0;
7577 	struct scsi_cmnd *cmd;
7578 
7579 	/*
7580 	 * Waiting for all commands for the designated target or dev
7581 	 * in the active array
7582 	 */
7583 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
7584 		cmd = scsi_host_find_tag(ha->host, cnt);
7585 		if (cmd && stgt == scsi_target(cmd->device) &&
7586 		    (!sdev || sdev == cmd->device)) {
7587 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7588 				status++;
7589 				break;
7590 			}
7591 		}
7592 	}
7593 	return status;
7594 }
7595 
7596 /**
7597  * qla4xxx_eh_abort - callback for abort task.
7598  * @cmd: Pointer to Linux's SCSI command structure
7599  *
7600  * This routine is called by the Linux OS to abort the specified
7601  * command.
7602  **/
7603 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
7604 {
7605 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7606 	unsigned int id = cmd->device->id;
7607 	unsigned int lun = cmd->device->lun;
7608 	unsigned long flags;
7609 	struct srb *srb = NULL;
7610 	int ret = SUCCESS;
7611 	int wait = 0;
7612 
7613 	ql4_printk(KERN_INFO, ha,
7614 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
7615 	    ha->host_no, id, lun, cmd);
7616 
7617 	spin_lock_irqsave(&ha->hardware_lock, flags);
7618 	srb = (struct srb *) CMD_SP(cmd);
7619 	if (!srb) {
7620 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
7621 		return SUCCESS;
7622 	}
7623 	kref_get(&srb->srb_ref);
7624 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7625 
7626 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
7627 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
7628 		    ha->host_no, id, lun));
7629 		ret = FAILED;
7630 	} else {
7631 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
7632 		    ha->host_no, id, lun));
7633 		wait = 1;
7634 	}
7635 
7636 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
7637 
7638 	/* Wait for command to complete */
7639 	if (wait) {
7640 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7641 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
7642 			    ha->host_no, id, lun));
7643 			ret = FAILED;
7644 		}
7645 	}
7646 
7647 	ql4_printk(KERN_INFO, ha,
7648 	    "scsi%ld:%d:%d: Abort command - %s\n",
7649 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
7650 
7651 	return ret;
7652 }
7653 
7654 /**
7655  * qla4xxx_eh_device_reset - callback for target reset.
7656  * @cmd: Pointer to Linux's SCSI command structure
7657  *
7658  * This routine is called by the Linux OS to reset all luns on the
7659  * specified target.
7660  **/
7661 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
7662 {
7663 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7664 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
7665 	int ret = FAILED, stat;
7666 
7667 	if (!ddb_entry)
7668 		return ret;
7669 
7670 	ret = iscsi_block_scsi_eh(cmd);
7671 	if (ret)
7672 		return ret;
7673 	ret = FAILED;
7674 
7675 	ql4_printk(KERN_INFO, ha,
7676 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
7677 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
7678 
7679 	DEBUG2(printk(KERN_INFO
7680 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
7681 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
7682 		      cmd, jiffies, cmd->request->timeout / HZ,
7683 		      ha->dpc_flags, cmd->result, cmd->allowed));
7684 
7685 	/* FIXME: wait for hba to go online */
7686 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
7687 	if (stat != QLA_SUCCESS) {
7688 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
7689 		goto eh_dev_reset_done;
7690 	}
7691 
7692 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7693 					 cmd->device)) {
7694 		ql4_printk(KERN_INFO, ha,
7695 			   "DEVICE RESET FAILED - waiting for "
7696 			   "commands.\n");
7697 		goto eh_dev_reset_done;
7698 	}
7699 
7700 	/* Send marker. */
7701 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7702 		MM_LUN_RESET) != QLA_SUCCESS)
7703 		goto eh_dev_reset_done;
7704 
7705 	ql4_printk(KERN_INFO, ha,
7706 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
7707 		   ha->host_no, cmd->device->channel, cmd->device->id,
7708 		   cmd->device->lun);
7709 
7710 	ret = SUCCESS;
7711 
7712 eh_dev_reset_done:
7713 
7714 	return ret;
7715 }
7716 
7717 /**
7718  * qla4xxx_eh_target_reset - callback for target reset.
7719  * @cmd: Pointer to Linux's SCSI command structure
7720  *
7721  * This routine is called by the Linux OS to reset the target.
7722  **/
7723 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
7724 {
7725 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7726 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
7727 	int stat, ret;
7728 
7729 	if (!ddb_entry)
7730 		return FAILED;
7731 
7732 	ret = iscsi_block_scsi_eh(cmd);
7733 	if (ret)
7734 		return ret;
7735 
7736 	starget_printk(KERN_INFO, scsi_target(cmd->device),
7737 		       "WARM TARGET RESET ISSUED.\n");
7738 
7739 	DEBUG2(printk(KERN_INFO
7740 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
7741 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
7742 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
7743 		      ha->dpc_flags, cmd->result, cmd->allowed));
7744 
7745 	stat = qla4xxx_reset_target(ha, ddb_entry);
7746 	if (stat != QLA_SUCCESS) {
7747 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7748 			       "WARM TARGET RESET FAILED.\n");
7749 		return FAILED;
7750 	}
7751 
7752 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7753 					 NULL)) {
7754 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7755 			       "WARM TARGET DEVICE RESET FAILED - "
7756 			       "waiting for commands.\n");
7757 		return FAILED;
7758 	}
7759 
7760 	/* Send marker. */
7761 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7762 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
7763 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7764 			       "WARM TARGET DEVICE RESET FAILED - "
7765 			       "marker iocb failed.\n");
7766 		return FAILED;
7767 	}
7768 
7769 	starget_printk(KERN_INFO, scsi_target(cmd->device),
7770 		       "WARM TARGET RESET SUCCEEDED.\n");
7771 	return SUCCESS;
7772 }
7773 
7774 /**
7775  * qla4xxx_is_eh_active - check if error handler is running
7776  * @shost: Pointer to SCSI Host struct
7777  *
7778  * This routine finds that if reset host is called in EH
7779  * scenario or from some application like sg_reset
7780  **/
7781 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
7782 {
7783 	if (shost->shost_state == SHOST_RECOVERY)
7784 		return 1;
7785 	return 0;
7786 }
7787 
7788 /**
7789  * qla4xxx_eh_host_reset - kernel callback
7790  * @cmd: Pointer to Linux's SCSI command structure
7791  *
7792  * This routine is invoked by the Linux kernel to perform fatal error
7793  * recovery on the specified adapter.
7794  **/
7795 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
7796 {
7797 	int return_status = FAILED;
7798 	struct scsi_qla_host *ha;
7799 
7800 	ha = to_qla_host(cmd->device->host);
7801 
7802 	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
7803 		qla4_83xx_set_idc_dontreset(ha);
7804 
7805 	/*
7806 	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
7807 	 * protocol drivers, we should not set device_state to NEED_RESET
7808 	 */
7809 	if (ql4xdontresethba ||
7810 	    ((is_qla8032(ha) || is_qla8042(ha)) &&
7811 	     qla4_83xx_idc_dontreset(ha))) {
7812 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
7813 		     ha->host_no, __func__));
7814 
7815 		/* Clear outstanding srb in queues */
7816 		if (qla4xxx_is_eh_active(cmd->device->host))
7817 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
7818 
7819 		return FAILED;
7820 	}
7821 
7822 	ql4_printk(KERN_INFO, ha,
7823 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
7824 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
7825 
7826 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
7827 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
7828 			      "DEAD.\n", ha->host_no, cmd->device->channel,
7829 			      __func__));
7830 
7831 		return FAILED;
7832 	}
7833 
7834 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7835 		if (is_qla80XX(ha))
7836 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
7837 		else
7838 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
7839 	}
7840 
7841 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
7842 		return_status = SUCCESS;
7843 
7844 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
7845 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
7846 
7847 	return return_status;
7848 }
7849 
7850 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
7851 {
7852 	uint32_t mbox_cmd[MBOX_REG_COUNT];
7853 	uint32_t mbox_sts[MBOX_REG_COUNT];
7854 	struct addr_ctrl_blk_def *acb = NULL;
7855 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
7856 	int rval = QLA_SUCCESS;
7857 	dma_addr_t acb_dma;
7858 
7859 	acb = dma_alloc_coherent(&ha->pdev->dev,
7860 				 sizeof(struct addr_ctrl_blk_def),
7861 				 &acb_dma, GFP_KERNEL);
7862 	if (!acb) {
7863 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
7864 			   __func__);
7865 		rval = -ENOMEM;
7866 		goto exit_port_reset;
7867 	}
7868 
7869 	memset(acb, 0, acb_len);
7870 
7871 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
7872 	if (rval != QLA_SUCCESS) {
7873 		rval = -EIO;
7874 		goto exit_free_acb;
7875 	}
7876 
7877 	rval = qla4xxx_disable_acb(ha);
7878 	if (rval != QLA_SUCCESS) {
7879 		rval = -EIO;
7880 		goto exit_free_acb;
7881 	}
7882 
7883 	wait_for_completion_timeout(&ha->disable_acb_comp,
7884 				    DISABLE_ACB_TOV * HZ);
7885 
7886 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
7887 	if (rval != QLA_SUCCESS) {
7888 		rval = -EIO;
7889 		goto exit_free_acb;
7890 	}
7891 
7892 exit_free_acb:
7893 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
7894 			  acb, acb_dma);
7895 exit_port_reset:
7896 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
7897 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
7898 	return rval;
7899 }
7900 
7901 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
7902 {
7903 	struct scsi_qla_host *ha = to_qla_host(shost);
7904 	int rval = QLA_SUCCESS;
7905 	uint32_t idc_ctrl;
7906 
7907 	if (ql4xdontresethba) {
7908 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
7909 				  __func__));
7910 		rval = -EPERM;
7911 		goto exit_host_reset;
7912 	}
7913 
7914 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
7915 		goto recover_adapter;
7916 
7917 	switch (reset_type) {
7918 	case SCSI_ADAPTER_RESET:
7919 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
7920 		break;
7921 	case SCSI_FIRMWARE_RESET:
7922 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7923 			if (is_qla80XX(ha))
7924 				/* set firmware context reset */
7925 				set_bit(DPC_RESET_HA_FW_CONTEXT,
7926 					&ha->dpc_flags);
7927 			else {
7928 				rval = qla4xxx_context_reset(ha);
7929 				goto exit_host_reset;
7930 			}
7931 		}
7932 		break;
7933 	}
7934 
7935 recover_adapter:
7936 	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
7937 	 * reset is issued by application */
7938 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
7939 	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7940 		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
7941 		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
7942 				 (idc_ctrl | GRACEFUL_RESET_BIT1));
7943 	}
7944 
7945 	rval = qla4xxx_recover_adapter(ha);
7946 	if (rval != QLA_SUCCESS) {
7947 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
7948 				  __func__));
7949 		rval = -EIO;
7950 	}
7951 
7952 exit_host_reset:
7953 	return rval;
7954 }
7955 
7956 /* PCI AER driver recovers from all correctable errors w/o
7957  * driver intervention. For uncorrectable errors PCI AER
7958  * driver calls the following device driver's callbacks
7959  *
7960  * - Fatal Errors - link_reset
7961  * - Non-Fatal Errors - driver's pci_error_detected() which
7962  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
7963  *
7964  * PCI AER driver calls
7965  * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
7966  *               returns RECOVERED or NEED_RESET if fw_hung
7967  * NEED_RESET - driver's slot_reset()
7968  * DISCONNECT - device is dead & cannot recover
7969  * RECOVERED - driver's pci_resume()
7970  */
7971 static pci_ers_result_t
7972 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7973 {
7974 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
7975 
7976 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
7977 	    ha->host_no, __func__, state);
7978 
7979 	if (!is_aer_supported(ha))
7980 		return PCI_ERS_RESULT_NONE;
7981 
7982 	switch (state) {
7983 	case pci_channel_io_normal:
7984 		clear_bit(AF_EEH_BUSY, &ha->flags);
7985 		return PCI_ERS_RESULT_CAN_RECOVER;
7986 	case pci_channel_io_frozen:
7987 		set_bit(AF_EEH_BUSY, &ha->flags);
7988 		qla4xxx_mailbox_premature_completion(ha);
7989 		qla4xxx_free_irqs(ha);
7990 		pci_disable_device(pdev);
7991 		/* Return back all IOs */
7992 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
7993 		return PCI_ERS_RESULT_NEED_RESET;
7994 	case pci_channel_io_perm_failure:
7995 		set_bit(AF_EEH_BUSY, &ha->flags);
7996 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
7997 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
7998 		return PCI_ERS_RESULT_DISCONNECT;
7999 	}
8000 	return PCI_ERS_RESULT_NEED_RESET;
8001 }
8002 
8003 /**
8004  * qla4xxx_pci_mmio_enabled() gets called if
8005  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
8006  * and read/write to the device still works.
8007  **/
8008 static pci_ers_result_t
8009 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
8010 {
8011 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8012 
8013 	if (!is_aer_supported(ha))
8014 		return PCI_ERS_RESULT_NONE;
8015 
8016 	return PCI_ERS_RESULT_RECOVERED;
8017 }
8018 
8019 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
8020 {
8021 	uint32_t rval = QLA_ERROR;
8022 	int fn;
8023 	struct pci_dev *other_pdev = NULL;
8024 
8025 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
8026 
8027 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8028 
8029 	if (test_bit(AF_ONLINE, &ha->flags)) {
8030 		clear_bit(AF_ONLINE, &ha->flags);
8031 		clear_bit(AF_LINK_UP, &ha->flags);
8032 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
8033 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
8034 	}
8035 
8036 	fn = PCI_FUNC(ha->pdev->devfn);
8037 	while (fn > 0) {
8038 		fn--;
8039 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
8040 		    "func %x\n", ha->host_no, __func__, fn);
8041 		/* Get the pci device given the domain, bus,
8042 		 * slot/function number */
8043 		other_pdev =
8044 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8045 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8046 		    fn));
8047 
8048 		if (!other_pdev)
8049 			continue;
8050 
8051 		if (atomic_read(&other_pdev->enable_cnt)) {
8052 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
8053 			    "func in enabled state%x\n", ha->host_no,
8054 			    __func__, fn);
8055 			pci_dev_put(other_pdev);
8056 			break;
8057 		}
8058 		pci_dev_put(other_pdev);
8059 	}
8060 
8061 	/* The first function on the card, the reset owner will
8062 	 * start & initialize the firmware. The other functions
8063 	 * on the card will reset the firmware context
8064 	 */
8065 	if (!fn) {
8066 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
8067 		    "0x%x is the owner\n", ha->host_no, __func__,
8068 		    ha->pdev->devfn);
8069 
8070 		ha->isp_ops->idc_lock(ha);
8071 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8072 				    QLA8XXX_DEV_COLD);
8073 		ha->isp_ops->idc_unlock(ha);
8074 
8075 		rval = qla4_8xxx_update_idc_reg(ha);
8076 		if (rval == QLA_ERROR) {
8077 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
8078 				   ha->host_no, __func__);
8079 			ha->isp_ops->idc_lock(ha);
8080 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8081 					    QLA8XXX_DEV_FAILED);
8082 			ha->isp_ops->idc_unlock(ha);
8083 			goto exit_error_recovery;
8084 		}
8085 
8086 		clear_bit(AF_FW_RECOVERY, &ha->flags);
8087 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8088 
8089 		if (rval != QLA_SUCCESS) {
8090 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8091 			    "FAILED\n", ha->host_no, __func__);
8092 			ha->isp_ops->idc_lock(ha);
8093 			qla4_8xxx_clear_drv_active(ha);
8094 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8095 					    QLA8XXX_DEV_FAILED);
8096 			ha->isp_ops->idc_unlock(ha);
8097 		} else {
8098 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8099 			    "READY\n", ha->host_no, __func__);
8100 			ha->isp_ops->idc_lock(ha);
8101 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8102 					    QLA8XXX_DEV_READY);
8103 			/* Clear driver state register */
8104 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
8105 			qla4_8xxx_set_drv_active(ha);
8106 			ha->isp_ops->idc_unlock(ha);
8107 			ha->isp_ops->enable_intrs(ha);
8108 		}
8109 	} else {
8110 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
8111 		    "the reset owner\n", ha->host_no, __func__,
8112 		    ha->pdev->devfn);
8113 		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
8114 		     QLA8XXX_DEV_READY)) {
8115 			clear_bit(AF_FW_RECOVERY, &ha->flags);
8116 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8117 			if (rval == QLA_SUCCESS)
8118 				ha->isp_ops->enable_intrs(ha);
8119 
8120 			ha->isp_ops->idc_lock(ha);
8121 			qla4_8xxx_set_drv_active(ha);
8122 			ha->isp_ops->idc_unlock(ha);
8123 		}
8124 	}
8125 exit_error_recovery:
8126 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8127 	return rval;
8128 }
8129 
8130 static pci_ers_result_t
8131 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
8132 {
8133 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
8134 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8135 	int rc;
8136 
8137 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
8138 	    ha->host_no, __func__);
8139 
8140 	if (!is_aer_supported(ha))
8141 		return PCI_ERS_RESULT_NONE;
8142 
8143 	/* Restore the saved state of PCIe device -
8144 	 * BAR registers, PCI Config space, PCIX, MSI,
8145 	 * IOV states
8146 	 */
8147 	pci_restore_state(pdev);
8148 
8149 	/* pci_restore_state() clears the saved_state flag of the device
8150 	 * save restored state which resets saved_state flag
8151 	 */
8152 	pci_save_state(pdev);
8153 
8154 	/* Initialize device or resume if in suspended state */
8155 	rc = pci_enable_device(pdev);
8156 	if (rc) {
8157 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8158 		    "device after reset\n", ha->host_no, __func__);
8159 		goto exit_slot_reset;
8160 	}
8161 
8162 	ha->isp_ops->disable_intrs(ha);
8163 
8164 	if (is_qla80XX(ha)) {
8165 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8166 			ret = PCI_ERS_RESULT_RECOVERED;
8167 			goto exit_slot_reset;
8168 		} else
8169 			goto exit_slot_reset;
8170 	}
8171 
8172 exit_slot_reset:
8173 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
8174 	    "device after reset\n", ha->host_no, __func__, ret);
8175 	return ret;
8176 }
8177 
8178 static void
8179 qla4xxx_pci_resume(struct pci_dev *pdev)
8180 {
8181 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8182 	int ret;
8183 
8184 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8185 	    ha->host_no, __func__);
8186 
8187 	ret = qla4xxx_wait_for_hba_online(ha);
8188 	if (ret != QLA_SUCCESS) {
8189 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8190 		    "resume I/O from slot/link_reset\n", ha->host_no,
8191 		     __func__);
8192 	}
8193 
8194 	pci_cleanup_aer_uncorrect_error_status(pdev);
8195 	clear_bit(AF_EEH_BUSY, &ha->flags);
8196 }
8197 
8198 static const struct pci_error_handlers qla4xxx_err_handler = {
8199 	.error_detected = qla4xxx_pci_error_detected,
8200 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
8201 	.slot_reset = qla4xxx_pci_slot_reset,
8202 	.resume = qla4xxx_pci_resume,
8203 };
8204 
8205 static struct pci_device_id qla4xxx_pci_tbl[] = {
8206 	{
8207 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8208 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
8209 		.subvendor	= PCI_ANY_ID,
8210 		.subdevice	= PCI_ANY_ID,
8211 	},
8212 	{
8213 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8214 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
8215 		.subvendor	= PCI_ANY_ID,
8216 		.subdevice	= PCI_ANY_ID,
8217 	},
8218 	{
8219 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8220 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
8221 		.subvendor	= PCI_ANY_ID,
8222 		.subdevice	= PCI_ANY_ID,
8223 	},
8224 	{
8225 		.vendor         = PCI_VENDOR_ID_QLOGIC,
8226 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
8227 		.subvendor      = PCI_ANY_ID,
8228 		.subdevice      = PCI_ANY_ID,
8229 	},
8230 	{
8231 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8232 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
8233 		.subvendor	= PCI_ANY_ID,
8234 		.subdevice	= PCI_ANY_ID,
8235 	},
8236 	{
8237 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8238 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8042,
8239 		.subvendor	= PCI_ANY_ID,
8240 		.subdevice	= PCI_ANY_ID,
8241 	},
8242 	{0, 0},
8243 };
8244 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8245 
8246 static struct pci_driver qla4xxx_pci_driver = {
8247 	.name		= DRIVER_NAME,
8248 	.id_table	= qla4xxx_pci_tbl,
8249 	.probe		= qla4xxx_probe_adapter,
8250 	.remove		= qla4xxx_remove_adapter,
8251 	.err_handler = &qla4xxx_err_handler,
8252 };
8253 
8254 static int __init qla4xxx_module_init(void)
8255 {
8256 	int ret;
8257 
8258 	/* Allocate cache for SRBs. */
8259 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8260 				       SLAB_HWCACHE_ALIGN, NULL);
8261 	if (srb_cachep == NULL) {
8262 		printk(KERN_ERR
8263 		       "%s: Unable to allocate SRB cache..."
8264 		       "Failing load!\n", DRIVER_NAME);
8265 		ret = -ENOMEM;
8266 		goto no_srp_cache;
8267 	}
8268 
8269 	/* Derive version string. */
8270 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8271 	if (ql4xextended_error_logging)
8272 		strcat(qla4xxx_version_str, "-debug");
8273 
8274 	qla4xxx_scsi_transport =
8275 		iscsi_register_transport(&qla4xxx_iscsi_transport);
8276 	if (!qla4xxx_scsi_transport){
8277 		ret = -ENODEV;
8278 		goto release_srb_cache;
8279 	}
8280 
8281 	ret = pci_register_driver(&qla4xxx_pci_driver);
8282 	if (ret)
8283 		goto unregister_transport;
8284 
8285 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8286 	return 0;
8287 
8288 unregister_transport:
8289 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8290 release_srb_cache:
8291 	kmem_cache_destroy(srb_cachep);
8292 no_srp_cache:
8293 	return ret;
8294 }
8295 
8296 static void __exit qla4xxx_module_exit(void)
8297 {
8298 	pci_unregister_driver(&qla4xxx_pci_driver);
8299 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8300 	kmem_cache_destroy(srb_cachep);
8301 }
8302 
8303 module_init(qla4xxx_module_init);
8304 module_exit(qla4xxx_module_exit);
8305 
8306 MODULE_AUTHOR("QLogic Corporation");
8307 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
8308 MODULE_LICENSE("GPL");
8309 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
8310