xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_os.c (revision bc000245)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22 
23 /*
24  * Driver version
25  */
26 static char qla4xxx_version_str[40];
27 
28 /*
29  * SRB allocation cache
30  */
31 static struct kmem_cache *srb_cachep;
32 
33 /*
34  * Module parameter information and variables
35  */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 		 " Set to disable exporting boot targets to sysfs.\n"
40 		 "\t\t  0 - Export boot targets\n"
41 		 "\t\t  1 - Do not export boot targets (Default)");
42 
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 		 " Don't reset the HBA for driver recovery.\n"
47 		 "\t\t  0 - It will reset HBA (Default)\n"
48 		 "\t\t  1 - It will NOT reset HBA");
49 
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 		 " Option to enable extended error logging.\n"
54 		 "\t\t  0 - no logging (Default)\n"
55 		 "\t\t  2 - debug logging");
56 
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
62 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
63 		 "\t\t  2 = enable MSI interrupt mechanism.");
64 
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 		 " Maximum queue depth to report for target devices.\n"
70 		 "\t\t  Default: 32.");
71 
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 		 " Enable or disable dynamic tracking and adjustment of\n"
76 		 "\t\t scsi device queue depth.\n"
77 		 "\t\t  0 - Disable.\n"
78 		 "\t\t  1 - Enable. (Default)");
79 
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 		" Target Session Recovery Timeout.\n"
84 		"\t\t  Default: 120 sec.");
85 
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 		 " Set the Minidump driver capture mask level.\n"
90 		 "\t\t  Default is 0x1F.\n"
91 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92 
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 		 " Set to enable minidump.\n"
97 		 "\t\t  0 - disable minidump\n"
98 		 "\t\t  1 - enable minidump (Default)");
99 
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102  * SCSI host template entry points
103  */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105 
106 /*
107  * iSCSI template entry points
108  */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 				     enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 				  enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 				  enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 				   uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 				   enum iscsi_param_type param_type,
119 				   int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 						 struct sockaddr *dst_addr,
123 						 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 				enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 			     struct iscsi_cls_conn *cls_conn,
133 			     uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 			uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 				   struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 			     uint32_t iface_type, uint32_t payload_size,
148 			     uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 				 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data,
153 				  int len);
154 
155 /*
156  * SCSI host template entry points
157  */
158 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_slave_alloc(struct scsi_device *device);
164 static int qla4xxx_slave_configure(struct scsi_device *device);
165 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
166 static umode_t qla4_attr_is_visible(int param_type, int param);
167 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
168 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
169 				      int reason);
170 
171 /*
172  * iSCSI Flash DDB sysfs entry points
173  */
174 static int
175 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
176 			    struct iscsi_bus_flash_conn *fnode_conn,
177 			    void *data, int len);
178 static int
179 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
180 			    int param, char *buf);
181 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
182 				 int len);
183 static int
184 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
185 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
186 				   struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
188 				    struct iscsi_bus_flash_conn *fnode_conn);
189 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
190 
191 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
192     QLA82XX_LEGACY_INTR_CONFIG;
193 
194 static struct scsi_host_template qla4xxx_driver_template = {
195 	.module			= THIS_MODULE,
196 	.name			= DRIVER_NAME,
197 	.proc_name		= DRIVER_NAME,
198 	.queuecommand		= qla4xxx_queuecommand,
199 
200 	.eh_abort_handler	= qla4xxx_eh_abort,
201 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
202 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
203 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
204 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
205 
206 	.slave_configure	= qla4xxx_slave_configure,
207 	.slave_alloc		= qla4xxx_slave_alloc,
208 	.slave_destroy		= qla4xxx_slave_destroy,
209 	.change_queue_depth	= qla4xxx_change_queue_depth,
210 
211 	.this_id		= -1,
212 	.cmd_per_lun		= 3,
213 	.use_clustering		= ENABLE_CLUSTERING,
214 	.sg_tablesize		= SG_ALL,
215 
216 	.max_sectors		= 0xFFFF,
217 	.shost_attrs		= qla4xxx_host_attrs,
218 	.host_reset		= qla4xxx_host_reset,
219 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
220 };
221 
222 static struct iscsi_transport qla4xxx_iscsi_transport = {
223 	.owner			= THIS_MODULE,
224 	.name			= DRIVER_NAME,
225 	.caps			= CAP_TEXT_NEGO |
226 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
227 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
228 				  CAP_MULTI_R2T,
229 	.attr_is_visible	= qla4_attr_is_visible,
230 	.create_session         = qla4xxx_session_create,
231 	.destroy_session        = qla4xxx_session_destroy,
232 	.start_conn             = qla4xxx_conn_start,
233 	.create_conn            = qla4xxx_conn_create,
234 	.bind_conn              = qla4xxx_conn_bind,
235 	.stop_conn              = iscsi_conn_stop,
236 	.destroy_conn           = qla4xxx_conn_destroy,
237 	.set_param              = iscsi_set_param,
238 	.get_conn_param		= qla4xxx_conn_get_param,
239 	.get_session_param	= qla4xxx_session_get_param,
240 	.get_ep_param           = qla4xxx_get_ep_param,
241 	.ep_connect		= qla4xxx_ep_connect,
242 	.ep_poll		= qla4xxx_ep_poll,
243 	.ep_disconnect		= qla4xxx_ep_disconnect,
244 	.get_stats		= qla4xxx_conn_get_stats,
245 	.send_pdu		= iscsi_conn_send_pdu,
246 	.xmit_task		= qla4xxx_task_xmit,
247 	.cleanup_task		= qla4xxx_task_cleanup,
248 	.alloc_pdu		= qla4xxx_alloc_pdu,
249 
250 	.get_host_param		= qla4xxx_host_get_param,
251 	.set_iface_param	= qla4xxx_iface_set_param,
252 	.get_iface_param	= qla4xxx_get_iface_param,
253 	.bsg_request		= qla4xxx_bsg_request,
254 	.send_ping		= qla4xxx_send_ping,
255 	.get_chap		= qla4xxx_get_chap_list,
256 	.delete_chap		= qla4xxx_delete_chap,
257 	.set_chap		= qla4xxx_set_chap_entry,
258 	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
259 	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
260 	.new_flashnode		= qla4xxx_sysfs_ddb_add,
261 	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
262 	.login_flashnode	= qla4xxx_sysfs_ddb_login,
263 	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
264 	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
265 };
266 
267 static struct scsi_transport_template *qla4xxx_scsi_transport;
268 
269 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
270 			     uint32_t iface_type, uint32_t payload_size,
271 			     uint32_t pid, struct sockaddr *dst_addr)
272 {
273 	struct scsi_qla_host *ha = to_qla_host(shost);
274 	struct sockaddr_in *addr;
275 	struct sockaddr_in6 *addr6;
276 	uint32_t options = 0;
277 	uint8_t ipaddr[IPv6_ADDR_LEN];
278 	int rval;
279 
280 	memset(ipaddr, 0, IPv6_ADDR_LEN);
281 	/* IPv4 to IPv4 */
282 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
283 	    (dst_addr->sa_family == AF_INET)) {
284 		addr = (struct sockaddr_in *)dst_addr;
285 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
286 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
287 				  "dest: %pI4\n", __func__,
288 				  &ha->ip_config.ip_address, ipaddr));
289 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
290 					 ipaddr);
291 		if (rval)
292 			rval = -EINVAL;
293 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
294 		   (dst_addr->sa_family == AF_INET6)) {
295 		/* IPv6 to IPv6 */
296 		addr6 = (struct sockaddr_in6 *)dst_addr;
297 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
298 
299 		options |= PING_IPV6_PROTOCOL_ENABLE;
300 
301 		/* Ping using LinkLocal address */
302 		if ((iface_num == 0) || (iface_num == 1)) {
303 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
304 					  "src: %pI6 dest: %pI6\n", __func__,
305 					  &ha->ip_config.ipv6_link_local_addr,
306 					  ipaddr));
307 			options |= PING_IPV6_LINKLOCAL_ADDR;
308 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
309 						 pid, ipaddr);
310 		} else {
311 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
312 				   "not supported\n", __func__, iface_num);
313 			rval = -ENOSYS;
314 			goto exit_send_ping;
315 		}
316 
317 		/*
318 		 * If ping using LinkLocal address fails, try ping using
319 		 * IPv6 address
320 		 */
321 		if (rval != QLA_SUCCESS) {
322 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
323 			if (iface_num == 0) {
324 				options |= PING_IPV6_ADDR0;
325 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
326 						  "Ping src: %pI6 "
327 						  "dest: %pI6\n", __func__,
328 						  &ha->ip_config.ipv6_addr0,
329 						  ipaddr));
330 			} else if (iface_num == 1) {
331 				options |= PING_IPV6_ADDR1;
332 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
333 						  "Ping src: %pI6 "
334 						  "dest: %pI6\n", __func__,
335 						  &ha->ip_config.ipv6_addr1,
336 						  ipaddr));
337 			}
338 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
339 						 pid, ipaddr);
340 			if (rval)
341 				rval = -EINVAL;
342 		}
343 	} else
344 		rval = -ENOSYS;
345 exit_send_ping:
346 	return rval;
347 }
348 
349 static umode_t qla4_attr_is_visible(int param_type, int param)
350 {
351 	switch (param_type) {
352 	case ISCSI_HOST_PARAM:
353 		switch (param) {
354 		case ISCSI_HOST_PARAM_HWADDRESS:
355 		case ISCSI_HOST_PARAM_IPADDRESS:
356 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
357 		case ISCSI_HOST_PARAM_PORT_STATE:
358 		case ISCSI_HOST_PARAM_PORT_SPEED:
359 			return S_IRUGO;
360 		default:
361 			return 0;
362 		}
363 	case ISCSI_PARAM:
364 		switch (param) {
365 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
366 		case ISCSI_PARAM_PERSISTENT_PORT:
367 		case ISCSI_PARAM_CONN_ADDRESS:
368 		case ISCSI_PARAM_CONN_PORT:
369 		case ISCSI_PARAM_TARGET_NAME:
370 		case ISCSI_PARAM_TPGT:
371 		case ISCSI_PARAM_TARGET_ALIAS:
372 		case ISCSI_PARAM_MAX_BURST:
373 		case ISCSI_PARAM_MAX_R2T:
374 		case ISCSI_PARAM_FIRST_BURST:
375 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
376 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
377 		case ISCSI_PARAM_IFACE_NAME:
378 		case ISCSI_PARAM_CHAP_OUT_IDX:
379 		case ISCSI_PARAM_CHAP_IN_IDX:
380 		case ISCSI_PARAM_USERNAME:
381 		case ISCSI_PARAM_PASSWORD:
382 		case ISCSI_PARAM_USERNAME_IN:
383 		case ISCSI_PARAM_PASSWORD_IN:
384 		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
385 		case ISCSI_PARAM_DISCOVERY_SESS:
386 		case ISCSI_PARAM_PORTAL_TYPE:
387 		case ISCSI_PARAM_CHAP_AUTH_EN:
388 		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
389 		case ISCSI_PARAM_BIDI_CHAP_EN:
390 		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
391 		case ISCSI_PARAM_DEF_TIME2WAIT:
392 		case ISCSI_PARAM_DEF_TIME2RETAIN:
393 		case ISCSI_PARAM_HDRDGST_EN:
394 		case ISCSI_PARAM_DATADGST_EN:
395 		case ISCSI_PARAM_INITIAL_R2T_EN:
396 		case ISCSI_PARAM_IMM_DATA_EN:
397 		case ISCSI_PARAM_PDU_INORDER_EN:
398 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
399 		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
400 		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
401 		case ISCSI_PARAM_TCP_WSF_DISABLE:
402 		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
403 		case ISCSI_PARAM_TCP_TIMER_SCALE:
404 		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
405 		case ISCSI_PARAM_TCP_XMIT_WSF:
406 		case ISCSI_PARAM_TCP_RECV_WSF:
407 		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
408 		case ISCSI_PARAM_IPV4_TOS:
409 		case ISCSI_PARAM_IPV6_TC:
410 		case ISCSI_PARAM_IPV6_FLOW_LABEL:
411 		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
412 		case ISCSI_PARAM_KEEPALIVE_TMO:
413 		case ISCSI_PARAM_LOCAL_PORT:
414 		case ISCSI_PARAM_ISID:
415 		case ISCSI_PARAM_TSID:
416 		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
417 		case ISCSI_PARAM_ERL:
418 		case ISCSI_PARAM_STATSN:
419 		case ISCSI_PARAM_EXP_STATSN:
420 		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
421 		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
422 			return S_IRUGO;
423 		default:
424 			return 0;
425 		}
426 	case ISCSI_NET_PARAM:
427 		switch (param) {
428 		case ISCSI_NET_PARAM_IPV4_ADDR:
429 		case ISCSI_NET_PARAM_IPV4_SUBNET:
430 		case ISCSI_NET_PARAM_IPV4_GW:
431 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
432 		case ISCSI_NET_PARAM_IFACE_ENABLE:
433 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
434 		case ISCSI_NET_PARAM_IPV6_ADDR:
435 		case ISCSI_NET_PARAM_IPV6_ROUTER:
436 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
437 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
438 		case ISCSI_NET_PARAM_VLAN_ID:
439 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
440 		case ISCSI_NET_PARAM_VLAN_ENABLED:
441 		case ISCSI_NET_PARAM_MTU:
442 		case ISCSI_NET_PARAM_PORT:
443 			return S_IRUGO;
444 		default:
445 			return 0;
446 		}
447 	case ISCSI_FLASHNODE_PARAM:
448 		switch (param) {
449 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
450 		case ISCSI_FLASHNODE_PORTAL_TYPE:
451 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
452 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
453 		case ISCSI_FLASHNODE_ENTRY_EN:
454 		case ISCSI_FLASHNODE_HDR_DGST_EN:
455 		case ISCSI_FLASHNODE_DATA_DGST_EN:
456 		case ISCSI_FLASHNODE_IMM_DATA_EN:
457 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
458 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
459 		case ISCSI_FLASHNODE_PDU_INORDER:
460 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
461 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
462 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
463 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
464 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
465 		case ISCSI_FLASHNODE_ERL:
466 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
467 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
468 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
469 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
470 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
471 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
472 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
473 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
474 		case ISCSI_FLASHNODE_FIRST_BURST:
475 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
476 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
477 		case ISCSI_FLASHNODE_MAX_R2T:
478 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
479 		case ISCSI_FLASHNODE_ISID:
480 		case ISCSI_FLASHNODE_TSID:
481 		case ISCSI_FLASHNODE_PORT:
482 		case ISCSI_FLASHNODE_MAX_BURST:
483 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
484 		case ISCSI_FLASHNODE_IPADDR:
485 		case ISCSI_FLASHNODE_ALIAS:
486 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
487 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
488 		case ISCSI_FLASHNODE_LOCAL_PORT:
489 		case ISCSI_FLASHNODE_IPV4_TOS:
490 		case ISCSI_FLASHNODE_IPV6_TC:
491 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
492 		case ISCSI_FLASHNODE_NAME:
493 		case ISCSI_FLASHNODE_TPGT:
494 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
495 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
496 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
497 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
498 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
499 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
500 		case ISCSI_FLASHNODE_USERNAME:
501 		case ISCSI_FLASHNODE_PASSWORD:
502 		case ISCSI_FLASHNODE_STATSN:
503 		case ISCSI_FLASHNODE_EXP_STATSN:
504 		case ISCSI_FLASHNODE_IS_BOOT_TGT:
505 			return S_IRUGO;
506 		default:
507 			return 0;
508 		}
509 	}
510 
511 	return 0;
512 }
513 
514 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
515 				     int16_t chap_index,
516 				     struct ql4_chap_table **chap_entry)
517 {
518 	int rval = QLA_ERROR;
519 	int max_chap_entries;
520 
521 	if (!ha->chap_list) {
522 		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
523 		rval = QLA_ERROR;
524 		goto exit_get_chap;
525 	}
526 
527 	if (is_qla80XX(ha))
528 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
529 				   sizeof(struct ql4_chap_table);
530 	else
531 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
532 
533 	if (chap_index > max_chap_entries) {
534 		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
535 		rval = QLA_ERROR;
536 		goto exit_get_chap;
537 	}
538 
539 	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
540 	if ((*chap_entry)->cookie !=
541 	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
542 		rval = QLA_ERROR;
543 		*chap_entry = NULL;
544 	} else {
545 		rval = QLA_SUCCESS;
546 	}
547 
548 exit_get_chap:
549 	return rval;
550 }
551 
552 /**
553  * qla4xxx_find_free_chap_index - Find the first free chap index
554  * @ha: pointer to adapter structure
555  * @chap_index: CHAP index to be returned
556  *
557  * Find the first free chap index available in the chap table
558  *
559  * Note: Caller should acquire the chap lock before getting here.
560  **/
561 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
562 					uint16_t *chap_index)
563 {
564 	int i, rval;
565 	int free_index = -1;
566 	int max_chap_entries = 0;
567 	struct ql4_chap_table *chap_table;
568 
569 	if (is_qla80XX(ha))
570 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
571 						sizeof(struct ql4_chap_table);
572 	else
573 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
574 
575 	if (!ha->chap_list) {
576 		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
577 		rval = QLA_ERROR;
578 		goto exit_find_chap;
579 	}
580 
581 	for (i = 0; i < max_chap_entries; i++) {
582 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
583 
584 		if ((chap_table->cookie !=
585 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
586 		   (i > MAX_RESRV_CHAP_IDX)) {
587 				free_index = i;
588 				break;
589 		}
590 	}
591 
592 	if (free_index != -1) {
593 		*chap_index = free_index;
594 		rval = QLA_SUCCESS;
595 	} else {
596 		rval = QLA_ERROR;
597 	}
598 
599 exit_find_chap:
600 	return rval;
601 }
602 
603 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
604 				  uint32_t *num_entries, char *buf)
605 {
606 	struct scsi_qla_host *ha = to_qla_host(shost);
607 	struct ql4_chap_table *chap_table;
608 	struct iscsi_chap_rec *chap_rec;
609 	int max_chap_entries = 0;
610 	int valid_chap_entries = 0;
611 	int ret = 0, i;
612 
613 	if (is_qla80XX(ha))
614 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
615 					sizeof(struct ql4_chap_table);
616 	else
617 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
618 
619 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
620 			__func__, *num_entries, chap_tbl_idx);
621 
622 	if (!buf) {
623 		ret = -ENOMEM;
624 		goto exit_get_chap_list;
625 	}
626 
627 	chap_rec = (struct iscsi_chap_rec *) buf;
628 	mutex_lock(&ha->chap_sem);
629 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
630 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
631 		if (chap_table->cookie !=
632 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
633 			continue;
634 
635 		chap_rec->chap_tbl_idx = i;
636 		strncpy(chap_rec->username, chap_table->name,
637 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
638 		strncpy(chap_rec->password, chap_table->secret,
639 			QL4_CHAP_MAX_SECRET_LEN);
640 		chap_rec->password_length = chap_table->secret_len;
641 
642 		if (chap_table->flags & BIT_7) /* local */
643 			chap_rec->chap_type = CHAP_TYPE_OUT;
644 
645 		if (chap_table->flags & BIT_6) /* peer */
646 			chap_rec->chap_type = CHAP_TYPE_IN;
647 
648 		chap_rec++;
649 
650 		valid_chap_entries++;
651 		if (valid_chap_entries == *num_entries)
652 			break;
653 		else
654 			continue;
655 	}
656 	mutex_unlock(&ha->chap_sem);
657 
658 exit_get_chap_list:
659 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
660 			__func__,  valid_chap_entries);
661 	*num_entries = valid_chap_entries;
662 	return ret;
663 }
664 
665 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
666 {
667 	int ret = 0;
668 	uint16_t *chap_tbl_idx = (uint16_t *) data;
669 	struct iscsi_cls_session *cls_session;
670 	struct iscsi_session *sess;
671 	struct ddb_entry *ddb_entry;
672 
673 	if (!iscsi_is_session_dev(dev))
674 		goto exit_is_chap_active;
675 
676 	cls_session = iscsi_dev_to_session(dev);
677 	sess = cls_session->dd_data;
678 	ddb_entry = sess->dd_data;
679 
680 	if (iscsi_session_chkready(cls_session))
681 		goto exit_is_chap_active;
682 
683 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
684 		ret = 1;
685 
686 exit_is_chap_active:
687 	return ret;
688 }
689 
690 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
691 				  uint16_t chap_tbl_idx)
692 {
693 	int ret = 0;
694 
695 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
696 				    __qla4xxx_is_chap_active);
697 
698 	return ret;
699 }
700 
701 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
702 {
703 	struct scsi_qla_host *ha = to_qla_host(shost);
704 	struct ql4_chap_table *chap_table;
705 	dma_addr_t chap_dma;
706 	int max_chap_entries = 0;
707 	uint32_t offset = 0;
708 	uint32_t chap_size;
709 	int ret = 0;
710 
711 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
712 	if (chap_table == NULL)
713 		return -ENOMEM;
714 
715 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
716 
717 	if (is_qla80XX(ha))
718 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
719 				   sizeof(struct ql4_chap_table);
720 	else
721 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
722 
723 	if (chap_tbl_idx > max_chap_entries) {
724 		ret = -EINVAL;
725 		goto exit_delete_chap;
726 	}
727 
728 	/* Check if chap index is in use.
729 	 * If chap is in use don't delet chap entry */
730 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
731 	if (ret) {
732 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
733 			   "delete from flash\n", chap_tbl_idx);
734 		ret = -EBUSY;
735 		goto exit_delete_chap;
736 	}
737 
738 	chap_size = sizeof(struct ql4_chap_table);
739 	if (is_qla40XX(ha))
740 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
741 	else {
742 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
743 		/* flt_chap_size is CHAP table size for both ports
744 		 * so divide it by 2 to calculate the offset for second port
745 		 */
746 		if (ha->port_num == 1)
747 			offset += (ha->hw.flt_chap_size / 2);
748 		offset += (chap_tbl_idx * chap_size);
749 	}
750 
751 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
752 	if (ret != QLA_SUCCESS) {
753 		ret = -EINVAL;
754 		goto exit_delete_chap;
755 	}
756 
757 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
758 			  __le16_to_cpu(chap_table->cookie)));
759 
760 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
761 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
762 		goto exit_delete_chap;
763 	}
764 
765 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
766 
767 	offset = FLASH_CHAP_OFFSET |
768 			(chap_tbl_idx * sizeof(struct ql4_chap_table));
769 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
770 				FLASH_OPT_RMW_COMMIT);
771 	if (ret == QLA_SUCCESS && ha->chap_list) {
772 		mutex_lock(&ha->chap_sem);
773 		/* Update ha chap_list cache */
774 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
775 			chap_table, sizeof(struct ql4_chap_table));
776 		mutex_unlock(&ha->chap_sem);
777 	}
778 	if (ret != QLA_SUCCESS)
779 		ret =  -EINVAL;
780 
781 exit_delete_chap:
782 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
783 	return ret;
784 }
785 
786 /**
787  * qla4xxx_set_chap_entry - Make chap entry with given information
788  * @shost: pointer to host
789  * @data: chap info - credentials, index and type to make chap entry
790  * @len: length of data
791  *
792  * Add or update chap entry with the given information
793  **/
794 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
795 {
796 	struct scsi_qla_host *ha = to_qla_host(shost);
797 	struct iscsi_chap_rec chap_rec;
798 	struct ql4_chap_table *chap_entry = NULL;
799 	struct iscsi_param_info *param_info;
800 	struct nlattr *attr;
801 	int max_chap_entries = 0;
802 	int type;
803 	int rem = len;
804 	int rc = 0;
805 
806 	memset(&chap_rec, 0, sizeof(chap_rec));
807 
808 	nla_for_each_attr(attr, data, len, rem) {
809 		param_info = nla_data(attr);
810 
811 		switch (param_info->param) {
812 		case ISCSI_CHAP_PARAM_INDEX:
813 			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
814 			break;
815 		case ISCSI_CHAP_PARAM_CHAP_TYPE:
816 			chap_rec.chap_type = param_info->value[0];
817 			break;
818 		case ISCSI_CHAP_PARAM_USERNAME:
819 			memcpy(chap_rec.username, param_info->value,
820 			       param_info->len);
821 			break;
822 		case ISCSI_CHAP_PARAM_PASSWORD:
823 			memcpy(chap_rec.password, param_info->value,
824 			       param_info->len);
825 			break;
826 		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
827 			chap_rec.password_length = param_info->value[0];
828 			break;
829 		default:
830 			ql4_printk(KERN_ERR, ha,
831 				   "%s: No such sysfs attribute\n", __func__);
832 			rc = -ENOSYS;
833 			goto exit_set_chap;
834 		};
835 	}
836 
837 	if (chap_rec.chap_type == CHAP_TYPE_IN)
838 		type = BIDI_CHAP;
839 	else
840 		type = LOCAL_CHAP;
841 
842 	if (is_qla80XX(ha))
843 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
844 				   sizeof(struct ql4_chap_table);
845 	else
846 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
847 
848 	mutex_lock(&ha->chap_sem);
849 	if (chap_rec.chap_tbl_idx < max_chap_entries) {
850 		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
851 					       &chap_entry);
852 		if (!rc) {
853 			if (!(type == qla4xxx_get_chap_type(chap_entry))) {
854 				ql4_printk(KERN_INFO, ha,
855 					   "Type mismatch for CHAP entry %d\n",
856 					   chap_rec.chap_tbl_idx);
857 				rc = -EINVAL;
858 				goto exit_unlock_chap;
859 			}
860 
861 			/* If chap index is in use then don't modify it */
862 			rc = qla4xxx_is_chap_active(shost,
863 						    chap_rec.chap_tbl_idx);
864 			if (rc) {
865 				ql4_printk(KERN_INFO, ha,
866 					   "CHAP entry %d is in use\n",
867 					   chap_rec.chap_tbl_idx);
868 				rc = -EBUSY;
869 				goto exit_unlock_chap;
870 			}
871 		}
872 	} else {
873 		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
874 		if (rc) {
875 			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
876 			rc = -EBUSY;
877 			goto exit_unlock_chap;
878 		}
879 	}
880 
881 	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
882 			      chap_rec.chap_tbl_idx, type);
883 
884 exit_unlock_chap:
885 	mutex_unlock(&ha->chap_sem);
886 
887 exit_set_chap:
888 	return rc;
889 }
890 
891 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
892 				   enum iscsi_param_type param_type,
893 				   int param, char *buf)
894 {
895 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
896 	struct scsi_qla_host *ha = to_qla_host(shost);
897 	int len = -ENOSYS;
898 
899 	if (param_type != ISCSI_NET_PARAM)
900 		return -ENOSYS;
901 
902 	switch (param) {
903 	case ISCSI_NET_PARAM_IPV4_ADDR:
904 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
905 		break;
906 	case ISCSI_NET_PARAM_IPV4_SUBNET:
907 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
908 		break;
909 	case ISCSI_NET_PARAM_IPV4_GW:
910 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
911 		break;
912 	case ISCSI_NET_PARAM_IFACE_ENABLE:
913 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
914 			len = sprintf(buf, "%s\n",
915 				      (ha->ip_config.ipv4_options &
916 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
917 				      "enabled" : "disabled");
918 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
919 			len = sprintf(buf, "%s\n",
920 				      (ha->ip_config.ipv6_options &
921 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
922 				       "enabled" : "disabled");
923 		break;
924 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
925 		len = sprintf(buf, "%s\n",
926 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
927 			      "dhcp" : "static");
928 		break;
929 	case ISCSI_NET_PARAM_IPV6_ADDR:
930 		if (iface->iface_num == 0)
931 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
932 		if (iface->iface_num == 1)
933 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
934 		break;
935 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
936 		len = sprintf(buf, "%pI6\n",
937 			      &ha->ip_config.ipv6_link_local_addr);
938 		break;
939 	case ISCSI_NET_PARAM_IPV6_ROUTER:
940 		len = sprintf(buf, "%pI6\n",
941 			      &ha->ip_config.ipv6_default_router_addr);
942 		break;
943 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
944 		len = sprintf(buf, "%s\n",
945 			      (ha->ip_config.ipv6_addl_options &
946 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
947 			       "nd" : "static");
948 		break;
949 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
950 		len = sprintf(buf, "%s\n",
951 			      (ha->ip_config.ipv6_addl_options &
952 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
953 			       "auto" : "static");
954 		break;
955 	case ISCSI_NET_PARAM_VLAN_ID:
956 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
957 			len = sprintf(buf, "%d\n",
958 				      (ha->ip_config.ipv4_vlan_tag &
959 				       ISCSI_MAX_VLAN_ID));
960 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
961 			len = sprintf(buf, "%d\n",
962 				      (ha->ip_config.ipv6_vlan_tag &
963 				       ISCSI_MAX_VLAN_ID));
964 		break;
965 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
966 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
967 			len = sprintf(buf, "%d\n",
968 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
969 					ISCSI_MAX_VLAN_PRIORITY));
970 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
971 			len = sprintf(buf, "%d\n",
972 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
973 					ISCSI_MAX_VLAN_PRIORITY));
974 		break;
975 	case ISCSI_NET_PARAM_VLAN_ENABLED:
976 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
977 			len = sprintf(buf, "%s\n",
978 				      (ha->ip_config.ipv4_options &
979 				       IPOPT_VLAN_TAGGING_ENABLE) ?
980 				       "enabled" : "disabled");
981 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
982 			len = sprintf(buf, "%s\n",
983 				      (ha->ip_config.ipv6_options &
984 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
985 				       "enabled" : "disabled");
986 		break;
987 	case ISCSI_NET_PARAM_MTU:
988 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
989 		break;
990 	case ISCSI_NET_PARAM_PORT:
991 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
992 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
993 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
994 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
995 		break;
996 	default:
997 		len = -ENOSYS;
998 	}
999 
1000 	return len;
1001 }
1002 
1003 static struct iscsi_endpoint *
1004 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1005 		   int non_blocking)
1006 {
1007 	int ret;
1008 	struct iscsi_endpoint *ep;
1009 	struct qla_endpoint *qla_ep;
1010 	struct scsi_qla_host *ha;
1011 	struct sockaddr_in *addr;
1012 	struct sockaddr_in6 *addr6;
1013 
1014 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1015 	if (!shost) {
1016 		ret = -ENXIO;
1017 		printk(KERN_ERR "%s: shost is NULL\n",
1018 		       __func__);
1019 		return ERR_PTR(ret);
1020 	}
1021 
1022 	ha = iscsi_host_priv(shost);
1023 
1024 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1025 	if (!ep) {
1026 		ret = -ENOMEM;
1027 		return ERR_PTR(ret);
1028 	}
1029 
1030 	qla_ep = ep->dd_data;
1031 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
1032 	if (dst_addr->sa_family == AF_INET) {
1033 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1034 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1035 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1036 				  (char *)&addr->sin_addr));
1037 	} else if (dst_addr->sa_family == AF_INET6) {
1038 		memcpy(&qla_ep->dst_addr, dst_addr,
1039 		       sizeof(struct sockaddr_in6));
1040 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1041 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1042 				  (char *)&addr6->sin6_addr));
1043 	}
1044 
1045 	qla_ep->host = shost;
1046 
1047 	return ep;
1048 }
1049 
1050 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1051 {
1052 	struct qla_endpoint *qla_ep;
1053 	struct scsi_qla_host *ha;
1054 	int ret = 0;
1055 
1056 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1057 	qla_ep = ep->dd_data;
1058 	ha = to_qla_host(qla_ep->host);
1059 
1060 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1061 		ret = 1;
1062 
1063 	return ret;
1064 }
1065 
1066 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
1067 {
1068 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1069 	iscsi_destroy_endpoint(ep);
1070 }
1071 
1072 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1073 				enum iscsi_param param,
1074 				char *buf)
1075 {
1076 	struct qla_endpoint *qla_ep = ep->dd_data;
1077 	struct sockaddr *dst_addr;
1078 
1079 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1080 
1081 	switch (param) {
1082 	case ISCSI_PARAM_CONN_PORT:
1083 	case ISCSI_PARAM_CONN_ADDRESS:
1084 		if (!qla_ep)
1085 			return -ENOTCONN;
1086 
1087 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1088 		if (!dst_addr)
1089 			return -ENOTCONN;
1090 
1091 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1092 						 &qla_ep->dst_addr, param, buf);
1093 	default:
1094 		return -ENOSYS;
1095 	}
1096 }
1097 
1098 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1099 				   struct iscsi_stats *stats)
1100 {
1101 	struct iscsi_session *sess;
1102 	struct iscsi_cls_session *cls_sess;
1103 	struct ddb_entry *ddb_entry;
1104 	struct scsi_qla_host *ha;
1105 	struct ql_iscsi_stats *ql_iscsi_stats;
1106 	int stats_size;
1107 	int ret;
1108 	dma_addr_t iscsi_stats_dma;
1109 
1110 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1111 
1112 	cls_sess = iscsi_conn_to_session(cls_conn);
1113 	sess = cls_sess->dd_data;
1114 	ddb_entry = sess->dd_data;
1115 	ha = ddb_entry->ha;
1116 
1117 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1118 	/* Allocate memory */
1119 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1120 					    &iscsi_stats_dma, GFP_KERNEL);
1121 	if (!ql_iscsi_stats) {
1122 		ql4_printk(KERN_ERR, ha,
1123 			   "Unable to allocate memory for iscsi stats\n");
1124 		goto exit_get_stats;
1125 	}
1126 
1127 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
1128 				     iscsi_stats_dma);
1129 	if (ret != QLA_SUCCESS) {
1130 		ql4_printk(KERN_ERR, ha,
1131 			   "Unable to retrieve iscsi stats\n");
1132 		goto free_stats;
1133 	}
1134 
1135 	/* octets */
1136 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
1137 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
1138 	/* xmit pdus */
1139 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
1140 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
1141 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
1142 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
1143 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
1144 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
1145 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
1146 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
1147 	/* recv pdus */
1148 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
1149 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
1150 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
1151 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
1152 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
1153 	stats->logoutrsp_pdus =
1154 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
1155 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
1156 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
1157 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
1158 
1159 free_stats:
1160 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
1161 			  iscsi_stats_dma);
1162 exit_get_stats:
1163 	return;
1164 }
1165 
1166 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1167 {
1168 	struct iscsi_cls_session *session;
1169 	struct iscsi_session *sess;
1170 	unsigned long flags;
1171 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
1172 
1173 	session = starget_to_session(scsi_target(sc->device));
1174 	sess = session->dd_data;
1175 
1176 	spin_lock_irqsave(&session->lock, flags);
1177 	if (session->state == ISCSI_SESSION_FAILED)
1178 		ret = BLK_EH_RESET_TIMER;
1179 	spin_unlock_irqrestore(&session->lock, flags);
1180 
1181 	return ret;
1182 }
1183 
1184 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1185 {
1186 	struct scsi_qla_host *ha = to_qla_host(shost);
1187 	struct iscsi_cls_host *ihost = shost->shost_data;
1188 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1189 
1190 	qla4xxx_get_firmware_state(ha);
1191 
1192 	switch (ha->addl_fw_state & 0x0F00) {
1193 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
1194 		speed = ISCSI_PORT_SPEED_10MBPS;
1195 		break;
1196 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
1197 		speed = ISCSI_PORT_SPEED_100MBPS;
1198 		break;
1199 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
1200 		speed = ISCSI_PORT_SPEED_1GBPS;
1201 		break;
1202 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
1203 		speed = ISCSI_PORT_SPEED_10GBPS;
1204 		break;
1205 	}
1206 	ihost->port_speed = speed;
1207 }
1208 
1209 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1210 {
1211 	struct scsi_qla_host *ha = to_qla_host(shost);
1212 	struct iscsi_cls_host *ihost = shost->shost_data;
1213 	uint32_t state = ISCSI_PORT_STATE_DOWN;
1214 
1215 	if (test_bit(AF_LINK_UP, &ha->flags))
1216 		state = ISCSI_PORT_STATE_UP;
1217 
1218 	ihost->port_state = state;
1219 }
1220 
1221 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1222 				  enum iscsi_host_param param, char *buf)
1223 {
1224 	struct scsi_qla_host *ha = to_qla_host(shost);
1225 	int len;
1226 
1227 	switch (param) {
1228 	case ISCSI_HOST_PARAM_HWADDRESS:
1229 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1230 		break;
1231 	case ISCSI_HOST_PARAM_IPADDRESS:
1232 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1233 		break;
1234 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
1235 		len = sprintf(buf, "%s\n", ha->name_string);
1236 		break;
1237 	case ISCSI_HOST_PARAM_PORT_STATE:
1238 		qla4xxx_set_port_state(shost);
1239 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1240 		break;
1241 	case ISCSI_HOST_PARAM_PORT_SPEED:
1242 		qla4xxx_set_port_speed(shost);
1243 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1244 		break;
1245 	default:
1246 		return -ENOSYS;
1247 	}
1248 
1249 	return len;
1250 }
1251 
1252 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1253 {
1254 	if (ha->iface_ipv4)
1255 		return;
1256 
1257 	/* IPv4 */
1258 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
1259 					    &qla4xxx_iscsi_transport,
1260 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
1261 	if (!ha->iface_ipv4)
1262 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1263 			   "iface0.\n");
1264 }
1265 
1266 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1267 {
1268 	if (!ha->iface_ipv6_0)
1269 		/* IPv6 iface-0 */
1270 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1271 						      &qla4xxx_iscsi_transport,
1272 						      ISCSI_IFACE_TYPE_IPV6, 0,
1273 						      0);
1274 	if (!ha->iface_ipv6_0)
1275 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1276 			   "iface0.\n");
1277 
1278 	if (!ha->iface_ipv6_1)
1279 		/* IPv6 iface-1 */
1280 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1281 						      &qla4xxx_iscsi_transport,
1282 						      ISCSI_IFACE_TYPE_IPV6, 1,
1283 						      0);
1284 	if (!ha->iface_ipv6_1)
1285 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1286 			   "iface1.\n");
1287 }
1288 
1289 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1290 {
1291 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1292 		qla4xxx_create_ipv4_iface(ha);
1293 
1294 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1295 		qla4xxx_create_ipv6_iface(ha);
1296 }
1297 
1298 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1299 {
1300 	if (ha->iface_ipv4) {
1301 		iscsi_destroy_iface(ha->iface_ipv4);
1302 		ha->iface_ipv4 = NULL;
1303 	}
1304 }
1305 
1306 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1307 {
1308 	if (ha->iface_ipv6_0) {
1309 		iscsi_destroy_iface(ha->iface_ipv6_0);
1310 		ha->iface_ipv6_0 = NULL;
1311 	}
1312 	if (ha->iface_ipv6_1) {
1313 		iscsi_destroy_iface(ha->iface_ipv6_1);
1314 		ha->iface_ipv6_1 = NULL;
1315 	}
1316 }
1317 
1318 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1319 {
1320 	qla4xxx_destroy_ipv4_iface(ha);
1321 	qla4xxx_destroy_ipv6_iface(ha);
1322 }
1323 
1324 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1325 			     struct iscsi_iface_param_info *iface_param,
1326 			     struct addr_ctrl_blk *init_fw_cb)
1327 {
1328 	/*
1329 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1330 	 * iface_num 1 is valid only for IPv6 Addr.
1331 	 */
1332 	switch (iface_param->param) {
1333 	case ISCSI_NET_PARAM_IPV6_ADDR:
1334 		if (iface_param->iface_num & 0x1)
1335 			/* IPv6 Addr 1 */
1336 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1337 			       sizeof(init_fw_cb->ipv6_addr1));
1338 		else
1339 			/* IPv6 Addr 0 */
1340 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1341 			       sizeof(init_fw_cb->ipv6_addr0));
1342 		break;
1343 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1344 		if (iface_param->iface_num & 0x1)
1345 			break;
1346 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1347 		       sizeof(init_fw_cb->ipv6_if_id));
1348 		break;
1349 	case ISCSI_NET_PARAM_IPV6_ROUTER:
1350 		if (iface_param->iface_num & 0x1)
1351 			break;
1352 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1353 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1354 		break;
1355 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1356 		/* Autocfg applies to even interface */
1357 		if (iface_param->iface_num & 0x1)
1358 			break;
1359 
1360 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1361 			init_fw_cb->ipv6_addtl_opts &=
1362 				cpu_to_le16(
1363 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1364 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1365 			init_fw_cb->ipv6_addtl_opts |=
1366 				cpu_to_le16(
1367 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1368 		else
1369 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1370 				   "IPv6 addr\n");
1371 		break;
1372 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1373 		/* Autocfg applies to even interface */
1374 		if (iface_param->iface_num & 0x1)
1375 			break;
1376 
1377 		if (iface_param->value[0] ==
1378 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1379 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1380 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1381 		else if (iface_param->value[0] ==
1382 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1383 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1384 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1385 		else
1386 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1387 				   "IPv6 linklocal addr\n");
1388 		break;
1389 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1390 		/* Autocfg applies to even interface */
1391 		if (iface_param->iface_num & 0x1)
1392 			break;
1393 
1394 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1395 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1396 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1397 		break;
1398 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1399 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1400 			init_fw_cb->ipv6_opts |=
1401 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1402 			qla4xxx_create_ipv6_iface(ha);
1403 		} else {
1404 			init_fw_cb->ipv6_opts &=
1405 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1406 					    0xFFFF);
1407 			qla4xxx_destroy_ipv6_iface(ha);
1408 		}
1409 		break;
1410 	case ISCSI_NET_PARAM_VLAN_TAG:
1411 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1412 			break;
1413 		init_fw_cb->ipv6_vlan_tag =
1414 				cpu_to_be16(*(uint16_t *)iface_param->value);
1415 		break;
1416 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1417 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1418 			init_fw_cb->ipv6_opts |=
1419 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1420 		else
1421 			init_fw_cb->ipv6_opts &=
1422 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1423 		break;
1424 	case ISCSI_NET_PARAM_MTU:
1425 		init_fw_cb->eth_mtu_size =
1426 				cpu_to_le16(*(uint16_t *)iface_param->value);
1427 		break;
1428 	case ISCSI_NET_PARAM_PORT:
1429 		/* Autocfg applies to even interface */
1430 		if (iface_param->iface_num & 0x1)
1431 			break;
1432 
1433 		init_fw_cb->ipv6_port =
1434 				cpu_to_le16(*(uint16_t *)iface_param->value);
1435 		break;
1436 	default:
1437 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1438 			   iface_param->param);
1439 		break;
1440 	}
1441 }
1442 
1443 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1444 			     struct iscsi_iface_param_info *iface_param,
1445 			     struct addr_ctrl_blk *init_fw_cb)
1446 {
1447 	switch (iface_param->param) {
1448 	case ISCSI_NET_PARAM_IPV4_ADDR:
1449 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1450 		       sizeof(init_fw_cb->ipv4_addr));
1451 		break;
1452 	case ISCSI_NET_PARAM_IPV4_SUBNET:
1453 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
1454 		       sizeof(init_fw_cb->ipv4_subnet));
1455 		break;
1456 	case ISCSI_NET_PARAM_IPV4_GW:
1457 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1458 		       sizeof(init_fw_cb->ipv4_gw_addr));
1459 		break;
1460 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1461 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1462 			init_fw_cb->ipv4_tcp_opts |=
1463 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
1464 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1465 			init_fw_cb->ipv4_tcp_opts &=
1466 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1467 		else
1468 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1469 		break;
1470 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1471 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1472 			init_fw_cb->ipv4_ip_opts |=
1473 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1474 			qla4xxx_create_ipv4_iface(ha);
1475 		} else {
1476 			init_fw_cb->ipv4_ip_opts &=
1477 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1478 					    0xFFFF);
1479 			qla4xxx_destroy_ipv4_iface(ha);
1480 		}
1481 		break;
1482 	case ISCSI_NET_PARAM_VLAN_TAG:
1483 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1484 			break;
1485 		init_fw_cb->ipv4_vlan_tag =
1486 				cpu_to_be16(*(uint16_t *)iface_param->value);
1487 		break;
1488 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1489 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1490 			init_fw_cb->ipv4_ip_opts |=
1491 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1492 		else
1493 			init_fw_cb->ipv4_ip_opts &=
1494 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1495 		break;
1496 	case ISCSI_NET_PARAM_MTU:
1497 		init_fw_cb->eth_mtu_size =
1498 				cpu_to_le16(*(uint16_t *)iface_param->value);
1499 		break;
1500 	case ISCSI_NET_PARAM_PORT:
1501 		init_fw_cb->ipv4_port =
1502 				cpu_to_le16(*(uint16_t *)iface_param->value);
1503 		break;
1504 	default:
1505 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1506 			   iface_param->param);
1507 		break;
1508 	}
1509 }
1510 
1511 static void
1512 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1513 {
1514 	struct addr_ctrl_blk_def *acb;
1515 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1516 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
1517 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
1518 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
1519 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
1520 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
1521 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
1522 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
1523 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
1524 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
1525 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
1526 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
1527 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
1528 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
1529 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
1530 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
1531 }
1532 
1533 static int
1534 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1535 {
1536 	struct scsi_qla_host *ha = to_qla_host(shost);
1537 	int rval = 0;
1538 	struct iscsi_iface_param_info *iface_param = NULL;
1539 	struct addr_ctrl_blk *init_fw_cb = NULL;
1540 	dma_addr_t init_fw_cb_dma;
1541 	uint32_t mbox_cmd[MBOX_REG_COUNT];
1542 	uint32_t mbox_sts[MBOX_REG_COUNT];
1543 	uint32_t rem = len;
1544 	struct nlattr *attr;
1545 
1546 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1547 					sizeof(struct addr_ctrl_blk),
1548 					&init_fw_cb_dma, GFP_KERNEL);
1549 	if (!init_fw_cb) {
1550 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1551 			   __func__);
1552 		return -ENOMEM;
1553 	}
1554 
1555 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1556 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1557 	memset(&mbox_sts, 0, sizeof(mbox_sts));
1558 
1559 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1560 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1561 		rval = -EIO;
1562 		goto exit_init_fw_cb;
1563 	}
1564 
1565 	nla_for_each_attr(attr, data, len, rem) {
1566 		iface_param = nla_data(attr);
1567 
1568 		if (iface_param->param_type != ISCSI_NET_PARAM)
1569 			continue;
1570 
1571 		switch (iface_param->iface_type) {
1572 		case ISCSI_IFACE_TYPE_IPV4:
1573 			switch (iface_param->iface_num) {
1574 			case 0:
1575 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1576 				break;
1577 			default:
1578 				/* Cannot have more than one IPv4 interface */
1579 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1580 					   "number = %d\n",
1581 					   iface_param->iface_num);
1582 				break;
1583 			}
1584 			break;
1585 		case ISCSI_IFACE_TYPE_IPV6:
1586 			switch (iface_param->iface_num) {
1587 			case 0:
1588 			case 1:
1589 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1590 				break;
1591 			default:
1592 				/* Cannot have more than two IPv6 interface */
1593 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1594 					   "number = %d\n",
1595 					   iface_param->iface_num);
1596 				break;
1597 			}
1598 			break;
1599 		default:
1600 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1601 			break;
1602 		}
1603 	}
1604 
1605 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1606 
1607 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1608 				 sizeof(struct addr_ctrl_blk),
1609 				 FLASH_OPT_RMW_COMMIT);
1610 	if (rval != QLA_SUCCESS) {
1611 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1612 			   __func__);
1613 		rval = -EIO;
1614 		goto exit_init_fw_cb;
1615 	}
1616 
1617 	rval = qla4xxx_disable_acb(ha);
1618 	if (rval != QLA_SUCCESS) {
1619 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1620 			   __func__);
1621 		rval = -EIO;
1622 		goto exit_init_fw_cb;
1623 	}
1624 
1625 	wait_for_completion_timeout(&ha->disable_acb_comp,
1626 				    DISABLE_ACB_TOV * HZ);
1627 
1628 	qla4xxx_initcb_to_acb(init_fw_cb);
1629 
1630 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1631 	if (rval != QLA_SUCCESS) {
1632 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1633 			   __func__);
1634 		rval = -EIO;
1635 		goto exit_init_fw_cb;
1636 	}
1637 
1638 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1639 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1640 				  init_fw_cb_dma);
1641 
1642 exit_init_fw_cb:
1643 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1644 			  init_fw_cb, init_fw_cb_dma);
1645 
1646 	return rval;
1647 }
1648 
1649 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1650 				     enum iscsi_param param, char *buf)
1651 {
1652 	struct iscsi_session *sess = cls_sess->dd_data;
1653 	struct ddb_entry *ddb_entry = sess->dd_data;
1654 	struct scsi_qla_host *ha = ddb_entry->ha;
1655 	struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
1656 	struct ql4_chap_table chap_tbl;
1657 	int rval, len;
1658 	uint16_t idx;
1659 
1660 	memset(&chap_tbl, 0, sizeof(chap_tbl));
1661 	switch (param) {
1662 	case ISCSI_PARAM_CHAP_IN_IDX:
1663 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
1664 					      sess->password_in, BIDI_CHAP,
1665 					      &idx);
1666 		if (rval)
1667 			len = sprintf(buf, "\n");
1668 		else
1669 			len = sprintf(buf, "%hu\n", idx);
1670 		break;
1671 	case ISCSI_PARAM_CHAP_OUT_IDX:
1672 		if (ddb_entry->ddb_type == FLASH_DDB) {
1673 			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
1674 				idx = ddb_entry->chap_tbl_idx;
1675 				rval = QLA_SUCCESS;
1676 			} else {
1677 				rval = QLA_ERROR;
1678 			}
1679 		} else {
1680 			rval = qla4xxx_get_chap_index(ha, sess->username,
1681 						      sess->password,
1682 						      LOCAL_CHAP, &idx);
1683 		}
1684 		if (rval)
1685 			len = sprintf(buf, "\n");
1686 		else
1687 			len = sprintf(buf, "%hu\n", idx);
1688 		break;
1689 	case ISCSI_PARAM_USERNAME:
1690 	case ISCSI_PARAM_PASSWORD:
1691 		/* First, populate session username and password for FLASH DDB,
1692 		 * if not already done. This happens when session login fails
1693 		 * for a FLASH DDB.
1694 		 */
1695 		if (ddb_entry->ddb_type == FLASH_DDB &&
1696 		    ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
1697 		    !sess->username && !sess->password) {
1698 			idx = ddb_entry->chap_tbl_idx;
1699 			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
1700 							    chap_tbl.secret,
1701 							    idx);
1702 			if (!rval) {
1703 				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
1704 						(char *)chap_tbl.name,
1705 						strlen((char *)chap_tbl.name));
1706 				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
1707 						(char *)chap_tbl.secret,
1708 						chap_tbl.secret_len);
1709 			}
1710 		}
1711 		/* allow fall-through */
1712 	default:
1713 		return iscsi_session_get_param(cls_sess, param, buf);
1714 	}
1715 
1716 	return len;
1717 }
1718 
1719 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1720 				  enum iscsi_param param, char *buf)
1721 {
1722 	struct iscsi_conn *conn;
1723 	struct qla_conn *qla_conn;
1724 	struct sockaddr *dst_addr;
1725 	int len = 0;
1726 
1727 	conn = cls_conn->dd_data;
1728 	qla_conn = conn->dd_data;
1729 	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1730 
1731 	switch (param) {
1732 	case ISCSI_PARAM_CONN_PORT:
1733 	case ISCSI_PARAM_CONN_ADDRESS:
1734 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1735 						 dst_addr, param, buf);
1736 	default:
1737 		return iscsi_conn_get_param(cls_conn, param, buf);
1738 	}
1739 
1740 	return len;
1741 
1742 }
1743 
1744 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1745 {
1746 	uint32_t mbx_sts = 0;
1747 	uint16_t tmp_ddb_index;
1748 	int ret;
1749 
1750 get_ddb_index:
1751 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1752 
1753 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1754 		DEBUG2(ql4_printk(KERN_INFO, ha,
1755 				  "Free DDB index not available\n"));
1756 		ret = QLA_ERROR;
1757 		goto exit_get_ddb_index;
1758 	}
1759 
1760 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1761 		goto get_ddb_index;
1762 
1763 	DEBUG2(ql4_printk(KERN_INFO, ha,
1764 			  "Found a free DDB index at %d\n", tmp_ddb_index));
1765 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1766 	if (ret == QLA_ERROR) {
1767 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1768 			ql4_printk(KERN_INFO, ha,
1769 				   "DDB index = %d not available trying next\n",
1770 				   tmp_ddb_index);
1771 			goto get_ddb_index;
1772 		}
1773 		DEBUG2(ql4_printk(KERN_INFO, ha,
1774 				  "Free FW DDB not available\n"));
1775 	}
1776 
1777 	*ddb_index = tmp_ddb_index;
1778 
1779 exit_get_ddb_index:
1780 	return ret;
1781 }
1782 
1783 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1784 				   struct ddb_entry *ddb_entry,
1785 				   char *existing_ipaddr,
1786 				   char *user_ipaddr)
1787 {
1788 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1789 	char formatted_ipaddr[DDB_IPADDR_LEN];
1790 	int status = QLA_SUCCESS, ret = 0;
1791 
1792 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1793 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1794 			       '\0', NULL);
1795 		if (ret == 0) {
1796 			status = QLA_ERROR;
1797 			goto out_match;
1798 		}
1799 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1800 	} else {
1801 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1802 			       '\0', NULL);
1803 		if (ret == 0) {
1804 			status = QLA_ERROR;
1805 			goto out_match;
1806 		}
1807 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1808 	}
1809 
1810 	if (strcmp(existing_ipaddr, formatted_ipaddr))
1811 		status = QLA_ERROR;
1812 
1813 out_match:
1814 	return status;
1815 }
1816 
1817 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1818 				      struct iscsi_cls_conn *cls_conn)
1819 {
1820 	int idx = 0, max_ddbs, rval;
1821 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1822 	struct iscsi_session *sess, *existing_sess;
1823 	struct iscsi_conn *conn, *existing_conn;
1824 	struct ddb_entry *ddb_entry;
1825 
1826 	sess = cls_sess->dd_data;
1827 	conn = cls_conn->dd_data;
1828 
1829 	if (sess->targetname == NULL ||
1830 	    conn->persistent_address == NULL ||
1831 	    conn->persistent_port == 0)
1832 		return QLA_ERROR;
1833 
1834 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1835 				     MAX_DEV_DB_ENTRIES;
1836 
1837 	for (idx = 0; idx < max_ddbs; idx++) {
1838 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1839 		if (ddb_entry == NULL)
1840 			continue;
1841 
1842 		if (ddb_entry->ddb_type != FLASH_DDB)
1843 			continue;
1844 
1845 		existing_sess = ddb_entry->sess->dd_data;
1846 		existing_conn = ddb_entry->conn->dd_data;
1847 
1848 		if (existing_sess->targetname == NULL ||
1849 		    existing_conn->persistent_address == NULL ||
1850 		    existing_conn->persistent_port == 0)
1851 			continue;
1852 
1853 		DEBUG2(ql4_printk(KERN_INFO, ha,
1854 				  "IQN = %s User IQN = %s\n",
1855 				  existing_sess->targetname,
1856 				  sess->targetname));
1857 
1858 		DEBUG2(ql4_printk(KERN_INFO, ha,
1859 				  "IP = %s User IP = %s\n",
1860 				  existing_conn->persistent_address,
1861 				  conn->persistent_address));
1862 
1863 		DEBUG2(ql4_printk(KERN_INFO, ha,
1864 				  "Port = %d User Port = %d\n",
1865 				  existing_conn->persistent_port,
1866 				  conn->persistent_port));
1867 
1868 		if (strcmp(existing_sess->targetname, sess->targetname))
1869 			continue;
1870 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1871 					existing_conn->persistent_address,
1872 					conn->persistent_address);
1873 		if (rval == QLA_ERROR)
1874 			continue;
1875 		if (existing_conn->persistent_port != conn->persistent_port)
1876 			continue;
1877 		break;
1878 	}
1879 
1880 	if (idx == max_ddbs)
1881 		return QLA_ERROR;
1882 
1883 	DEBUG2(ql4_printk(KERN_INFO, ha,
1884 			  "Match found in fwdb sessions\n"));
1885 	return QLA_SUCCESS;
1886 }
1887 
1888 static struct iscsi_cls_session *
1889 qla4xxx_session_create(struct iscsi_endpoint *ep,
1890 			uint16_t cmds_max, uint16_t qdepth,
1891 			uint32_t initial_cmdsn)
1892 {
1893 	struct iscsi_cls_session *cls_sess;
1894 	struct scsi_qla_host *ha;
1895 	struct qla_endpoint *qla_ep;
1896 	struct ddb_entry *ddb_entry;
1897 	uint16_t ddb_index;
1898 	struct iscsi_session *sess;
1899 	struct sockaddr *dst_addr;
1900 	int ret;
1901 
1902 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1903 	if (!ep) {
1904 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1905 		return NULL;
1906 	}
1907 
1908 	qla_ep = ep->dd_data;
1909 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1910 	ha = to_qla_host(qla_ep->host);
1911 
1912 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1913 	if (ret == QLA_ERROR)
1914 		return NULL;
1915 
1916 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1917 				       cmds_max, sizeof(struct ddb_entry),
1918 				       sizeof(struct ql4_task_data),
1919 				       initial_cmdsn, ddb_index);
1920 	if (!cls_sess)
1921 		return NULL;
1922 
1923 	sess = cls_sess->dd_data;
1924 	ddb_entry = sess->dd_data;
1925 	ddb_entry->fw_ddb_index = ddb_index;
1926 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1927 	ddb_entry->ha = ha;
1928 	ddb_entry->sess = cls_sess;
1929 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1930 	ddb_entry->ddb_change = qla4xxx_ddb_change;
1931 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1932 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1933 	ha->tot_ddbs++;
1934 
1935 	return cls_sess;
1936 }
1937 
1938 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1939 {
1940 	struct iscsi_session *sess;
1941 	struct ddb_entry *ddb_entry;
1942 	struct scsi_qla_host *ha;
1943 	unsigned long flags, wtime;
1944 	struct dev_db_entry *fw_ddb_entry = NULL;
1945 	dma_addr_t fw_ddb_entry_dma;
1946 	uint32_t ddb_state;
1947 	int ret;
1948 
1949 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1950 	sess = cls_sess->dd_data;
1951 	ddb_entry = sess->dd_data;
1952 	ha = ddb_entry->ha;
1953 
1954 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1955 					  &fw_ddb_entry_dma, GFP_KERNEL);
1956 	if (!fw_ddb_entry) {
1957 		ql4_printk(KERN_ERR, ha,
1958 			   "%s: Unable to allocate dma buffer\n", __func__);
1959 		goto destroy_session;
1960 	}
1961 
1962 	wtime = jiffies + (HZ * LOGOUT_TOV);
1963 	do {
1964 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1965 					      fw_ddb_entry, fw_ddb_entry_dma,
1966 					      NULL, NULL, &ddb_state, NULL,
1967 					      NULL, NULL);
1968 		if (ret == QLA_ERROR)
1969 			goto destroy_session;
1970 
1971 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1972 		    (ddb_state == DDB_DS_SESSION_FAILED))
1973 			goto destroy_session;
1974 
1975 		schedule_timeout_uninterruptible(HZ);
1976 	} while ((time_after(wtime, jiffies)));
1977 
1978 destroy_session:
1979 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1980 
1981 	spin_lock_irqsave(&ha->hardware_lock, flags);
1982 	qla4xxx_free_ddb(ha, ddb_entry);
1983 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1984 
1985 	iscsi_session_teardown(cls_sess);
1986 
1987 	if (fw_ddb_entry)
1988 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1989 				  fw_ddb_entry, fw_ddb_entry_dma);
1990 }
1991 
1992 static struct iscsi_cls_conn *
1993 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1994 {
1995 	struct iscsi_cls_conn *cls_conn;
1996 	struct iscsi_session *sess;
1997 	struct ddb_entry *ddb_entry;
1998 
1999 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
2000 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
2001 				    conn_idx);
2002 	if (!cls_conn)
2003 		return NULL;
2004 
2005 	sess = cls_sess->dd_data;
2006 	ddb_entry = sess->dd_data;
2007 	ddb_entry->conn = cls_conn;
2008 
2009 	return cls_conn;
2010 }
2011 
2012 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
2013 			     struct iscsi_cls_conn *cls_conn,
2014 			     uint64_t transport_fd, int is_leading)
2015 {
2016 	struct iscsi_conn *conn;
2017 	struct qla_conn *qla_conn;
2018 	struct iscsi_endpoint *ep;
2019 
2020 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
2021 
2022 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
2023 		return -EINVAL;
2024 	ep = iscsi_lookup_endpoint(transport_fd);
2025 	conn = cls_conn->dd_data;
2026 	qla_conn = conn->dd_data;
2027 	qla_conn->qla_ep = ep->dd_data;
2028 	return 0;
2029 }
2030 
2031 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
2032 {
2033 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2034 	struct iscsi_session *sess;
2035 	struct ddb_entry *ddb_entry;
2036 	struct scsi_qla_host *ha;
2037 	struct dev_db_entry *fw_ddb_entry = NULL;
2038 	dma_addr_t fw_ddb_entry_dma;
2039 	uint32_t mbx_sts = 0;
2040 	int ret = 0;
2041 	int status = QLA_SUCCESS;
2042 
2043 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
2044 	sess = cls_sess->dd_data;
2045 	ddb_entry = sess->dd_data;
2046 	ha = ddb_entry->ha;
2047 
2048 	/* Check if we have  matching FW DDB, if yes then do not
2049 	 * login to this target. This could cause target to logout previous
2050 	 * connection
2051 	 */
2052 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
2053 	if (ret == QLA_SUCCESS) {
2054 		ql4_printk(KERN_INFO, ha,
2055 			   "Session already exist in FW.\n");
2056 		ret = -EEXIST;
2057 		goto exit_conn_start;
2058 	}
2059 
2060 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2061 					  &fw_ddb_entry_dma, GFP_KERNEL);
2062 	if (!fw_ddb_entry) {
2063 		ql4_printk(KERN_ERR, ha,
2064 			   "%s: Unable to allocate dma buffer\n", __func__);
2065 		ret = -ENOMEM;
2066 		goto exit_conn_start;
2067 	}
2068 
2069 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
2070 	if (ret) {
2071 		/* If iscsid is stopped and started then no need to do
2072 		* set param again since ddb state will be already
2073 		* active and FW does not allow set ddb to an
2074 		* active session.
2075 		*/
2076 		if (mbx_sts)
2077 			if (ddb_entry->fw_ddb_device_state ==
2078 						DDB_DS_SESSION_ACTIVE) {
2079 				ddb_entry->unblock_sess(ddb_entry->sess);
2080 				goto exit_set_param;
2081 			}
2082 
2083 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
2084 			   __func__, ddb_entry->fw_ddb_index);
2085 		goto exit_conn_start;
2086 	}
2087 
2088 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
2089 	if (status == QLA_ERROR) {
2090 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
2091 			   sess->targetname);
2092 		ret = -EINVAL;
2093 		goto exit_conn_start;
2094 	}
2095 
2096 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
2097 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
2098 
2099 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
2100 		      ddb_entry->fw_ddb_device_state));
2101 
2102 exit_set_param:
2103 	ret = 0;
2104 
2105 exit_conn_start:
2106 	if (fw_ddb_entry)
2107 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2108 				  fw_ddb_entry, fw_ddb_entry_dma);
2109 	return ret;
2110 }
2111 
2112 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
2113 {
2114 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2115 	struct iscsi_session *sess;
2116 	struct scsi_qla_host *ha;
2117 	struct ddb_entry *ddb_entry;
2118 	int options;
2119 
2120 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
2121 	sess = cls_sess->dd_data;
2122 	ddb_entry = sess->dd_data;
2123 	ha = ddb_entry->ha;
2124 
2125 	options = LOGOUT_OPTION_CLOSE_SESSION;
2126 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
2127 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
2128 }
2129 
2130 static void qla4xxx_task_work(struct work_struct *wdata)
2131 {
2132 	struct ql4_task_data *task_data;
2133 	struct scsi_qla_host *ha;
2134 	struct passthru_status *sts;
2135 	struct iscsi_task *task;
2136 	struct iscsi_hdr *hdr;
2137 	uint8_t *data;
2138 	uint32_t data_len;
2139 	struct iscsi_conn *conn;
2140 	int hdr_len;
2141 	itt_t itt;
2142 
2143 	task_data = container_of(wdata, struct ql4_task_data, task_work);
2144 	ha = task_data->ha;
2145 	task = task_data->task;
2146 	sts = &task_data->sts;
2147 	hdr_len = sizeof(struct iscsi_hdr);
2148 
2149 	DEBUG3(printk(KERN_INFO "Status returned\n"));
2150 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
2151 	DEBUG3(printk(KERN_INFO "Response buffer"));
2152 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
2153 
2154 	conn = task->conn;
2155 
2156 	switch (sts->completionStatus) {
2157 	case PASSTHRU_STATUS_COMPLETE:
2158 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
2159 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
2160 		itt = sts->handle;
2161 		hdr->itt = itt;
2162 		data = task_data->resp_buffer + hdr_len;
2163 		data_len = task_data->resp_len - hdr_len;
2164 		iscsi_complete_pdu(conn, hdr, data, data_len);
2165 		break;
2166 	default:
2167 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
2168 			   sts->completionStatus);
2169 		break;
2170 	}
2171 	return;
2172 }
2173 
2174 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2175 {
2176 	struct ql4_task_data *task_data;
2177 	struct iscsi_session *sess;
2178 	struct ddb_entry *ddb_entry;
2179 	struct scsi_qla_host *ha;
2180 	int hdr_len;
2181 
2182 	sess = task->conn->session;
2183 	ddb_entry = sess->dd_data;
2184 	ha = ddb_entry->ha;
2185 	task_data = task->dd_data;
2186 	memset(task_data, 0, sizeof(struct ql4_task_data));
2187 
2188 	if (task->sc) {
2189 		ql4_printk(KERN_INFO, ha,
2190 			   "%s: SCSI Commands not implemented\n", __func__);
2191 		return -EINVAL;
2192 	}
2193 
2194 	hdr_len = sizeof(struct iscsi_hdr);
2195 	task_data->ha = ha;
2196 	task_data->task = task;
2197 
2198 	if (task->data_count) {
2199 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
2200 						     task->data_count,
2201 						     PCI_DMA_TODEVICE);
2202 	}
2203 
2204 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2205 		      __func__, task->conn->max_recv_dlength, hdr_len));
2206 
2207 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
2208 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
2209 						    task_data->resp_len,
2210 						    &task_data->resp_dma,
2211 						    GFP_ATOMIC);
2212 	if (!task_data->resp_buffer)
2213 		goto exit_alloc_pdu;
2214 
2215 	task_data->req_len = task->data_count + hdr_len;
2216 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
2217 						   task_data->req_len,
2218 						   &task_data->req_dma,
2219 						   GFP_ATOMIC);
2220 	if (!task_data->req_buffer)
2221 		goto exit_alloc_pdu;
2222 
2223 	task->hdr = task_data->req_buffer;
2224 
2225 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
2226 
2227 	return 0;
2228 
2229 exit_alloc_pdu:
2230 	if (task_data->resp_buffer)
2231 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2232 				  task_data->resp_buffer, task_data->resp_dma);
2233 
2234 	if (task_data->req_buffer)
2235 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2236 				  task_data->req_buffer, task_data->req_dma);
2237 	return -ENOMEM;
2238 }
2239 
2240 static void qla4xxx_task_cleanup(struct iscsi_task *task)
2241 {
2242 	struct ql4_task_data *task_data;
2243 	struct iscsi_session *sess;
2244 	struct ddb_entry *ddb_entry;
2245 	struct scsi_qla_host *ha;
2246 	int hdr_len;
2247 
2248 	hdr_len = sizeof(struct iscsi_hdr);
2249 	sess = task->conn->session;
2250 	ddb_entry = sess->dd_data;
2251 	ha = ddb_entry->ha;
2252 	task_data = task->dd_data;
2253 
2254 	if (task->data_count) {
2255 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
2256 				 task->data_count, PCI_DMA_TODEVICE);
2257 	}
2258 
2259 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
2260 		      __func__, task->conn->max_recv_dlength, hdr_len));
2261 
2262 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
2263 			  task_data->resp_buffer, task_data->resp_dma);
2264 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
2265 			  task_data->req_buffer, task_data->req_dma);
2266 	return;
2267 }
2268 
2269 static int qla4xxx_task_xmit(struct iscsi_task *task)
2270 {
2271 	struct scsi_cmnd *sc = task->sc;
2272 	struct iscsi_session *sess = task->conn->session;
2273 	struct ddb_entry *ddb_entry = sess->dd_data;
2274 	struct scsi_qla_host *ha = ddb_entry->ha;
2275 
2276 	if (!sc)
2277 		return qla4xxx_send_passthru0(task);
2278 
2279 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2280 		   __func__);
2281 	return -ENOSYS;
2282 }
2283 
2284 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2285 					 struct iscsi_bus_flash_conn *conn,
2286 					 struct dev_db_entry *fw_ddb_entry)
2287 {
2288 	unsigned long options = 0;
2289 	int rc = 0;
2290 
2291 	options = le16_to_cpu(fw_ddb_entry->options);
2292 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2293 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2294 		rc = iscsi_switch_str_param(&sess->portal_type,
2295 					    PORTAL_TYPE_IPV6);
2296 		if (rc)
2297 			goto exit_copy;
2298 	} else {
2299 		rc = iscsi_switch_str_param(&sess->portal_type,
2300 					    PORTAL_TYPE_IPV4);
2301 		if (rc)
2302 			goto exit_copy;
2303 	}
2304 
2305 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2306 					      &options);
2307 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2308 	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2309 
2310 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2311 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2312 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2313 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2314 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2315 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2316 					    &options);
2317 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2318 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2319 	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2320 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2321 					     &options);
2322 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2323 	sess->discovery_auth_optional =
2324 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2325 	if (test_bit(ISCSIOPT_ERL1, &options))
2326 		sess->erl |= BIT_1;
2327 	if (test_bit(ISCSIOPT_ERL0, &options))
2328 		sess->erl |= BIT_0;
2329 
2330 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2331 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2332 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2333 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2334 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2335 		conn->tcp_timer_scale |= BIT_3;
2336 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2337 		conn->tcp_timer_scale |= BIT_2;
2338 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2339 		conn->tcp_timer_scale |= BIT_1;
2340 
2341 	conn->tcp_timer_scale >>= 1;
2342 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2343 
2344 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2345 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2346 
2347 	conn->max_recv_dlength = BYTE_UNITS *
2348 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2349 	conn->max_xmit_dlength = BYTE_UNITS *
2350 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2351 	sess->first_burst = BYTE_UNITS *
2352 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2353 	sess->max_burst = BYTE_UNITS *
2354 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2355 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2356 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2357 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2358 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2359 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2360 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2361 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2362 	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2363 	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2364 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2365 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2366 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2367 	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2368 	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2369 	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2370 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2371 
2372 	sess->default_taskmgmt_timeout =
2373 				le16_to_cpu(fw_ddb_entry->def_timeout);
2374 	conn->port = le16_to_cpu(fw_ddb_entry->port);
2375 
2376 	options = le16_to_cpu(fw_ddb_entry->options);
2377 	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2378 	if (!conn->ipaddress) {
2379 		rc = -ENOMEM;
2380 		goto exit_copy;
2381 	}
2382 
2383 	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2384 	if (!conn->redirect_ipaddr) {
2385 		rc = -ENOMEM;
2386 		goto exit_copy;
2387 	}
2388 
2389 	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2390 	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2391 
2392 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2393 		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2394 
2395 		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2396 		if (!conn->link_local_ipv6_addr) {
2397 			rc = -ENOMEM;
2398 			goto exit_copy;
2399 		}
2400 
2401 		memcpy(conn->link_local_ipv6_addr,
2402 		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2403 	} else {
2404 		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2405 	}
2406 
2407 	if (fw_ddb_entry->iscsi_name[0]) {
2408 		rc = iscsi_switch_str_param(&sess->targetname,
2409 					    (char *)fw_ddb_entry->iscsi_name);
2410 		if (rc)
2411 			goto exit_copy;
2412 	}
2413 
2414 	if (fw_ddb_entry->iscsi_alias[0]) {
2415 		rc = iscsi_switch_str_param(&sess->targetalias,
2416 					    (char *)fw_ddb_entry->iscsi_alias);
2417 		if (rc)
2418 			goto exit_copy;
2419 	}
2420 
2421 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
2422 
2423 exit_copy:
2424 	return rc;
2425 }
2426 
2427 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2428 				       struct iscsi_bus_flash_conn *conn,
2429 				       struct dev_db_entry *fw_ddb_entry)
2430 {
2431 	uint16_t options;
2432 	int rc = 0;
2433 
2434 	options = le16_to_cpu(fw_ddb_entry->options);
2435 	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
2436 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2437 		options |= BIT_8;
2438 	else
2439 		options &= ~BIT_8;
2440 
2441 	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2442 	SET_BITVAL(sess->discovery_sess, options, BIT_4);
2443 	SET_BITVAL(sess->entry_state, options, BIT_3);
2444 	fw_ddb_entry->options = cpu_to_le16(options);
2445 
2446 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2447 	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2448 	SET_BITVAL(conn->datadgst_en, options, BIT_12);
2449 	SET_BITVAL(sess->imm_data_en, options, BIT_11);
2450 	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2451 	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2452 	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2453 	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2454 	SET_BITVAL(conn->snack_req_en, options, BIT_6);
2455 	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2456 	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2457 	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2458 	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2459 	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2460 	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2461 
2462 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2463 	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2464 	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2465 	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2466 	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2467 	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2468 	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2469 	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2470 	fw_ddb_entry->tcp_options = cpu_to_le16(options);
2471 
2472 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2473 	SET_BITVAL(conn->fragment_disable, options, BIT_4);
2474 	fw_ddb_entry->ip_options = cpu_to_le16(options);
2475 
2476 	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2477 	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2478 			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2479 	fw_ddb_entry->iscsi_max_snd_data_seg_len =
2480 			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2481 	fw_ddb_entry->iscsi_first_burst_len =
2482 				cpu_to_le16(sess->first_burst / BYTE_UNITS);
2483 	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2484 					    BYTE_UNITS);
2485 	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2486 	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2487 	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2488 	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2489 	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2490 	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2491 	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2492 	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2493 	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2494 	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2495 	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2496 	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
2497 	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2498 	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2499 	fw_ddb_entry->port = cpu_to_le16(conn->port);
2500 	fw_ddb_entry->def_timeout =
2501 				cpu_to_le16(sess->default_taskmgmt_timeout);
2502 
2503 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2504 		fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
2505 	else
2506 		fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2507 
2508 	if (conn->ipaddress)
2509 		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2510 		       sizeof(fw_ddb_entry->ip_addr));
2511 
2512 	if (conn->redirect_ipaddr)
2513 		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2514 		       sizeof(fw_ddb_entry->tgt_addr));
2515 
2516 	if (conn->link_local_ipv6_addr)
2517 		memcpy(fw_ddb_entry->link_local_ipv6_addr,
2518 		       conn->link_local_ipv6_addr,
2519 		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
2520 
2521 	if (sess->targetname)
2522 		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2523 		       sizeof(fw_ddb_entry->iscsi_name));
2524 
2525 	if (sess->targetalias)
2526 		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2527 		       sizeof(fw_ddb_entry->iscsi_alias));
2528 
2529 	COPY_ISID(fw_ddb_entry->isid, sess->isid);
2530 
2531 	return rc;
2532 }
2533 
2534 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
2535 					     struct iscsi_session *sess,
2536 					     struct dev_db_entry *fw_ddb_entry)
2537 {
2538 	unsigned long options = 0;
2539 	uint16_t ddb_link;
2540 	uint16_t disc_parent;
2541 
2542 	options = le16_to_cpu(fw_ddb_entry->options);
2543 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2544 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2545 					      &options);
2546 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2547 
2548 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2549 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2550 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2551 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2552 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2553 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2554 					    &options);
2555 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2556 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2557 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2558 					     &options);
2559 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2560 	sess->discovery_auth_optional =
2561 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2562 	if (test_bit(ISCSIOPT_ERL1, &options))
2563 		sess->erl |= BIT_1;
2564 	if (test_bit(ISCSIOPT_ERL0, &options))
2565 		sess->erl |= BIT_0;
2566 
2567 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2568 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2569 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2570 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2571 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2572 		conn->tcp_timer_scale |= BIT_3;
2573 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2574 		conn->tcp_timer_scale |= BIT_2;
2575 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2576 		conn->tcp_timer_scale |= BIT_1;
2577 
2578 	conn->tcp_timer_scale >>= 1;
2579 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2580 
2581 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2582 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2583 
2584 	conn->max_recv_dlength = BYTE_UNITS *
2585 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2586 	conn->max_xmit_dlength = BYTE_UNITS *
2587 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2588 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2589 	sess->first_burst = BYTE_UNITS *
2590 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2591 	sess->max_burst = BYTE_UNITS *
2592 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2593 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2594 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2595 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2596 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2597 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2598 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2599 	conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2600 	conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
2601 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2602 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2603 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2604 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2605 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
2606 
2607 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
2608 	if (ddb_link == DDB_ISNS)
2609 		disc_parent = ISCSI_DISC_PARENT_ISNS;
2610 	else if (ddb_link == DDB_NO_LINK)
2611 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2612 	else if (ddb_link < MAX_DDB_ENTRIES)
2613 		disc_parent = ISCSI_DISC_PARENT_SENDTGT;
2614 	else
2615 		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2616 
2617 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
2618 			iscsi_get_discovery_parent_name(disc_parent), 0);
2619 
2620 	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2621 			(char *)fw_ddb_entry->iscsi_alias, 0);
2622 }
2623 
2624 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2625 				     struct dev_db_entry *fw_ddb_entry,
2626 				     struct iscsi_cls_session *cls_sess,
2627 				     struct iscsi_cls_conn *cls_conn)
2628 {
2629 	int buflen = 0;
2630 	struct iscsi_session *sess;
2631 	struct ddb_entry *ddb_entry;
2632 	struct ql4_chap_table chap_tbl;
2633 	struct iscsi_conn *conn;
2634 	char ip_addr[DDB_IPADDR_LEN];
2635 	uint16_t options = 0;
2636 
2637 	sess = cls_sess->dd_data;
2638 	ddb_entry = sess->dd_data;
2639 	conn = cls_conn->dd_data;
2640 	memset(&chap_tbl, 0, sizeof(chap_tbl));
2641 
2642 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2643 
2644 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2645 
2646 	sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
2647 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2648 
2649 	memset(ip_addr, 0, sizeof(ip_addr));
2650 	options = le16_to_cpu(fw_ddb_entry->options);
2651 	if (options & DDB_OPT_IPV6_DEVICE) {
2652 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
2653 
2654 		memset(ip_addr, 0, sizeof(ip_addr));
2655 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2656 	} else {
2657 		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
2658 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2659 	}
2660 
2661 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2662 			(char *)ip_addr, buflen);
2663 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2664 			(char *)fw_ddb_entry->iscsi_name, buflen);
2665 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2666 			(char *)ha->name_string, buflen);
2667 
2668 	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2669 		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2670 						   chap_tbl.secret,
2671 						   ddb_entry->chap_tbl_idx)) {
2672 			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2673 					(char *)chap_tbl.name,
2674 					strlen((char *)chap_tbl.name));
2675 			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2676 					(char *)chap_tbl.secret,
2677 					chap_tbl.secret_len);
2678 		}
2679 	}
2680 }
2681 
2682 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2683 					     struct ddb_entry *ddb_entry)
2684 {
2685 	struct iscsi_cls_session *cls_sess;
2686 	struct iscsi_cls_conn *cls_conn;
2687 	uint32_t ddb_state;
2688 	dma_addr_t fw_ddb_entry_dma;
2689 	struct dev_db_entry *fw_ddb_entry;
2690 
2691 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2692 					  &fw_ddb_entry_dma, GFP_KERNEL);
2693 	if (!fw_ddb_entry) {
2694 		ql4_printk(KERN_ERR, ha,
2695 			   "%s: Unable to allocate dma buffer\n", __func__);
2696 		goto exit_session_conn_fwddb_param;
2697 	}
2698 
2699 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2700 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2701 				    NULL, NULL, NULL) == QLA_ERROR) {
2702 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2703 				  "get_ddb_entry for fw_ddb_index %d\n",
2704 				  ha->host_no, __func__,
2705 				  ddb_entry->fw_ddb_index));
2706 		goto exit_session_conn_fwddb_param;
2707 	}
2708 
2709 	cls_sess = ddb_entry->sess;
2710 
2711 	cls_conn = ddb_entry->conn;
2712 
2713 	/* Update params */
2714 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2715 
2716 exit_session_conn_fwddb_param:
2717 	if (fw_ddb_entry)
2718 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2719 				  fw_ddb_entry, fw_ddb_entry_dma);
2720 }
2721 
2722 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2723 				       struct ddb_entry *ddb_entry)
2724 {
2725 	struct iscsi_cls_session *cls_sess;
2726 	struct iscsi_cls_conn *cls_conn;
2727 	struct iscsi_session *sess;
2728 	struct iscsi_conn *conn;
2729 	uint32_t ddb_state;
2730 	dma_addr_t fw_ddb_entry_dma;
2731 	struct dev_db_entry *fw_ddb_entry;
2732 
2733 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2734 					  &fw_ddb_entry_dma, GFP_KERNEL);
2735 	if (!fw_ddb_entry) {
2736 		ql4_printk(KERN_ERR, ha,
2737 			   "%s: Unable to allocate dma buffer\n", __func__);
2738 		goto exit_session_conn_param;
2739 	}
2740 
2741 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2742 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2743 				    NULL, NULL, NULL) == QLA_ERROR) {
2744 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2745 				  "get_ddb_entry for fw_ddb_index %d\n",
2746 				  ha->host_no, __func__,
2747 				  ddb_entry->fw_ddb_index));
2748 		goto exit_session_conn_param;
2749 	}
2750 
2751 	cls_sess = ddb_entry->sess;
2752 	sess = cls_sess->dd_data;
2753 
2754 	cls_conn = ddb_entry->conn;
2755 	conn = cls_conn->dd_data;
2756 
2757 	/* Update timers after login */
2758 	ddb_entry->default_relogin_timeout =
2759 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2760 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2761 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2762 	ddb_entry->default_time2wait =
2763 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2764 
2765 	/* Update params */
2766 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2767 	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2768 
2769 	memcpy(sess->initiatorname, ha->name_string,
2770 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2771 
2772 exit_session_conn_param:
2773 	if (fw_ddb_entry)
2774 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2775 				  fw_ddb_entry, fw_ddb_entry_dma);
2776 }
2777 
2778 /*
2779  * Timer routines
2780  */
2781 
2782 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2783 				unsigned long interval)
2784 {
2785 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2786 		     __func__, ha->host->host_no));
2787 	init_timer(&ha->timer);
2788 	ha->timer.expires = jiffies + interval * HZ;
2789 	ha->timer.data = (unsigned long)ha;
2790 	ha->timer.function = (void (*)(unsigned long))func;
2791 	add_timer(&ha->timer);
2792 	ha->timer_active = 1;
2793 }
2794 
2795 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2796 {
2797 	del_timer_sync(&ha->timer);
2798 	ha->timer_active = 0;
2799 }
2800 
2801 /***
2802  * qla4xxx_mark_device_missing - blocks the session
2803  * @cls_session: Pointer to the session to be blocked
2804  * @ddb_entry: Pointer to device database entry
2805  *
2806  * This routine marks a device missing and close connection.
2807  **/
2808 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2809 {
2810 	iscsi_block_session(cls_session);
2811 }
2812 
2813 /**
2814  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2815  * @ha: Pointer to host adapter structure.
2816  *
2817  * This routine marks a device missing and resets the relogin retry count.
2818  **/
2819 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2820 {
2821 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2822 }
2823 
2824 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2825 				       struct ddb_entry *ddb_entry,
2826 				       struct scsi_cmnd *cmd)
2827 {
2828 	struct srb *srb;
2829 
2830 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2831 	if (!srb)
2832 		return srb;
2833 
2834 	kref_init(&srb->srb_ref);
2835 	srb->ha = ha;
2836 	srb->ddb = ddb_entry;
2837 	srb->cmd = cmd;
2838 	srb->flags = 0;
2839 	CMD_SP(cmd) = (void *)srb;
2840 
2841 	return srb;
2842 }
2843 
2844 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2845 {
2846 	struct scsi_cmnd *cmd = srb->cmd;
2847 
2848 	if (srb->flags & SRB_DMA_VALID) {
2849 		scsi_dma_unmap(cmd);
2850 		srb->flags &= ~SRB_DMA_VALID;
2851 	}
2852 	CMD_SP(cmd) = NULL;
2853 }
2854 
2855 void qla4xxx_srb_compl(struct kref *ref)
2856 {
2857 	struct srb *srb = container_of(ref, struct srb, srb_ref);
2858 	struct scsi_cmnd *cmd = srb->cmd;
2859 	struct scsi_qla_host *ha = srb->ha;
2860 
2861 	qla4xxx_srb_free_dma(ha, srb);
2862 
2863 	mempool_free(srb, ha->srb_mempool);
2864 
2865 	cmd->scsi_done(cmd);
2866 }
2867 
2868 /**
2869  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2870  * @host: scsi host
2871  * @cmd: Pointer to Linux's SCSI command structure
2872  *
2873  * Remarks:
2874  * This routine is invoked by Linux to send a SCSI command to the driver.
2875  * The mid-level driver tries to ensure that queuecommand never gets
2876  * invoked concurrently with itself or the interrupt handler (although
2877  * the interrupt handler may call this routine as part of request-
2878  * completion handling).   Unfortunely, it sometimes calls the scheduler
2879  * in interrupt context which is a big NO! NO!.
2880  **/
2881 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2882 {
2883 	struct scsi_qla_host *ha = to_qla_host(host);
2884 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
2885 	struct iscsi_cls_session *sess = ddb_entry->sess;
2886 	struct srb *srb;
2887 	int rval;
2888 
2889 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2890 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2891 			cmd->result = DID_NO_CONNECT << 16;
2892 		else
2893 			cmd->result = DID_REQUEUE << 16;
2894 		goto qc_fail_command;
2895 	}
2896 
2897 	if (!sess) {
2898 		cmd->result = DID_IMM_RETRY << 16;
2899 		goto qc_fail_command;
2900 	}
2901 
2902 	rval = iscsi_session_chkready(sess);
2903 	if (rval) {
2904 		cmd->result = rval;
2905 		goto qc_fail_command;
2906 	}
2907 
2908 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2909 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2910 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2911 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2912 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2913 	    !test_bit(AF_ONLINE, &ha->flags) ||
2914 	    !test_bit(AF_LINK_UP, &ha->flags) ||
2915 	    test_bit(AF_LOOPBACK, &ha->flags) ||
2916 	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
2917 	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
2918 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2919 		goto qc_host_busy;
2920 
2921 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2922 	if (!srb)
2923 		goto qc_host_busy;
2924 
2925 	rval = qla4xxx_send_command_to_isp(ha, srb);
2926 	if (rval != QLA_SUCCESS)
2927 		goto qc_host_busy_free_sp;
2928 
2929 	return 0;
2930 
2931 qc_host_busy_free_sp:
2932 	qla4xxx_srb_free_dma(ha, srb);
2933 	mempool_free(srb, ha->srb_mempool);
2934 
2935 qc_host_busy:
2936 	return SCSI_MLQUEUE_HOST_BUSY;
2937 
2938 qc_fail_command:
2939 	cmd->scsi_done(cmd);
2940 
2941 	return 0;
2942 }
2943 
2944 /**
2945  * qla4xxx_mem_free - frees memory allocated to adapter
2946  * @ha: Pointer to host adapter structure.
2947  *
2948  * Frees memory previously allocated by qla4xxx_mem_alloc
2949  **/
2950 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2951 {
2952 	if (ha->queues)
2953 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2954 				  ha->queues_dma);
2955 
2956 	 if (ha->fw_dump)
2957 		vfree(ha->fw_dump);
2958 
2959 	ha->queues_len = 0;
2960 	ha->queues = NULL;
2961 	ha->queues_dma = 0;
2962 	ha->request_ring = NULL;
2963 	ha->request_dma = 0;
2964 	ha->response_ring = NULL;
2965 	ha->response_dma = 0;
2966 	ha->shadow_regs = NULL;
2967 	ha->shadow_regs_dma = 0;
2968 	ha->fw_dump = NULL;
2969 	ha->fw_dump_size = 0;
2970 
2971 	/* Free srb pool. */
2972 	if (ha->srb_mempool)
2973 		mempool_destroy(ha->srb_mempool);
2974 
2975 	ha->srb_mempool = NULL;
2976 
2977 	if (ha->chap_dma_pool)
2978 		dma_pool_destroy(ha->chap_dma_pool);
2979 
2980 	if (ha->chap_list)
2981 		vfree(ha->chap_list);
2982 	ha->chap_list = NULL;
2983 
2984 	if (ha->fw_ddb_dma_pool)
2985 		dma_pool_destroy(ha->fw_ddb_dma_pool);
2986 
2987 	/* release io space registers  */
2988 	if (is_qla8022(ha)) {
2989 		if (ha->nx_pcibase)
2990 			iounmap(
2991 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2992 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
2993 		if (ha->nx_pcibase)
2994 			iounmap(
2995 			    (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2996 	} else if (ha->reg) {
2997 		iounmap(ha->reg);
2998 	}
2999 
3000 	if (ha->reset_tmplt.buff)
3001 		vfree(ha->reset_tmplt.buff);
3002 
3003 	pci_release_regions(ha->pdev);
3004 }
3005 
3006 /**
3007  * qla4xxx_mem_alloc - allocates memory for use by adapter.
3008  * @ha: Pointer to host adapter structure
3009  *
3010  * Allocates DMA memory for request and response queues. Also allocates memory
3011  * for srbs.
3012  **/
3013 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
3014 {
3015 	unsigned long align;
3016 
3017 	/* Allocate contiguous block of DMA memory for queues. */
3018 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
3019 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
3020 			  sizeof(struct shadow_regs) +
3021 			  MEM_ALIGN_VALUE +
3022 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
3023 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
3024 					&ha->queues_dma, GFP_KERNEL);
3025 	if (ha->queues == NULL) {
3026 		ql4_printk(KERN_WARNING, ha,
3027 		    "Memory Allocation failed - queues.\n");
3028 
3029 		goto mem_alloc_error_exit;
3030 	}
3031 	memset(ha->queues, 0, ha->queues_len);
3032 
3033 	/*
3034 	 * As per RISC alignment requirements -- the bus-address must be a
3035 	 * multiple of the request-ring size (in bytes).
3036 	 */
3037 	align = 0;
3038 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
3039 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
3040 					   (MEM_ALIGN_VALUE - 1));
3041 
3042 	/* Update request and response queue pointers. */
3043 	ha->request_dma = ha->queues_dma + align;
3044 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
3045 	ha->response_dma = ha->queues_dma + align +
3046 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
3047 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
3048 						    (REQUEST_QUEUE_DEPTH *
3049 						     QUEUE_SIZE));
3050 	ha->shadow_regs_dma = ha->queues_dma + align +
3051 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
3052 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
3053 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
3054 						  (REQUEST_QUEUE_DEPTH *
3055 						   QUEUE_SIZE) +
3056 						  (RESPONSE_QUEUE_DEPTH *
3057 						   QUEUE_SIZE));
3058 
3059 	/* Allocate memory for srb pool. */
3060 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
3061 					 mempool_free_slab, srb_cachep);
3062 	if (ha->srb_mempool == NULL) {
3063 		ql4_printk(KERN_WARNING, ha,
3064 		    "Memory Allocation failed - SRB Pool.\n");
3065 
3066 		goto mem_alloc_error_exit;
3067 	}
3068 
3069 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
3070 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
3071 
3072 	if (ha->chap_dma_pool == NULL) {
3073 		ql4_printk(KERN_WARNING, ha,
3074 		    "%s: chap_dma_pool allocation failed..\n", __func__);
3075 		goto mem_alloc_error_exit;
3076 	}
3077 
3078 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
3079 					      DDB_DMA_BLOCK_SIZE, 8, 0);
3080 
3081 	if (ha->fw_ddb_dma_pool == NULL) {
3082 		ql4_printk(KERN_WARNING, ha,
3083 			   "%s: fw_ddb_dma_pool allocation failed..\n",
3084 			   __func__);
3085 		goto mem_alloc_error_exit;
3086 	}
3087 
3088 	return QLA_SUCCESS;
3089 
3090 mem_alloc_error_exit:
3091 	qla4xxx_mem_free(ha);
3092 	return QLA_ERROR;
3093 }
3094 
3095 /**
3096  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
3097  * @ha: adapter block pointer.
3098  *
3099  * Note: The caller should not hold the idc lock.
3100  **/
3101 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
3102 {
3103 	uint32_t temp, temp_state, temp_val;
3104 	int status = QLA_SUCCESS;
3105 
3106 	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
3107 
3108 	temp_state = qla82xx_get_temp_state(temp);
3109 	temp_val = qla82xx_get_temp_val(temp);
3110 
3111 	if (temp_state == QLA82XX_TEMP_PANIC) {
3112 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
3113 			   " exceeds maximum allowed. Hardware has been shut"
3114 			   " down.\n", temp_val);
3115 		status = QLA_ERROR;
3116 	} else if (temp_state == QLA82XX_TEMP_WARN) {
3117 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
3118 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
3119 				   " degrees C exceeds operating range."
3120 				   " Immediate action needed.\n", temp_val);
3121 	} else {
3122 		if (ha->temperature == QLA82XX_TEMP_WARN)
3123 			ql4_printk(KERN_INFO, ha, "Device temperature is"
3124 				   " now %d degrees C in normal range.\n",
3125 				   temp_val);
3126 	}
3127 	ha->temperature = temp_state;
3128 	return status;
3129 }
3130 
3131 /**
3132  * qla4_8xxx_check_fw_alive  - Check firmware health
3133  * @ha: Pointer to host adapter structure.
3134  *
3135  * Context: Interrupt
3136  **/
3137 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
3138 {
3139 	uint32_t fw_heartbeat_counter;
3140 	int status = QLA_SUCCESS;
3141 
3142 	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
3143 						   QLA8XXX_PEG_ALIVE_COUNTER);
3144 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
3145 	if (fw_heartbeat_counter == 0xffffffff) {
3146 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
3147 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
3148 		    ha->host_no, __func__));
3149 		return status;
3150 	}
3151 
3152 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
3153 		ha->seconds_since_last_heartbeat++;
3154 		/* FW not alive after 2 seconds */
3155 		if (ha->seconds_since_last_heartbeat == 2) {
3156 			ha->seconds_since_last_heartbeat = 0;
3157 			qla4_8xxx_dump_peg_reg(ha);
3158 			status = QLA_ERROR;
3159 		}
3160 	} else
3161 		ha->seconds_since_last_heartbeat = 0;
3162 
3163 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
3164 	return status;
3165 }
3166 
3167 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
3168 {
3169 	uint32_t halt_status;
3170 	int halt_status_unrecoverable = 0;
3171 
3172 	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
3173 
3174 	if (is_qla8022(ha)) {
3175 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3176 			   __func__);
3177 		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
3178 				CRB_NIU_XG_PAUSE_CTL_P0 |
3179 				CRB_NIU_XG_PAUSE_CTL_P1);
3180 
3181 		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
3182 			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
3183 				   __func__);
3184 		if (halt_status & HALT_STATUS_UNRECOVERABLE)
3185 			halt_status_unrecoverable = 1;
3186 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
3187 		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
3188 			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
3189 				   __func__);
3190 		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
3191 			halt_status_unrecoverable = 1;
3192 	}
3193 
3194 	/*
3195 	 * Since we cannot change dev_state in interrupt context,
3196 	 * set appropriate DPC flag then wakeup DPC
3197 	 */
3198 	if (halt_status_unrecoverable) {
3199 		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
3200 	} else {
3201 		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
3202 			   __func__);
3203 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
3204 	}
3205 	qla4xxx_mailbox_premature_completion(ha);
3206 	qla4xxx_wake_dpc(ha);
3207 }
3208 
3209 /**
3210  * qla4_8xxx_watchdog - Poll dev state
3211  * @ha: Pointer to host adapter structure.
3212  *
3213  * Context: Interrupt
3214  **/
3215 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
3216 {
3217 	uint32_t dev_state;
3218 	uint32_t idc_ctrl;
3219 
3220 	/* don't poll if reset is going on */
3221 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
3222 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3223 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
3224 		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
3225 
3226 		if (qla4_8xxx_check_temp(ha)) {
3227 			if (is_qla8022(ha)) {
3228 				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
3229 				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
3230 						CRB_NIU_XG_PAUSE_CTL_P0 |
3231 						CRB_NIU_XG_PAUSE_CTL_P1);
3232 			}
3233 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
3234 			qla4xxx_wake_dpc(ha);
3235 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
3236 			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3237 
3238 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
3239 				   __func__);
3240 
3241 			if (is_qla8032(ha) || is_qla8042(ha)) {
3242 				idc_ctrl = qla4_83xx_rd_reg(ha,
3243 							QLA83XX_IDC_DRV_CTRL);
3244 				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
3245 					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
3246 						   __func__);
3247 					qla4xxx_mailbox_premature_completion(
3248 									    ha);
3249 				}
3250 			}
3251 
3252 			if ((is_qla8032(ha) || is_qla8042(ha)) ||
3253 			    (is_qla8022(ha) && !ql4xdontresethba)) {
3254 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
3255 				qla4xxx_wake_dpc(ha);
3256 			}
3257 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3258 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3259 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
3260 			    __func__);
3261 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
3262 			qla4xxx_wake_dpc(ha);
3263 		} else  {
3264 			/* Check firmware health */
3265 			if (qla4_8xxx_check_fw_alive(ha))
3266 				qla4_8xxx_process_fw_error(ha);
3267 		}
3268 	}
3269 }
3270 
3271 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3272 {
3273 	struct iscsi_session *sess;
3274 	struct ddb_entry *ddb_entry;
3275 	struct scsi_qla_host *ha;
3276 
3277 	sess = cls_sess->dd_data;
3278 	ddb_entry = sess->dd_data;
3279 	ha = ddb_entry->ha;
3280 
3281 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3282 		return;
3283 
3284 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
3285 	    !iscsi_is_session_online(cls_sess)) {
3286 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
3287 		    INVALID_ENTRY) {
3288 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
3289 					0) {
3290 				atomic_set(&ddb_entry->retry_relogin_timer,
3291 					   INVALID_ENTRY);
3292 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3293 				set_bit(DF_RELOGIN, &ddb_entry->flags);
3294 				DEBUG2(ql4_printk(KERN_INFO, ha,
3295 				       "%s: index [%d] login device\n",
3296 					__func__, ddb_entry->fw_ddb_index));
3297 			} else
3298 				atomic_dec(&ddb_entry->retry_relogin_timer);
3299 		}
3300 	}
3301 
3302 	/* Wait for relogin to timeout */
3303 	if (atomic_read(&ddb_entry->relogin_timer) &&
3304 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
3305 		/*
3306 		 * If the relogin times out and the device is
3307 		 * still NOT ONLINE then try and relogin again.
3308 		 */
3309 		if (!iscsi_is_session_online(cls_sess)) {
3310 			/* Reset retry relogin timer */
3311 			atomic_inc(&ddb_entry->relogin_retry_count);
3312 			DEBUG2(ql4_printk(KERN_INFO, ha,
3313 				"%s: index[%d] relogin timed out-retrying"
3314 				" relogin (%d), retry (%d)\n", __func__,
3315 				ddb_entry->fw_ddb_index,
3316 				atomic_read(&ddb_entry->relogin_retry_count),
3317 				ddb_entry->default_time2wait + 4));
3318 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
3319 			atomic_set(&ddb_entry->retry_relogin_timer,
3320 				   ddb_entry->default_time2wait + 4);
3321 		}
3322 	}
3323 }
3324 
3325 /**
3326  * qla4xxx_timer - checks every second for work to do.
3327  * @ha: Pointer to host adapter structure.
3328  **/
3329 static void qla4xxx_timer(struct scsi_qla_host *ha)
3330 {
3331 	int start_dpc = 0;
3332 	uint16_t w;
3333 
3334 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
3335 
3336 	/* If we are in the middle of AER/EEH processing
3337 	 * skip any processing and reschedule the timer
3338 	 */
3339 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3340 		mod_timer(&ha->timer, jiffies + HZ);
3341 		return;
3342 	}
3343 
3344 	/* Hardware read to trigger an EEH error during mailbox waits. */
3345 	if (!pci_channel_offline(ha->pdev))
3346 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3347 
3348 	if (is_qla80XX(ha))
3349 		qla4_8xxx_watchdog(ha);
3350 
3351 	if (is_qla40XX(ha)) {
3352 		/* Check for heartbeat interval. */
3353 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
3354 		    ha->heartbeat_interval != 0) {
3355 			ha->seconds_since_last_heartbeat++;
3356 			if (ha->seconds_since_last_heartbeat >
3357 			    ha->heartbeat_interval + 2)
3358 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
3359 		}
3360 	}
3361 
3362 	/* Process any deferred work. */
3363 	if (!list_empty(&ha->work_list))
3364 		start_dpc++;
3365 
3366 	/* Wakeup the dpc routine for this adapter, if needed. */
3367 	if (start_dpc ||
3368 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3369 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
3370 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
3371 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3372 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3373 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
3374 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
3375 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
3376 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
3377 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
3378 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
3379 			      " - dpc flags = 0x%lx\n",
3380 			      ha->host_no, __func__, ha->dpc_flags));
3381 		qla4xxx_wake_dpc(ha);
3382 	}
3383 
3384 	/* Reschedule timer thread to call us back in one second */
3385 	mod_timer(&ha->timer, jiffies + HZ);
3386 
3387 	DEBUG2(ha->seconds_since_last_intr++);
3388 }
3389 
3390 /**
3391  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3392  * @ha: Pointer to host adapter structure.
3393  *
3394  * This routine stalls the driver until all outstanding commands are returned.
3395  * Caller must release the Hardware Lock prior to calling this routine.
3396  **/
3397 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3398 {
3399 	uint32_t index = 0;
3400 	unsigned long flags;
3401 	struct scsi_cmnd *cmd;
3402 
3403 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3404 
3405 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3406 	    "complete\n", WAIT_CMD_TOV));
3407 
3408 	while (!time_after_eq(jiffies, wtime)) {
3409 		spin_lock_irqsave(&ha->hardware_lock, flags);
3410 		/* Find a command that hasn't completed. */
3411 		for (index = 0; index < ha->host->can_queue; index++) {
3412 			cmd = scsi_host_find_tag(ha->host, index);
3413 			/*
3414 			 * We cannot just check if the index is valid,
3415 			 * becase if we are run from the scsi eh, then
3416 			 * the scsi/block layer is going to prevent
3417 			 * the tag from being released.
3418 			 */
3419 			if (cmd != NULL && CMD_SP(cmd))
3420 				break;
3421 		}
3422 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3423 
3424 		/* If No Commands are pending, wait is complete */
3425 		if (index == ha->host->can_queue)
3426 			return QLA_SUCCESS;
3427 
3428 		msleep(1000);
3429 	}
3430 	/* If we timed out on waiting for commands to come back
3431 	 * return ERROR. */
3432 	return QLA_ERROR;
3433 }
3434 
3435 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
3436 {
3437 	uint32_t ctrl_status;
3438 	unsigned long flags = 0;
3439 
3440 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
3441 
3442 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
3443 		return QLA_ERROR;
3444 
3445 	spin_lock_irqsave(&ha->hardware_lock, flags);
3446 
3447 	/*
3448 	 * If the SCSI Reset Interrupt bit is set, clear it.
3449 	 * Otherwise, the Soft Reset won't work.
3450 	 */
3451 	ctrl_status = readw(&ha->reg->ctrl_status);
3452 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
3453 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3454 
3455 	/* Issue Soft Reset */
3456 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
3457 	readl(&ha->reg->ctrl_status);
3458 
3459 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3460 	return QLA_SUCCESS;
3461 }
3462 
3463 /**
3464  * qla4xxx_soft_reset - performs soft reset.
3465  * @ha: Pointer to host adapter structure.
3466  **/
3467 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
3468 {
3469 	uint32_t max_wait_time;
3470 	unsigned long flags = 0;
3471 	int status;
3472 	uint32_t ctrl_status;
3473 
3474 	status = qla4xxx_hw_reset(ha);
3475 	if (status != QLA_SUCCESS)
3476 		return status;
3477 
3478 	status = QLA_ERROR;
3479 	/* Wait until the Network Reset Intr bit is cleared */
3480 	max_wait_time = RESET_INTR_TOV;
3481 	do {
3482 		spin_lock_irqsave(&ha->hardware_lock, flags);
3483 		ctrl_status = readw(&ha->reg->ctrl_status);
3484 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3485 
3486 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
3487 			break;
3488 
3489 		msleep(1000);
3490 	} while ((--max_wait_time));
3491 
3492 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
3493 		DEBUG2(printk(KERN_WARNING
3494 			      "scsi%ld: Network Reset Intr not cleared by "
3495 			      "Network function, clearing it now!\n",
3496 			      ha->host_no));
3497 		spin_lock_irqsave(&ha->hardware_lock, flags);
3498 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
3499 		readl(&ha->reg->ctrl_status);
3500 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3501 	}
3502 
3503 	/* Wait until the firmware tells us the Soft Reset is done */
3504 	max_wait_time = SOFT_RESET_TOV;
3505 	do {
3506 		spin_lock_irqsave(&ha->hardware_lock, flags);
3507 		ctrl_status = readw(&ha->reg->ctrl_status);
3508 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3509 
3510 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
3511 			status = QLA_SUCCESS;
3512 			break;
3513 		}
3514 
3515 		msleep(1000);
3516 	} while ((--max_wait_time));
3517 
3518 	/*
3519 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
3520 	 * after the soft reset has taken place.
3521 	 */
3522 	spin_lock_irqsave(&ha->hardware_lock, flags);
3523 	ctrl_status = readw(&ha->reg->ctrl_status);
3524 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
3525 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3526 		readl(&ha->reg->ctrl_status);
3527 	}
3528 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3529 
3530 	/* If soft reset fails then most probably the bios on other
3531 	 * function is also enabled.
3532 	 * Since the initialization is sequential the other fn
3533 	 * wont be able to acknowledge the soft reset.
3534 	 * Issue a force soft reset to workaround this scenario.
3535 	 */
3536 	if (max_wait_time == 0) {
3537 		/* Issue Force Soft Reset */
3538 		spin_lock_irqsave(&ha->hardware_lock, flags);
3539 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
3540 		readl(&ha->reg->ctrl_status);
3541 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3542 		/* Wait until the firmware tells us the Soft Reset is done */
3543 		max_wait_time = SOFT_RESET_TOV;
3544 		do {
3545 			spin_lock_irqsave(&ha->hardware_lock, flags);
3546 			ctrl_status = readw(&ha->reg->ctrl_status);
3547 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
3548 
3549 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
3550 				status = QLA_SUCCESS;
3551 				break;
3552 			}
3553 
3554 			msleep(1000);
3555 		} while ((--max_wait_time));
3556 	}
3557 
3558 	return status;
3559 }
3560 
3561 /**
3562  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3563  * @ha: Pointer to host adapter structure.
3564  * @res: returned scsi status
3565  *
3566  * This routine is called just prior to a HARD RESET to return all
3567  * outstanding commands back to the Operating System.
3568  * Caller should make sure that the following locks are released
3569  * before this calling routine: Hardware lock, and io_request_lock.
3570  **/
3571 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3572 {
3573 	struct srb *srb;
3574 	int i;
3575 	unsigned long flags;
3576 
3577 	spin_lock_irqsave(&ha->hardware_lock, flags);
3578 	for (i = 0; i < ha->host->can_queue; i++) {
3579 		srb = qla4xxx_del_from_active_array(ha, i);
3580 		if (srb != NULL) {
3581 			srb->cmd->result = res;
3582 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3583 		}
3584 	}
3585 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3586 }
3587 
3588 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
3589 {
3590 	clear_bit(AF_ONLINE, &ha->flags);
3591 
3592 	/* Disable the board */
3593 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
3594 
3595 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3596 	qla4xxx_mark_all_devices_missing(ha);
3597 	clear_bit(AF_INIT_DONE, &ha->flags);
3598 }
3599 
3600 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3601 {
3602 	struct iscsi_session *sess;
3603 	struct ddb_entry *ddb_entry;
3604 
3605 	sess = cls_session->dd_data;
3606 	ddb_entry = sess->dd_data;
3607 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3608 
3609 	if (ddb_entry->ddb_type == FLASH_DDB)
3610 		iscsi_block_session(ddb_entry->sess);
3611 	else
3612 		iscsi_session_failure(cls_session->dd_data,
3613 				      ISCSI_ERR_CONN_FAILED);
3614 }
3615 
3616 /**
3617  * qla4xxx_recover_adapter - recovers adapter after a fatal error
3618  * @ha: Pointer to host adapter structure.
3619  **/
3620 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3621 {
3622 	int status = QLA_ERROR;
3623 	uint8_t reset_chip = 0;
3624 	uint32_t dev_state;
3625 	unsigned long wait;
3626 
3627 	/* Stall incoming I/O until we are done */
3628 	scsi_block_requests(ha->host);
3629 	clear_bit(AF_ONLINE, &ha->flags);
3630 	clear_bit(AF_LINK_UP, &ha->flags);
3631 
3632 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
3633 
3634 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3635 
3636 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
3637 	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3638 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3639 			   __func__);
3640 		/* disable pause frame for ISP83xx */
3641 		qla4_83xx_disable_pause(ha);
3642 	}
3643 
3644 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
3645 
3646 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3647 		reset_chip = 1;
3648 
3649 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
3650 	 * do not reset adapter, jump to initialize_adapter */
3651 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3652 		status = QLA_SUCCESS;
3653 		goto recover_ha_init_adapter;
3654 	}
3655 
3656 	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
3657 	 * from eh_host_reset or ioctl module */
3658 	if (is_qla80XX(ha) && !reset_chip &&
3659 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3660 
3661 		DEBUG2(ql4_printk(KERN_INFO, ha,
3662 		    "scsi%ld: %s - Performing stop_firmware...\n",
3663 		    ha->host_no, __func__));
3664 		status = ha->isp_ops->reset_firmware(ha);
3665 		if (status == QLA_SUCCESS) {
3666 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3667 				qla4xxx_cmd_wait(ha);
3668 
3669 			ha->isp_ops->disable_intrs(ha);
3670 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3671 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3672 		} else {
3673 			/* If the stop_firmware fails then
3674 			 * reset the entire chip */
3675 			reset_chip = 1;
3676 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3677 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
3678 		}
3679 	}
3680 
3681 	/* Issue full chip reset if recovering from a catastrophic error,
3682 	 * or if stop_firmware fails for ISP-8xxx.
3683 	 * This is the default case for ISP-4xxx */
3684 	if (is_qla40XX(ha) || reset_chip) {
3685 		if (is_qla40XX(ha))
3686 			goto chip_reset;
3687 
3688 		/* Check if 8XXX firmware is alive or not
3689 		 * We may have arrived here from NEED_RESET
3690 		 * detection only */
3691 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
3692 			goto chip_reset;
3693 
3694 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
3695 		while (time_before(jiffies, wait)) {
3696 			if (qla4_8xxx_check_fw_alive(ha)) {
3697 				qla4xxx_mailbox_premature_completion(ha);
3698 				break;
3699 			}
3700 
3701 			set_current_state(TASK_UNINTERRUPTIBLE);
3702 			schedule_timeout(HZ);
3703 		}
3704 chip_reset:
3705 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3706 			qla4xxx_cmd_wait(ha);
3707 
3708 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3709 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3710 		DEBUG2(ql4_printk(KERN_INFO, ha,
3711 		    "scsi%ld: %s - Performing chip reset..\n",
3712 		    ha->host_no, __func__));
3713 		status = ha->isp_ops->reset_chip(ha);
3714 	}
3715 
3716 	/* Flush any pending ddb changed AENs */
3717 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3718 
3719 recover_ha_init_adapter:
3720 	/* Upon successful firmware/chip reset, re-initialize the adapter */
3721 	if (status == QLA_SUCCESS) {
3722 		/* For ISP-4xxx, force function 1 to always initialize
3723 		 * before function 3 to prevent both funcions from
3724 		 * stepping on top of the other */
3725 		if (is_qla40XX(ha) && (ha->mac_index == 3))
3726 			ssleep(6);
3727 
3728 		/* NOTE: AF_ONLINE flag set upon successful completion of
3729 		 *       qla4xxx_initialize_adapter */
3730 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3731 	}
3732 
3733 	/* Retry failed adapter initialization, if necessary
3734 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3735 	 * case to prevent ping-pong resets between functions */
3736 	if (!test_bit(AF_ONLINE, &ha->flags) &&
3737 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3738 		/* Adapter initialization failed, see if we can retry
3739 		 * resetting the ha.
3740 		 * Since we don't want to block the DPC for too long
3741 		 * with multiple resets in the same thread,
3742 		 * utilize DPC to retry */
3743 		if (is_qla80XX(ha)) {
3744 			ha->isp_ops->idc_lock(ha);
3745 			dev_state = qla4_8xxx_rd_direct(ha,
3746 							QLA8XXX_CRB_DEV_STATE);
3747 			ha->isp_ops->idc_unlock(ha);
3748 			if (dev_state == QLA8XXX_DEV_FAILED) {
3749 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
3750 					   "recover adapter. H/W is in Failed "
3751 					   "state\n", __func__);
3752 				qla4xxx_dead_adapter_cleanup(ha);
3753 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3754 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3755 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3756 						&ha->dpc_flags);
3757 				status = QLA_ERROR;
3758 
3759 				goto exit_recover;
3760 			}
3761 		}
3762 
3763 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3764 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3765 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
3766 				      "(%d) more times\n", ha->host_no,
3767 				      ha->retry_reset_ha_cnt));
3768 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3769 			status = QLA_ERROR;
3770 		} else {
3771 			if (ha->retry_reset_ha_cnt > 0) {
3772 				/* Schedule another Reset HA--DPC will retry */
3773 				ha->retry_reset_ha_cnt--;
3774 				DEBUG2(printk("scsi%ld: recover adapter - "
3775 					      "retry remaining %d\n",
3776 					      ha->host_no,
3777 					      ha->retry_reset_ha_cnt));
3778 				status = QLA_ERROR;
3779 			}
3780 
3781 			if (ha->retry_reset_ha_cnt == 0) {
3782 				/* Recover adapter retries have been exhausted.
3783 				 * Adapter DEAD */
3784 				DEBUG2(printk("scsi%ld: recover adapter "
3785 					      "failed - board disabled\n",
3786 					      ha->host_no));
3787 				qla4xxx_dead_adapter_cleanup(ha);
3788 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3789 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3790 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3791 					  &ha->dpc_flags);
3792 				status = QLA_ERROR;
3793 			}
3794 		}
3795 	} else {
3796 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3797 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3798 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3799 	}
3800 
3801 exit_recover:
3802 	ha->adapter_error_count++;
3803 
3804 	if (test_bit(AF_ONLINE, &ha->flags))
3805 		ha->isp_ops->enable_intrs(ha);
3806 
3807 	scsi_unblock_requests(ha->host);
3808 
3809 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3810 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3811 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3812 
3813 	return status;
3814 }
3815 
3816 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3817 {
3818 	struct iscsi_session *sess;
3819 	struct ddb_entry *ddb_entry;
3820 	struct scsi_qla_host *ha;
3821 
3822 	sess = cls_session->dd_data;
3823 	ddb_entry = sess->dd_data;
3824 	ha = ddb_entry->ha;
3825 	if (!iscsi_is_session_online(cls_session)) {
3826 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3827 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3828 				   " unblock session\n", ha->host_no, __func__,
3829 				   ddb_entry->fw_ddb_index);
3830 			iscsi_unblock_session(ddb_entry->sess);
3831 		} else {
3832 			/* Trigger relogin */
3833 			if (ddb_entry->ddb_type == FLASH_DDB) {
3834 				if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3835 				      test_bit(DF_DISABLE_RELOGIN,
3836 					       &ddb_entry->flags)))
3837 					qla4xxx_arm_relogin_timer(ddb_entry);
3838 			} else
3839 				iscsi_session_failure(cls_session->dd_data,
3840 						      ISCSI_ERR_CONN_FAILED);
3841 		}
3842 	}
3843 }
3844 
3845 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3846 {
3847 	struct iscsi_session *sess;
3848 	struct ddb_entry *ddb_entry;
3849 	struct scsi_qla_host *ha;
3850 
3851 	sess = cls_session->dd_data;
3852 	ddb_entry = sess->dd_data;
3853 	ha = ddb_entry->ha;
3854 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3855 		   " unblock session\n", ha->host_no, __func__,
3856 		   ddb_entry->fw_ddb_index);
3857 
3858 	iscsi_unblock_session(ddb_entry->sess);
3859 
3860 	/* Start scan target */
3861 	if (test_bit(AF_ONLINE, &ha->flags)) {
3862 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3863 			   " start scan\n", ha->host_no, __func__,
3864 			   ddb_entry->fw_ddb_index);
3865 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3866 	}
3867 	return QLA_SUCCESS;
3868 }
3869 
3870 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3871 {
3872 	struct iscsi_session *sess;
3873 	struct ddb_entry *ddb_entry;
3874 	struct scsi_qla_host *ha;
3875 	int status = QLA_SUCCESS;
3876 
3877 	sess = cls_session->dd_data;
3878 	ddb_entry = sess->dd_data;
3879 	ha = ddb_entry->ha;
3880 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3881 		   " unblock user space session\n", ha->host_no, __func__,
3882 		   ddb_entry->fw_ddb_index);
3883 
3884 	if (!iscsi_is_session_online(cls_session)) {
3885 		iscsi_conn_start(ddb_entry->conn);
3886 		iscsi_conn_login_event(ddb_entry->conn,
3887 				       ISCSI_CONN_STATE_LOGGED_IN);
3888 	} else {
3889 		ql4_printk(KERN_INFO, ha,
3890 			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3891 			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
3892 			   cls_session->sid);
3893 		status = QLA_ERROR;
3894 	}
3895 
3896 	return status;
3897 }
3898 
3899 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3900 {
3901 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3902 }
3903 
3904 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3905 {
3906 	uint16_t relogin_timer;
3907 	struct iscsi_session *sess;
3908 	struct ddb_entry *ddb_entry;
3909 	struct scsi_qla_host *ha;
3910 
3911 	sess = cls_sess->dd_data;
3912 	ddb_entry = sess->dd_data;
3913 	ha = ddb_entry->ha;
3914 
3915 	relogin_timer = max(ddb_entry->default_relogin_timeout,
3916 			    (uint16_t)RELOGIN_TOV);
3917 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3918 
3919 	DEBUG2(ql4_printk(KERN_INFO, ha,
3920 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3921 			  ddb_entry->fw_ddb_index, relogin_timer));
3922 
3923 	qla4xxx_login_flash_ddb(cls_sess);
3924 }
3925 
3926 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3927 {
3928 	struct iscsi_session *sess;
3929 	struct ddb_entry *ddb_entry;
3930 	struct scsi_qla_host *ha;
3931 
3932 	sess = cls_sess->dd_data;
3933 	ddb_entry = sess->dd_data;
3934 	ha = ddb_entry->ha;
3935 
3936 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3937 		return;
3938 
3939 	if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3940 		return;
3941 
3942 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3943 	    !iscsi_is_session_online(cls_sess)) {
3944 		DEBUG2(ql4_printk(KERN_INFO, ha,
3945 				  "relogin issued\n"));
3946 		qla4xxx_relogin_flash_ddb(cls_sess);
3947 	}
3948 }
3949 
3950 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3951 {
3952 	if (ha->dpc_thread)
3953 		queue_work(ha->dpc_thread, &ha->dpc_work);
3954 }
3955 
3956 static struct qla4_work_evt *
3957 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3958 		   enum qla4_work_type type)
3959 {
3960 	struct qla4_work_evt *e;
3961 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3962 
3963 	e = kzalloc(size, GFP_ATOMIC);
3964 	if (!e)
3965 		return NULL;
3966 
3967 	INIT_LIST_HEAD(&e->list);
3968 	e->type = type;
3969 	return e;
3970 }
3971 
3972 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3973 			     struct qla4_work_evt *e)
3974 {
3975 	unsigned long flags;
3976 
3977 	spin_lock_irqsave(&ha->work_lock, flags);
3978 	list_add_tail(&e->list, &ha->work_list);
3979 	spin_unlock_irqrestore(&ha->work_lock, flags);
3980 	qla4xxx_wake_dpc(ha);
3981 }
3982 
3983 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3984 			  enum iscsi_host_event_code aen_code,
3985 			  uint32_t data_size, uint8_t *data)
3986 {
3987 	struct qla4_work_evt *e;
3988 
3989 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3990 	if (!e)
3991 		return QLA_ERROR;
3992 
3993 	e->u.aen.code = aen_code;
3994 	e->u.aen.data_size = data_size;
3995 	memcpy(e->u.aen.data, data, data_size);
3996 
3997 	qla4xxx_post_work(ha, e);
3998 
3999 	return QLA_SUCCESS;
4000 }
4001 
4002 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
4003 			       uint32_t status, uint32_t pid,
4004 			       uint32_t data_size, uint8_t *data)
4005 {
4006 	struct qla4_work_evt *e;
4007 
4008 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
4009 	if (!e)
4010 		return QLA_ERROR;
4011 
4012 	e->u.ping.status = status;
4013 	e->u.ping.pid = pid;
4014 	e->u.ping.data_size = data_size;
4015 	memcpy(e->u.ping.data, data, data_size);
4016 
4017 	qla4xxx_post_work(ha, e);
4018 
4019 	return QLA_SUCCESS;
4020 }
4021 
4022 static void qla4xxx_do_work(struct scsi_qla_host *ha)
4023 {
4024 	struct qla4_work_evt *e, *tmp;
4025 	unsigned long flags;
4026 	LIST_HEAD(work);
4027 
4028 	spin_lock_irqsave(&ha->work_lock, flags);
4029 	list_splice_init(&ha->work_list, &work);
4030 	spin_unlock_irqrestore(&ha->work_lock, flags);
4031 
4032 	list_for_each_entry_safe(e, tmp, &work, list) {
4033 		list_del_init(&e->list);
4034 
4035 		switch (e->type) {
4036 		case QLA4_EVENT_AEN:
4037 			iscsi_post_host_event(ha->host_no,
4038 					      &qla4xxx_iscsi_transport,
4039 					      e->u.aen.code,
4040 					      e->u.aen.data_size,
4041 					      e->u.aen.data);
4042 			break;
4043 		case QLA4_EVENT_PING_STATUS:
4044 			iscsi_ping_comp_event(ha->host_no,
4045 					      &qla4xxx_iscsi_transport,
4046 					      e->u.ping.status,
4047 					      e->u.ping.pid,
4048 					      e->u.ping.data_size,
4049 					      e->u.ping.data);
4050 			break;
4051 		default:
4052 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
4053 				   "supported", e->type);
4054 		}
4055 		kfree(e);
4056 	}
4057 }
4058 
4059 /**
4060  * qla4xxx_do_dpc - dpc routine
4061  * @data: in our case pointer to adapter structure
4062  *
4063  * This routine is a task that is schedule by the interrupt handler
4064  * to perform the background processing for interrupts.  We put it
4065  * on a task queue that is consumed whenever the scheduler runs; that's
4066  * so you can do anything (i.e. put the process to sleep etc).  In fact,
4067  * the mid-level tries to sleep when it reaches the driver threshold
4068  * "host->can_queue". This can cause a panic if we were in our interrupt code.
4069  **/
4070 static void qla4xxx_do_dpc(struct work_struct *work)
4071 {
4072 	struct scsi_qla_host *ha =
4073 		container_of(work, struct scsi_qla_host, dpc_work);
4074 	int status = QLA_ERROR;
4075 
4076 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
4077 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
4078 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
4079 
4080 	/* Initialization not yet finished. Don't do anything yet. */
4081 	if (!test_bit(AF_INIT_DONE, &ha->flags))
4082 		return;
4083 
4084 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4085 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
4086 		    ha->host_no, __func__, ha->flags));
4087 		return;
4088 	}
4089 
4090 	/* post events to application */
4091 	qla4xxx_do_work(ha);
4092 
4093 	if (is_qla80XX(ha)) {
4094 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
4095 			if (is_qla8032(ha) || is_qla8042(ha)) {
4096 				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4097 					   __func__);
4098 				/* disable pause frame for ISP83xx */
4099 				qla4_83xx_disable_pause(ha);
4100 			}
4101 
4102 			ha->isp_ops->idc_lock(ha);
4103 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
4104 					    QLA8XXX_DEV_FAILED);
4105 			ha->isp_ops->idc_unlock(ha);
4106 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
4107 			qla4_8xxx_device_state_handler(ha);
4108 		}
4109 
4110 		if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
4111 			if (is_qla8042(ha)) {
4112 				if (ha->idc_info.info2 &
4113 				    ENABLE_INTERNAL_LOOPBACK) {
4114 					ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
4115 						   __func__);
4116 					status = qla4_84xx_config_acb(ha,
4117 							    ACB_CONFIG_DISABLE);
4118 					if (status != QLA_SUCCESS) {
4119 						ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
4120 							   __func__);
4121 					}
4122 				}
4123 			}
4124 			qla4_83xx_post_idc_ack(ha);
4125 			clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
4126 		}
4127 
4128 		if (is_qla8042(ha) &&
4129 		    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
4130 			ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
4131 				   __func__);
4132 			if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
4133 			    QLA_SUCCESS) {
4134 				ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
4135 					   __func__);
4136 			}
4137 			clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
4138 		}
4139 
4140 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
4141 			qla4_8xxx_need_qsnt_handler(ha);
4142 		}
4143 	}
4144 
4145 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
4146 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4147 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4148 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
4149 		if ((is_qla8022(ha) && ql4xdontresethba) ||
4150 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
4151 		     qla4_83xx_idc_dontreset(ha))) {
4152 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
4153 			    ha->host_no, __func__));
4154 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4155 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
4156 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
4157 			goto dpc_post_reset_ha;
4158 		}
4159 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
4160 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
4161 			qla4xxx_recover_adapter(ha);
4162 
4163 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4164 			uint8_t wait_time = RESET_INTR_TOV;
4165 
4166 			while ((readw(&ha->reg->ctrl_status) &
4167 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
4168 				if (--wait_time == 0)
4169 					break;
4170 				msleep(1000);
4171 			}
4172 			if (wait_time == 0)
4173 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
4174 					      "bit not cleared-- resetting\n",
4175 					      ha->host_no, __func__));
4176 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4177 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
4178 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4179 				status = qla4xxx_recover_adapter(ha);
4180 			}
4181 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
4182 			if (status == QLA_SUCCESS)
4183 				ha->isp_ops->enable_intrs(ha);
4184 		}
4185 	}
4186 
4187 dpc_post_reset_ha:
4188 	/* ---- process AEN? --- */
4189 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
4190 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
4191 
4192 	/* ---- Get DHCP IP Address? --- */
4193 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
4194 		qla4xxx_get_dhcp_ip_address(ha);
4195 
4196 	/* ---- relogin device? --- */
4197 	if (adapter_up(ha) &&
4198 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
4199 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
4200 	}
4201 
4202 	/* ---- link change? --- */
4203 	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
4204 	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
4205 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
4206 			/* ---- link down? --- */
4207 			qla4xxx_mark_all_devices_missing(ha);
4208 		} else {
4209 			/* ---- link up? --- *
4210 			 * F/W will auto login to all devices ONLY ONCE after
4211 			 * link up during driver initialization and runtime
4212 			 * fatal error recovery.  Therefore, the driver must
4213 			 * manually relogin to devices when recovering from
4214 			 * connection failures, logouts, expired KATO, etc. */
4215 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
4216 				qla4xxx_build_ddb_list(ha, ha->is_reset);
4217 				iscsi_host_for_each_session(ha->host,
4218 						qla4xxx_login_flash_ddb);
4219 			} else
4220 				qla4xxx_relogin_all_devices(ha);
4221 		}
4222 	}
4223 }
4224 
4225 /**
4226  * qla4xxx_free_adapter - release the adapter
4227  * @ha: pointer to adapter structure
4228  **/
4229 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
4230 {
4231 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4232 
4233 	/* Turn-off interrupts on the card. */
4234 	ha->isp_ops->disable_intrs(ha);
4235 
4236 	if (is_qla40XX(ha)) {
4237 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
4238 		       &ha->reg->ctrl_status);
4239 		readl(&ha->reg->ctrl_status);
4240 	} else if (is_qla8022(ha)) {
4241 		writel(0, &ha->qla4_82xx_reg->host_int);
4242 		readl(&ha->qla4_82xx_reg->host_int);
4243 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4244 		writel(0, &ha->qla4_83xx_reg->risc_intr);
4245 		readl(&ha->qla4_83xx_reg->risc_intr);
4246 	}
4247 
4248 	/* Remove timer thread, if present */
4249 	if (ha->timer_active)
4250 		qla4xxx_stop_timer(ha);
4251 
4252 	/* Kill the kernel thread for this host */
4253 	if (ha->dpc_thread)
4254 		destroy_workqueue(ha->dpc_thread);
4255 
4256 	/* Kill the kernel thread for this host */
4257 	if (ha->task_wq)
4258 		destroy_workqueue(ha->task_wq);
4259 
4260 	/* Put firmware in known state */
4261 	ha->isp_ops->reset_firmware(ha);
4262 
4263 	if (is_qla80XX(ha)) {
4264 		ha->isp_ops->idc_lock(ha);
4265 		qla4_8xxx_clear_drv_active(ha);
4266 		ha->isp_ops->idc_unlock(ha);
4267 	}
4268 
4269 	/* Detach interrupts */
4270 	qla4xxx_free_irqs(ha);
4271 
4272 	/* free extra memory */
4273 	qla4xxx_mem_free(ha);
4274 }
4275 
4276 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
4277 {
4278 	int status = 0;
4279 	unsigned long mem_base, mem_len, db_base, db_len;
4280 	struct pci_dev *pdev = ha->pdev;
4281 
4282 	status = pci_request_regions(pdev, DRIVER_NAME);
4283 	if (status) {
4284 		printk(KERN_WARNING
4285 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
4286 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
4287 		goto iospace_error_exit;
4288 	}
4289 
4290 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
4291 	    __func__, pdev->revision));
4292 	ha->revision_id = pdev->revision;
4293 
4294 	/* remap phys address */
4295 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
4296 	mem_len = pci_resource_len(pdev, 0);
4297 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
4298 	    __func__, mem_base, mem_len));
4299 
4300 	/* mapping of pcibase pointer */
4301 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
4302 	if (!ha->nx_pcibase) {
4303 		printk(KERN_ERR
4304 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
4305 		pci_release_regions(ha->pdev);
4306 		goto iospace_error_exit;
4307 	}
4308 
4309 	/* Mapping of IO base pointer, door bell read and write pointer */
4310 
4311 	/* mapping of IO base pointer */
4312 	if (is_qla8022(ha)) {
4313 		ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
4314 				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
4315 				     (ha->pdev->devfn << 11));
4316 		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
4317 				    QLA82XX_CAM_RAM_DB2);
4318 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
4319 		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
4320 				    ((uint8_t *)ha->nx_pcibase);
4321 	}
4322 
4323 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
4324 	db_len = pci_resource_len(pdev, 4);
4325 
4326 	return 0;
4327 iospace_error_exit:
4328 	return -ENOMEM;
4329 }
4330 
4331 /***
4332  * qla4xxx_iospace_config - maps registers
4333  * @ha: pointer to adapter structure
4334  *
4335  * This routines maps HBA's registers from the pci address space
4336  * into the kernel virtual address space for memory mapped i/o.
4337  **/
4338 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
4339 {
4340 	unsigned long pio, pio_len, pio_flags;
4341 	unsigned long mmio, mmio_len, mmio_flags;
4342 
4343 	pio = pci_resource_start(ha->pdev, 0);
4344 	pio_len = pci_resource_len(ha->pdev, 0);
4345 	pio_flags = pci_resource_flags(ha->pdev, 0);
4346 	if (pio_flags & IORESOURCE_IO) {
4347 		if (pio_len < MIN_IOBASE_LEN) {
4348 			ql4_printk(KERN_WARNING, ha,
4349 				"Invalid PCI I/O region size\n");
4350 			pio = 0;
4351 		}
4352 	} else {
4353 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
4354 		pio = 0;
4355 	}
4356 
4357 	/* Use MMIO operations for all accesses. */
4358 	mmio = pci_resource_start(ha->pdev, 1);
4359 	mmio_len = pci_resource_len(ha->pdev, 1);
4360 	mmio_flags = pci_resource_flags(ha->pdev, 1);
4361 
4362 	if (!(mmio_flags & IORESOURCE_MEM)) {
4363 		ql4_printk(KERN_ERR, ha,
4364 		    "region #0 not an MMIO resource, aborting\n");
4365 
4366 		goto iospace_error_exit;
4367 	}
4368 
4369 	if (mmio_len < MIN_IOBASE_LEN) {
4370 		ql4_printk(KERN_ERR, ha,
4371 		    "Invalid PCI mem region size, aborting\n");
4372 		goto iospace_error_exit;
4373 	}
4374 
4375 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4376 		ql4_printk(KERN_WARNING, ha,
4377 		    "Failed to reserve PIO/MMIO regions\n");
4378 
4379 		goto iospace_error_exit;
4380 	}
4381 
4382 	ha->pio_address = pio;
4383 	ha->pio_length = pio_len;
4384 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4385 	if (!ha->reg) {
4386 		ql4_printk(KERN_ERR, ha,
4387 		    "cannot remap MMIO, aborting\n");
4388 
4389 		goto iospace_error_exit;
4390 	}
4391 
4392 	return 0;
4393 
4394 iospace_error_exit:
4395 	return -ENOMEM;
4396 }
4397 
4398 static struct isp_operations qla4xxx_isp_ops = {
4399 	.iospace_config         = qla4xxx_iospace_config,
4400 	.pci_config             = qla4xxx_pci_config,
4401 	.disable_intrs          = qla4xxx_disable_intrs,
4402 	.enable_intrs           = qla4xxx_enable_intrs,
4403 	.start_firmware         = qla4xxx_start_firmware,
4404 	.intr_handler           = qla4xxx_intr_handler,
4405 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
4406 	.reset_chip             = qla4xxx_soft_reset,
4407 	.reset_firmware         = qla4xxx_hw_reset,
4408 	.queue_iocb             = qla4xxx_queue_iocb,
4409 	.complete_iocb          = qla4xxx_complete_iocb,
4410 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
4411 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
4412 	.get_sys_info           = qla4xxx_get_sys_info,
4413 	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
4414 	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
4415 };
4416 
4417 static struct isp_operations qla4_82xx_isp_ops = {
4418 	.iospace_config         = qla4_8xxx_iospace_config,
4419 	.pci_config             = qla4_8xxx_pci_config,
4420 	.disable_intrs          = qla4_82xx_disable_intrs,
4421 	.enable_intrs           = qla4_82xx_enable_intrs,
4422 	.start_firmware         = qla4_8xxx_load_risc,
4423 	.restart_firmware	= qla4_82xx_try_start_fw,
4424 	.intr_handler           = qla4_82xx_intr_handler,
4425 	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
4426 	.need_reset		= qla4_8xxx_need_reset,
4427 	.reset_chip             = qla4_82xx_isp_reset,
4428 	.reset_firmware         = qla4_8xxx_stop_firmware,
4429 	.queue_iocb             = qla4_82xx_queue_iocb,
4430 	.complete_iocb          = qla4_82xx_complete_iocb,
4431 	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
4432 	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
4433 	.get_sys_info           = qla4_8xxx_get_sys_info,
4434 	.rd_reg_direct		= qla4_82xx_rd_32,
4435 	.wr_reg_direct		= qla4_82xx_wr_32,
4436 	.rd_reg_indirect	= qla4_82xx_md_rd_32,
4437 	.wr_reg_indirect	= qla4_82xx_md_wr_32,
4438 	.idc_lock		= qla4_82xx_idc_lock,
4439 	.idc_unlock		= qla4_82xx_idc_unlock,
4440 	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
4441 	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
4442 	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
4443 };
4444 
4445 static struct isp_operations qla4_83xx_isp_ops = {
4446 	.iospace_config		= qla4_8xxx_iospace_config,
4447 	.pci_config		= qla4_8xxx_pci_config,
4448 	.disable_intrs		= qla4_83xx_disable_intrs,
4449 	.enable_intrs		= qla4_83xx_enable_intrs,
4450 	.start_firmware		= qla4_8xxx_load_risc,
4451 	.restart_firmware	= qla4_83xx_start_firmware,
4452 	.intr_handler		= qla4_83xx_intr_handler,
4453 	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
4454 	.need_reset		= qla4_8xxx_need_reset,
4455 	.reset_chip		= qla4_83xx_isp_reset,
4456 	.reset_firmware		= qla4_8xxx_stop_firmware,
4457 	.queue_iocb		= qla4_83xx_queue_iocb,
4458 	.complete_iocb		= qla4_83xx_complete_iocb,
4459 	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
4460 	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
4461 	.get_sys_info		= qla4_8xxx_get_sys_info,
4462 	.rd_reg_direct		= qla4_83xx_rd_reg,
4463 	.wr_reg_direct		= qla4_83xx_wr_reg,
4464 	.rd_reg_indirect	= qla4_83xx_rd_reg_indirect,
4465 	.wr_reg_indirect	= qla4_83xx_wr_reg_indirect,
4466 	.idc_lock		= qla4_83xx_drv_lock,
4467 	.idc_unlock		= qla4_83xx_drv_unlock,
4468 	.rom_lock_recovery	= qla4_83xx_rom_lock_recovery,
4469 	.queue_mailbox_command	= qla4_83xx_queue_mbox_cmd,
4470 	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
4471 };
4472 
4473 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4474 {
4475 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
4476 }
4477 
4478 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4479 {
4480 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
4481 }
4482 
4483 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4484 {
4485 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
4486 }
4487 
4488 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4489 {
4490 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
4491 }
4492 
4493 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4494 {
4495 	struct scsi_qla_host *ha = data;
4496 	char *str = buf;
4497 	int rc;
4498 
4499 	switch (type) {
4500 	case ISCSI_BOOT_ETH_FLAGS:
4501 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4502 		break;
4503 	case ISCSI_BOOT_ETH_INDEX:
4504 		rc = sprintf(str, "0\n");
4505 		break;
4506 	case ISCSI_BOOT_ETH_MAC:
4507 		rc = sysfs_format_mac(str, ha->my_mac,
4508 				      MAC_ADDR_LEN);
4509 		break;
4510 	default:
4511 		rc = -ENOSYS;
4512 		break;
4513 	}
4514 	return rc;
4515 }
4516 
4517 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4518 {
4519 	int rc;
4520 
4521 	switch (type) {
4522 	case ISCSI_BOOT_ETH_FLAGS:
4523 	case ISCSI_BOOT_ETH_MAC:
4524 	case ISCSI_BOOT_ETH_INDEX:
4525 		rc = S_IRUGO;
4526 		break;
4527 	default:
4528 		rc = 0;
4529 		break;
4530 	}
4531 	return rc;
4532 }
4533 
4534 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4535 {
4536 	struct scsi_qla_host *ha = data;
4537 	char *str = buf;
4538 	int rc;
4539 
4540 	switch (type) {
4541 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4542 		rc = sprintf(str, "%s\n", ha->name_string);
4543 		break;
4544 	default:
4545 		rc = -ENOSYS;
4546 		break;
4547 	}
4548 	return rc;
4549 }
4550 
4551 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4552 {
4553 	int rc;
4554 
4555 	switch (type) {
4556 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4557 		rc = S_IRUGO;
4558 		break;
4559 	default:
4560 		rc = 0;
4561 		break;
4562 	}
4563 	return rc;
4564 }
4565 
4566 static ssize_t
4567 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4568 			   char *buf)
4569 {
4570 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4571 	char *str = buf;
4572 	int rc;
4573 
4574 	switch (type) {
4575 	case ISCSI_BOOT_TGT_NAME:
4576 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
4577 		break;
4578 	case ISCSI_BOOT_TGT_IP_ADDR:
4579 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4580 			rc = sprintf(buf, "%pI4\n",
4581 				     &boot_conn->dest_ipaddr.ip_address);
4582 		else
4583 			rc = sprintf(str, "%pI6\n",
4584 				     &boot_conn->dest_ipaddr.ip_address);
4585 		break;
4586 	case ISCSI_BOOT_TGT_PORT:
4587 			rc = sprintf(str, "%d\n", boot_conn->dest_port);
4588 		break;
4589 	case ISCSI_BOOT_TGT_CHAP_NAME:
4590 		rc = sprintf(str,  "%.*s\n",
4591 			     boot_conn->chap.target_chap_name_length,
4592 			     (char *)&boot_conn->chap.target_chap_name);
4593 		break;
4594 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4595 		rc = sprintf(str,  "%.*s\n",
4596 			     boot_conn->chap.target_secret_length,
4597 			     (char *)&boot_conn->chap.target_secret);
4598 		break;
4599 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4600 		rc = sprintf(str,  "%.*s\n",
4601 			     boot_conn->chap.intr_chap_name_length,
4602 			     (char *)&boot_conn->chap.intr_chap_name);
4603 		break;
4604 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4605 		rc = sprintf(str,  "%.*s\n",
4606 			     boot_conn->chap.intr_secret_length,
4607 			     (char *)&boot_conn->chap.intr_secret);
4608 		break;
4609 	case ISCSI_BOOT_TGT_FLAGS:
4610 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4611 		break;
4612 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4613 		rc = sprintf(str, "0\n");
4614 		break;
4615 	default:
4616 		rc = -ENOSYS;
4617 		break;
4618 	}
4619 	return rc;
4620 }
4621 
4622 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
4623 {
4624 	struct scsi_qla_host *ha = data;
4625 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
4626 
4627 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4628 }
4629 
4630 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
4631 {
4632 	struct scsi_qla_host *ha = data;
4633 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
4634 
4635 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4636 }
4637 
4638 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4639 {
4640 	int rc;
4641 
4642 	switch (type) {
4643 	case ISCSI_BOOT_TGT_NAME:
4644 	case ISCSI_BOOT_TGT_IP_ADDR:
4645 	case ISCSI_BOOT_TGT_PORT:
4646 	case ISCSI_BOOT_TGT_CHAP_NAME:
4647 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4648 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4649 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4650 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4651 	case ISCSI_BOOT_TGT_FLAGS:
4652 		rc = S_IRUGO;
4653 		break;
4654 	default:
4655 		rc = 0;
4656 		break;
4657 	}
4658 	return rc;
4659 }
4660 
4661 static void qla4xxx_boot_release(void *data)
4662 {
4663 	struct scsi_qla_host *ha = data;
4664 
4665 	scsi_host_put(ha->host);
4666 }
4667 
4668 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4669 {
4670 	dma_addr_t buf_dma;
4671 	uint32_t addr, pri_addr, sec_addr;
4672 	uint32_t offset;
4673 	uint16_t func_num;
4674 	uint8_t val;
4675 	uint8_t *buf = NULL;
4676 	size_t size = 13 * sizeof(uint8_t);
4677 	int ret = QLA_SUCCESS;
4678 
4679 	func_num = PCI_FUNC(ha->pdev->devfn);
4680 
4681 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4682 		   __func__, ha->pdev->device, func_num);
4683 
4684 	if (is_qla40XX(ha)) {
4685 		if (func_num == 1) {
4686 			addr = NVRAM_PORT0_BOOT_MODE;
4687 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4688 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4689 		} else if (func_num == 3) {
4690 			addr = NVRAM_PORT1_BOOT_MODE;
4691 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4692 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4693 		} else {
4694 			ret = QLA_ERROR;
4695 			goto exit_boot_info;
4696 		}
4697 
4698 		/* Check Boot Mode */
4699 		val = rd_nvram_byte(ha, addr);
4700 		if (!(val & 0x07)) {
4701 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4702 					  "options : 0x%x\n", __func__, val));
4703 			ret = QLA_ERROR;
4704 			goto exit_boot_info;
4705 		}
4706 
4707 		/* get primary valid target index */
4708 		val = rd_nvram_byte(ha, pri_addr);
4709 		if (val & BIT_7)
4710 			ddb_index[0] = (val & 0x7f);
4711 
4712 		/* get secondary valid target index */
4713 		val = rd_nvram_byte(ha, sec_addr);
4714 		if (val & BIT_7)
4715 			ddb_index[1] = (val & 0x7f);
4716 
4717 	} else if (is_qla80XX(ha)) {
4718 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
4719 					 &buf_dma, GFP_KERNEL);
4720 		if (!buf) {
4721 			DEBUG2(ql4_printk(KERN_ERR, ha,
4722 					  "%s: Unable to allocate dma buffer\n",
4723 					   __func__));
4724 			ret = QLA_ERROR;
4725 			goto exit_boot_info;
4726 		}
4727 
4728 		if (ha->port_num == 0)
4729 			offset = BOOT_PARAM_OFFSET_PORT0;
4730 		else if (ha->port_num == 1)
4731 			offset = BOOT_PARAM_OFFSET_PORT1;
4732 		else {
4733 			ret = QLA_ERROR;
4734 			goto exit_boot_info_free;
4735 		}
4736 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4737 		       offset;
4738 		if (qla4xxx_get_flash(ha, buf_dma, addr,
4739 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4740 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4741 					  " failed\n", ha->host_no, __func__));
4742 			ret = QLA_ERROR;
4743 			goto exit_boot_info_free;
4744 		}
4745 		/* Check Boot Mode */
4746 		if (!(buf[1] & 0x07)) {
4747 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4748 					  " : 0x%x\n", buf[1]));
4749 			ret = QLA_ERROR;
4750 			goto exit_boot_info_free;
4751 		}
4752 
4753 		/* get primary valid target index */
4754 		if (buf[2] & BIT_7)
4755 			ddb_index[0] = buf[2] & 0x7f;
4756 
4757 		/* get secondary valid target index */
4758 		if (buf[11] & BIT_7)
4759 			ddb_index[1] = buf[11] & 0x7f;
4760 	} else {
4761 		ret = QLA_ERROR;
4762 		goto exit_boot_info;
4763 	}
4764 
4765 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4766 			  " target ID %d\n", __func__, ddb_index[0],
4767 			  ddb_index[1]));
4768 
4769 exit_boot_info_free:
4770 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4771 exit_boot_info:
4772 	ha->pri_ddb_idx = ddb_index[0];
4773 	ha->sec_ddb_idx = ddb_index[1];
4774 	return ret;
4775 }
4776 
4777 /**
4778  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4779  * @ha: pointer to adapter structure
4780  * @username: CHAP username to be returned
4781  * @password: CHAP password to be returned
4782  *
4783  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4784  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4785  * So from the CHAP cache find the first BIDI CHAP entry and set it
4786  * to the boot record in sysfs.
4787  **/
4788 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4789 			    char *password)
4790 {
4791 	int i, ret = -EINVAL;
4792 	int max_chap_entries = 0;
4793 	struct ql4_chap_table *chap_table;
4794 
4795 	if (is_qla80XX(ha))
4796 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
4797 						sizeof(struct ql4_chap_table);
4798 	else
4799 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4800 
4801 	if (!ha->chap_list) {
4802 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4803 		return ret;
4804 	}
4805 
4806 	mutex_lock(&ha->chap_sem);
4807 	for (i = 0; i < max_chap_entries; i++) {
4808 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4809 		if (chap_table->cookie !=
4810 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4811 			continue;
4812 		}
4813 
4814 		if (chap_table->flags & BIT_7) /* local */
4815 			continue;
4816 
4817 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4818 			continue;
4819 
4820 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4821 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4822 		ret = 0;
4823 		break;
4824 	}
4825 	mutex_unlock(&ha->chap_sem);
4826 
4827 	return ret;
4828 }
4829 
4830 
4831 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4832 				   struct ql4_boot_session_info *boot_sess,
4833 				   uint16_t ddb_index)
4834 {
4835 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4836 	struct dev_db_entry *fw_ddb_entry;
4837 	dma_addr_t fw_ddb_entry_dma;
4838 	uint16_t idx;
4839 	uint16_t options;
4840 	int ret = QLA_SUCCESS;
4841 
4842 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4843 					  &fw_ddb_entry_dma, GFP_KERNEL);
4844 	if (!fw_ddb_entry) {
4845 		DEBUG2(ql4_printk(KERN_ERR, ha,
4846 				  "%s: Unable to allocate dma buffer.\n",
4847 				  __func__));
4848 		ret = QLA_ERROR;
4849 		return ret;
4850 	}
4851 
4852 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4853 				   fw_ddb_entry_dma, ddb_index)) {
4854 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4855 				  "index [%d]\n", __func__, ddb_index));
4856 		ret = QLA_ERROR;
4857 		goto exit_boot_target;
4858 	}
4859 
4860 	/* Update target name and IP from DDB */
4861 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4862 	       min(sizeof(boot_sess->target_name),
4863 		   sizeof(fw_ddb_entry->iscsi_name)));
4864 
4865 	options = le16_to_cpu(fw_ddb_entry->options);
4866 	if (options & DDB_OPT_IPV6_DEVICE) {
4867 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4868 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4869 	} else {
4870 		boot_conn->dest_ipaddr.ip_type = 0x1;
4871 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4872 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4873 	}
4874 
4875 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4876 
4877 	/* update chap information */
4878 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4879 
4880 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
4881 
4882 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4883 
4884 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4885 				       target_chap_name,
4886 				       (char *)&boot_conn->chap.target_secret,
4887 				       idx);
4888 		if (ret) {
4889 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4890 			ret = QLA_ERROR;
4891 			goto exit_boot_target;
4892 		}
4893 
4894 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4895 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4896 	}
4897 
4898 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4899 
4900 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4901 
4902 		ret = qla4xxx_get_bidi_chap(ha,
4903 				    (char *)&boot_conn->chap.intr_chap_name,
4904 				    (char *)&boot_conn->chap.intr_secret);
4905 
4906 		if (ret) {
4907 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4908 			ret = QLA_ERROR;
4909 			goto exit_boot_target;
4910 		}
4911 
4912 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4913 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4914 	}
4915 
4916 exit_boot_target:
4917 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4918 			  fw_ddb_entry, fw_ddb_entry_dma);
4919 	return ret;
4920 }
4921 
4922 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4923 {
4924 	uint16_t ddb_index[2];
4925 	int ret = QLA_ERROR;
4926 	int rval;
4927 
4928 	memset(ddb_index, 0, sizeof(ddb_index));
4929 	ddb_index[0] = 0xffff;
4930 	ddb_index[1] = 0xffff;
4931 	ret = get_fw_boot_info(ha, ddb_index);
4932 	if (ret != QLA_SUCCESS) {
4933 		DEBUG2(ql4_printk(KERN_INFO, ha,
4934 				"%s: No boot target configured.\n", __func__));
4935 		return ret;
4936 	}
4937 
4938 	if (ql4xdisablesysfsboot)
4939 		return QLA_SUCCESS;
4940 
4941 	if (ddb_index[0] == 0xffff)
4942 		goto sec_target;
4943 
4944 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4945 				      ddb_index[0]);
4946 	if (rval != QLA_SUCCESS) {
4947 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4948 				  "configured\n", __func__));
4949 	} else
4950 		ret = QLA_SUCCESS;
4951 
4952 sec_target:
4953 	if (ddb_index[1] == 0xffff)
4954 		goto exit_get_boot_info;
4955 
4956 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4957 				      ddb_index[1]);
4958 	if (rval != QLA_SUCCESS) {
4959 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4960 				  " configured\n", __func__));
4961 	} else
4962 		ret = QLA_SUCCESS;
4963 
4964 exit_get_boot_info:
4965 	return ret;
4966 }
4967 
4968 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4969 {
4970 	struct iscsi_boot_kobj *boot_kobj;
4971 
4972 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4973 		return QLA_ERROR;
4974 
4975 	if (ql4xdisablesysfsboot) {
4976 		ql4_printk(KERN_INFO, ha,
4977 			   "%s: syfsboot disabled - driver will trigger login "
4978 			   "and publish session for discovery .\n", __func__);
4979 		return QLA_SUCCESS;
4980 	}
4981 
4982 
4983 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4984 	if (!ha->boot_kset)
4985 		goto kset_free;
4986 
4987 	if (!scsi_host_get(ha->host))
4988 		goto kset_free;
4989 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4990 					     qla4xxx_show_boot_tgt_pri_info,
4991 					     qla4xxx_tgt_get_attr_visibility,
4992 					     qla4xxx_boot_release);
4993 	if (!boot_kobj)
4994 		goto put_host;
4995 
4996 	if (!scsi_host_get(ha->host))
4997 		goto kset_free;
4998 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4999 					     qla4xxx_show_boot_tgt_sec_info,
5000 					     qla4xxx_tgt_get_attr_visibility,
5001 					     qla4xxx_boot_release);
5002 	if (!boot_kobj)
5003 		goto put_host;
5004 
5005 	if (!scsi_host_get(ha->host))
5006 		goto kset_free;
5007 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
5008 					       qla4xxx_show_boot_ini_info,
5009 					       qla4xxx_ini_get_attr_visibility,
5010 					       qla4xxx_boot_release);
5011 	if (!boot_kobj)
5012 		goto put_host;
5013 
5014 	if (!scsi_host_get(ha->host))
5015 		goto kset_free;
5016 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
5017 					       qla4xxx_show_boot_eth_info,
5018 					       qla4xxx_eth_get_attr_visibility,
5019 					       qla4xxx_boot_release);
5020 	if (!boot_kobj)
5021 		goto put_host;
5022 
5023 	return QLA_SUCCESS;
5024 
5025 put_host:
5026 	scsi_host_put(ha->host);
5027 kset_free:
5028 	iscsi_boot_destroy_kset(ha->boot_kset);
5029 	return -ENOMEM;
5030 }
5031 
5032 
5033 /**
5034  * qla4xxx_create chap_list - Create CHAP list from FLASH
5035  * @ha: pointer to adapter structure
5036  *
5037  * Read flash and make a list of CHAP entries, during login when a CHAP entry
5038  * is received, it will be checked in this list. If entry exist then the CHAP
5039  * entry index is set in the DDB. If CHAP entry does not exist in this list
5040  * then a new entry is added in FLASH in CHAP table and the index obtained is
5041  * used in the DDB.
5042  **/
5043 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
5044 {
5045 	int rval = 0;
5046 	uint8_t *chap_flash_data = NULL;
5047 	uint32_t offset;
5048 	dma_addr_t chap_dma;
5049 	uint32_t chap_size = 0;
5050 
5051 	if (is_qla40XX(ha))
5052 		chap_size = MAX_CHAP_ENTRIES_40XX  *
5053 					sizeof(struct ql4_chap_table);
5054 	else	/* Single region contains CHAP info for both
5055 		 * ports which is divided into half for each port.
5056 		 */
5057 		chap_size = ha->hw.flt_chap_size / 2;
5058 
5059 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
5060 					  &chap_dma, GFP_KERNEL);
5061 	if (!chap_flash_data) {
5062 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
5063 		return;
5064 	}
5065 	if (is_qla40XX(ha))
5066 		offset = FLASH_CHAP_OFFSET;
5067 	else {
5068 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
5069 		if (ha->port_num == 1)
5070 			offset += chap_size;
5071 	}
5072 
5073 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
5074 	if (rval != QLA_SUCCESS)
5075 		goto exit_chap_list;
5076 
5077 	if (ha->chap_list == NULL)
5078 		ha->chap_list = vmalloc(chap_size);
5079 	if (ha->chap_list == NULL) {
5080 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
5081 		goto exit_chap_list;
5082 	}
5083 
5084 	memcpy(ha->chap_list, chap_flash_data, chap_size);
5085 
5086 exit_chap_list:
5087 	dma_free_coherent(&ha->pdev->dev, chap_size,
5088 			chap_flash_data, chap_dma);
5089 }
5090 
5091 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
5092 				  struct ql4_tuple_ddb *tddb)
5093 {
5094 	struct scsi_qla_host *ha;
5095 	struct iscsi_cls_session *cls_sess;
5096 	struct iscsi_cls_conn *cls_conn;
5097 	struct iscsi_session *sess;
5098 	struct iscsi_conn *conn;
5099 
5100 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
5101 	ha = ddb_entry->ha;
5102 	cls_sess = ddb_entry->sess;
5103 	sess = cls_sess->dd_data;
5104 	cls_conn = ddb_entry->conn;
5105 	conn = cls_conn->dd_data;
5106 
5107 	tddb->tpgt = sess->tpgt;
5108 	tddb->port = conn->persistent_port;
5109 	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
5110 	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
5111 }
5112 
5113 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
5114 				      struct ql4_tuple_ddb *tddb,
5115 				      uint8_t *flash_isid)
5116 {
5117 	uint16_t options = 0;
5118 
5119 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
5120 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
5121 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
5122 
5123 	options = le16_to_cpu(fw_ddb_entry->options);
5124 	if (options & DDB_OPT_IPV6_DEVICE)
5125 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
5126 	else
5127 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
5128 
5129 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
5130 
5131 	if (flash_isid == NULL)
5132 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
5133 		       sizeof(tddb->isid));
5134 	else
5135 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
5136 }
5137 
5138 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
5139 				     struct ql4_tuple_ddb *old_tddb,
5140 				     struct ql4_tuple_ddb *new_tddb,
5141 				     uint8_t is_isid_compare)
5142 {
5143 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5144 		return QLA_ERROR;
5145 
5146 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
5147 		return QLA_ERROR;
5148 
5149 	if (old_tddb->port != new_tddb->port)
5150 		return QLA_ERROR;
5151 
5152 	/* For multi sessions, driver generates the ISID, so do not compare
5153 	 * ISID in reset path since it would be a comparison between the
5154 	 * driver generated ISID and firmware generated ISID. This could
5155 	 * lead to adding duplicated DDBs in the list as driver generated
5156 	 * ISID would not match firmware generated ISID.
5157 	 */
5158 	if (is_isid_compare) {
5159 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
5160 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
5161 			__func__, old_tddb->isid[5], old_tddb->isid[4],
5162 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
5163 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
5164 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
5165 			new_tddb->isid[0]));
5166 
5167 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5168 			   sizeof(old_tddb->isid)))
5169 			return QLA_ERROR;
5170 	}
5171 
5172 	DEBUG2(ql4_printk(KERN_INFO, ha,
5173 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
5174 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
5175 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
5176 			  new_tddb->ip_addr, new_tddb->iscsi_name));
5177 
5178 	return QLA_SUCCESS;
5179 }
5180 
5181 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
5182 				     struct dev_db_entry *fw_ddb_entry,
5183 				     uint32_t *index)
5184 {
5185 	struct ddb_entry *ddb_entry;
5186 	struct ql4_tuple_ddb *fw_tddb = NULL;
5187 	struct ql4_tuple_ddb *tmp_tddb = NULL;
5188 	int idx;
5189 	int ret = QLA_ERROR;
5190 
5191 	fw_tddb = vzalloc(sizeof(*fw_tddb));
5192 	if (!fw_tddb) {
5193 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5194 				  "Memory Allocation failed.\n"));
5195 		ret = QLA_SUCCESS;
5196 		goto exit_check;
5197 	}
5198 
5199 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5200 	if (!tmp_tddb) {
5201 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5202 				  "Memory Allocation failed.\n"));
5203 		ret = QLA_SUCCESS;
5204 		goto exit_check;
5205 	}
5206 
5207 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5208 
5209 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5210 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5211 		if (ddb_entry == NULL)
5212 			continue;
5213 
5214 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
5215 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
5216 			ret = QLA_SUCCESS; /* found */
5217 			if (index != NULL)
5218 				*index = idx;
5219 			goto exit_check;
5220 		}
5221 	}
5222 
5223 exit_check:
5224 	if (fw_tddb)
5225 		vfree(fw_tddb);
5226 	if (tmp_tddb)
5227 		vfree(tmp_tddb);
5228 	return ret;
5229 }
5230 
5231 /**
5232  * qla4xxx_check_existing_isid - check if target with same isid exist
5233  *				 in target list
5234  * @list_nt: list of target
5235  * @isid: isid to check
5236  *
5237  * This routine return QLA_SUCCESS if target with same isid exist
5238  **/
5239 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
5240 {
5241 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
5242 	struct dev_db_entry *fw_ddb_entry;
5243 
5244 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5245 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
5246 
5247 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
5248 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
5249 			return QLA_SUCCESS;
5250 		}
5251 	}
5252 	return QLA_ERROR;
5253 }
5254 
5255 /**
5256  * qla4xxx_update_isid - compare ddbs and updated isid
5257  * @ha: Pointer to host adapter structure.
5258  * @list_nt: list of nt target
5259  * @fw_ddb_entry: firmware ddb entry
5260  *
5261  * This routine update isid if ddbs have same iqn, same isid and
5262  * different IP addr.
5263  * Return QLA_SUCCESS if isid is updated.
5264  **/
5265 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
5266 			       struct list_head *list_nt,
5267 			       struct dev_db_entry *fw_ddb_entry)
5268 {
5269 	uint8_t base_value, i;
5270 
5271 	base_value = fw_ddb_entry->isid[1] & 0x1f;
5272 	for (i = 0; i < 8; i++) {
5273 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
5274 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5275 			break;
5276 	}
5277 
5278 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
5279 		return QLA_ERROR;
5280 
5281 	return QLA_SUCCESS;
5282 }
5283 
5284 /**
5285  * qla4xxx_should_update_isid - check if isid need to update
5286  * @ha: Pointer to host adapter structure.
5287  * @old_tddb: ddb tuple
5288  * @new_tddb: ddb tuple
5289  *
5290  * Return QLA_SUCCESS if different IP, different PORT, same iqn,
5291  * same isid
5292  **/
5293 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
5294 				      struct ql4_tuple_ddb *old_tddb,
5295 				      struct ql4_tuple_ddb *new_tddb)
5296 {
5297 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
5298 		/* Same ip */
5299 		if (old_tddb->port == new_tddb->port)
5300 			return QLA_ERROR;
5301 	}
5302 
5303 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
5304 		/* different iqn */
5305 		return QLA_ERROR;
5306 
5307 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
5308 		   sizeof(old_tddb->isid)))
5309 		/* different isid */
5310 		return QLA_ERROR;
5311 
5312 	return QLA_SUCCESS;
5313 }
5314 
5315 /**
5316  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
5317  * @ha: Pointer to host adapter structure.
5318  * @list_nt: list of nt target.
5319  * @fw_ddb_entry: firmware ddb entry.
5320  *
5321  * This routine check if fw_ddb_entry already exists in list_nt to avoid
5322  * duplicate ddb in list_nt.
5323  * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
5324  * Note: This function also update isid of DDB if required.
5325  **/
5326 
5327 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
5328 				       struct list_head *list_nt,
5329 				       struct dev_db_entry *fw_ddb_entry)
5330 {
5331 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
5332 	struct ql4_tuple_ddb *fw_tddb = NULL;
5333 	struct ql4_tuple_ddb *tmp_tddb = NULL;
5334 	int rval, ret = QLA_ERROR;
5335 
5336 	fw_tddb = vzalloc(sizeof(*fw_tddb));
5337 	if (!fw_tddb) {
5338 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5339 				  "Memory Allocation failed.\n"));
5340 		ret = QLA_SUCCESS;
5341 		goto exit_check;
5342 	}
5343 
5344 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
5345 	if (!tmp_tddb) {
5346 		DEBUG2(ql4_printk(KERN_WARNING, ha,
5347 				  "Memory Allocation failed.\n"));
5348 		ret = QLA_SUCCESS;
5349 		goto exit_check;
5350 	}
5351 
5352 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
5353 
5354 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5355 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
5356 					  nt_ddb_idx->flash_isid);
5357 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
5358 		/* found duplicate ddb */
5359 		if (ret == QLA_SUCCESS)
5360 			goto exit_check;
5361 	}
5362 
5363 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
5364 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
5365 
5366 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
5367 		if (ret == QLA_SUCCESS) {
5368 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
5369 			if (rval == QLA_SUCCESS)
5370 				ret = QLA_ERROR;
5371 			else
5372 				ret = QLA_SUCCESS;
5373 
5374 			goto exit_check;
5375 		}
5376 	}
5377 
5378 exit_check:
5379 	if (fw_tddb)
5380 		vfree(fw_tddb);
5381 	if (tmp_tddb)
5382 		vfree(tmp_tddb);
5383 	return ret;
5384 }
5385 
5386 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5387 {
5388 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5389 
5390 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5391 		list_del_init(&ddb_idx->list);
5392 		vfree(ddb_idx);
5393 	}
5394 }
5395 
5396 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5397 					struct dev_db_entry *fw_ddb_entry)
5398 {
5399 	struct iscsi_endpoint *ep;
5400 	struct sockaddr_in *addr;
5401 	struct sockaddr_in6 *addr6;
5402 	struct sockaddr *t_addr;
5403 	struct sockaddr_storage *dst_addr;
5404 	char *ip;
5405 
5406 	/* TODO: need to destroy on unload iscsi_endpoint*/
5407 	dst_addr = vmalloc(sizeof(*dst_addr));
5408 	if (!dst_addr)
5409 		return NULL;
5410 
5411 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5412 		t_addr = (struct sockaddr *)dst_addr;
5413 		t_addr->sa_family = AF_INET6;
5414 		addr6 = (struct sockaddr_in6 *)dst_addr;
5415 		ip = (char *)&addr6->sin6_addr;
5416 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5417 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5418 
5419 	} else {
5420 		t_addr = (struct sockaddr *)dst_addr;
5421 		t_addr->sa_family = AF_INET;
5422 		addr = (struct sockaddr_in *)dst_addr;
5423 		ip = (char *)&addr->sin_addr;
5424 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5425 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5426 	}
5427 
5428 	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5429 	vfree(dst_addr);
5430 	return ep;
5431 }
5432 
5433 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5434 {
5435 	if (ql4xdisablesysfsboot)
5436 		return QLA_SUCCESS;
5437 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5438 		return QLA_ERROR;
5439 	return QLA_SUCCESS;
5440 }
5441 
5442 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
5443 					  struct ddb_entry *ddb_entry,
5444 					  uint16_t idx)
5445 {
5446 	uint16_t def_timeout;
5447 
5448 	ddb_entry->ddb_type = FLASH_DDB;
5449 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
5450 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
5451 	ddb_entry->ha = ha;
5452 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
5453 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
5454 	ddb_entry->chap_tbl_idx = INVALID_ENTRY;
5455 
5456 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
5457 	atomic_set(&ddb_entry->relogin_timer, 0);
5458 	atomic_set(&ddb_entry->relogin_retry_count, 0);
5459 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
5460 	ddb_entry->default_relogin_timeout =
5461 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
5462 		def_timeout : LOGIN_TOV;
5463 	ddb_entry->default_time2wait =
5464 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
5465 
5466 	if (ql4xdisablesysfsboot &&
5467 	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
5468 		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
5469 }
5470 
5471 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
5472 {
5473 	uint32_t idx = 0;
5474 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
5475 	uint32_t sts[MBOX_REG_COUNT];
5476 	uint32_t ip_state;
5477 	unsigned long wtime;
5478 	int ret;
5479 
5480 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
5481 	do {
5482 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
5483 			if (ip_idx[idx] == -1)
5484 				continue;
5485 
5486 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
5487 
5488 			if (ret == QLA_ERROR) {
5489 				ip_idx[idx] = -1;
5490 				continue;
5491 			}
5492 
5493 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
5494 
5495 			DEBUG2(ql4_printk(KERN_INFO, ha,
5496 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
5497 					  ip_idx[idx], ip_state));
5498 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
5499 			    ip_state == IP_ADDRSTATE_INVALID ||
5500 			    ip_state == IP_ADDRSTATE_PREFERRED ||
5501 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
5502 			    ip_state == IP_ADDRSTATE_DISABLING)
5503 				ip_idx[idx] = -1;
5504 		}
5505 
5506 		/* Break if all IP states checked */
5507 		if ((ip_idx[0] == -1) &&
5508 		    (ip_idx[1] == -1) &&
5509 		    (ip_idx[2] == -1) &&
5510 		    (ip_idx[3] == -1))
5511 			break;
5512 		schedule_timeout_uninterruptible(HZ);
5513 	} while (time_after(wtime, jiffies));
5514 }
5515 
5516 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
5517 				  struct dev_db_entry *flash_ddb_entry)
5518 {
5519 	uint16_t options = 0;
5520 	size_t ip_len = IP_ADDR_LEN;
5521 
5522 	options = le16_to_cpu(fw_ddb_entry->options);
5523 	if (options & DDB_OPT_IPV6_DEVICE)
5524 		ip_len = IPv6_ADDR_LEN;
5525 
5526 	if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
5527 		return QLA_ERROR;
5528 
5529 	if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
5530 		   sizeof(fw_ddb_entry->isid)))
5531 		return QLA_ERROR;
5532 
5533 	if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
5534 		   sizeof(fw_ddb_entry->port)))
5535 		return QLA_ERROR;
5536 
5537 	return QLA_SUCCESS;
5538 }
5539 
5540 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
5541 				     struct dev_db_entry *fw_ddb_entry,
5542 				     uint32_t fw_idx, uint32_t *flash_index)
5543 {
5544 	struct dev_db_entry *flash_ddb_entry;
5545 	dma_addr_t flash_ddb_entry_dma;
5546 	uint32_t idx = 0;
5547 	int max_ddbs;
5548 	int ret = QLA_ERROR, status;
5549 
5550 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5551 				     MAX_DEV_DB_ENTRIES;
5552 
5553 	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5554 					 &flash_ddb_entry_dma);
5555 	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
5556 		ql4_printk(KERN_ERR, ha, "Out of memory\n");
5557 		goto exit_find_st_idx;
5558 	}
5559 
5560 	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5561 					  flash_ddb_entry_dma, fw_idx);
5562 	if (status == QLA_SUCCESS) {
5563 		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5564 		if (status == QLA_SUCCESS) {
5565 			*flash_index = fw_idx;
5566 			ret = QLA_SUCCESS;
5567 			goto exit_find_st_idx;
5568 		}
5569 	}
5570 
5571 	for (idx = 0; idx < max_ddbs; idx++) {
5572 		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
5573 						  flash_ddb_entry_dma, idx);
5574 		if (status == QLA_ERROR)
5575 			continue;
5576 
5577 		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
5578 		if (status == QLA_SUCCESS) {
5579 			*flash_index = idx;
5580 			ret = QLA_SUCCESS;
5581 			goto exit_find_st_idx;
5582 		}
5583 	}
5584 
5585 	if (idx == max_ddbs)
5586 		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
5587 			   fw_idx);
5588 
5589 exit_find_st_idx:
5590 	if (flash_ddb_entry)
5591 		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
5592 			      flash_ddb_entry_dma);
5593 
5594 	return ret;
5595 }
5596 
5597 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
5598 				  struct list_head *list_st)
5599 {
5600 	struct qla_ddb_index  *st_ddb_idx;
5601 	int max_ddbs;
5602 	int fw_idx_size;
5603 	struct dev_db_entry *fw_ddb_entry;
5604 	dma_addr_t fw_ddb_dma;
5605 	int ret;
5606 	uint32_t idx = 0, next_idx = 0;
5607 	uint32_t state = 0, conn_err = 0;
5608 	uint32_t flash_index = -1;
5609 	uint16_t conn_id = 0;
5610 
5611 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5612 				      &fw_ddb_dma);
5613 	if (fw_ddb_entry == NULL) {
5614 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5615 		goto exit_st_list;
5616 	}
5617 
5618 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5619 				     MAX_DEV_DB_ENTRIES;
5620 	fw_idx_size = sizeof(struct qla_ddb_index);
5621 
5622 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5623 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5624 					      NULL, &next_idx, &state,
5625 					      &conn_err, NULL, &conn_id);
5626 		if (ret == QLA_ERROR)
5627 			break;
5628 
5629 		/* Ignore DDB if invalid state (unassigned) */
5630 		if (state == DDB_DS_UNASSIGNED)
5631 			goto continue_next_st;
5632 
5633 		/* Check if ST, add to the list_st */
5634 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
5635 			goto continue_next_st;
5636 
5637 		st_ddb_idx = vzalloc(fw_idx_size);
5638 		if (!st_ddb_idx)
5639 			break;
5640 
5641 		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
5642 						&flash_index);
5643 		if (ret == QLA_ERROR) {
5644 			ql4_printk(KERN_ERR, ha,
5645 				   "No flash entry for ST at idx [%d]\n", idx);
5646 			st_ddb_idx->flash_ddb_idx = idx;
5647 		} else {
5648 			ql4_printk(KERN_INFO, ha,
5649 				   "ST at idx [%d] is stored at flash [%d]\n",
5650 				   idx, flash_index);
5651 			st_ddb_idx->flash_ddb_idx = flash_index;
5652 		}
5653 
5654 		st_ddb_idx->fw_ddb_idx = idx;
5655 
5656 		list_add_tail(&st_ddb_idx->list, list_st);
5657 continue_next_st:
5658 		if (next_idx == 0)
5659 			break;
5660 	}
5661 
5662 exit_st_list:
5663 	if (fw_ddb_entry)
5664 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5665 }
5666 
5667 /**
5668  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5669  * @ha: pointer to adapter structure
5670  * @list_ddb: List from which failed ddb to be removed
5671  *
5672  * Iterate over the list of DDBs and find and remove DDBs that are either in
5673  * no connection active state or failed state
5674  **/
5675 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5676 				      struct list_head *list_ddb)
5677 {
5678 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5679 	uint32_t next_idx = 0;
5680 	uint32_t state = 0, conn_err = 0;
5681 	int ret;
5682 
5683 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5684 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5685 					      NULL, 0, NULL, &next_idx, &state,
5686 					      &conn_err, NULL, NULL);
5687 		if (ret == QLA_ERROR)
5688 			continue;
5689 
5690 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5691 		    state == DDB_DS_SESSION_FAILED) {
5692 			list_del_init(&ddb_idx->list);
5693 			vfree(ddb_idx);
5694 		}
5695 	}
5696 }
5697 
5698 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
5699 					 struct ddb_entry *ddb_entry,
5700 					 struct dev_db_entry *fw_ddb_entry)
5701 {
5702 	struct iscsi_cls_session *cls_sess;
5703 	struct iscsi_session *sess;
5704 	uint32_t max_ddbs = 0;
5705 	uint16_t ddb_link = -1;
5706 
5707 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5708 				     MAX_DEV_DB_ENTRIES;
5709 
5710 	cls_sess = ddb_entry->sess;
5711 	sess = cls_sess->dd_data;
5712 
5713 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5714 	if (ddb_link < max_ddbs)
5715 		sess->discovery_parent_idx = ddb_link;
5716 	else
5717 		sess->discovery_parent_idx = DDB_NO_LINK;
5718 }
5719 
5720 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
5721 				   struct dev_db_entry *fw_ddb_entry,
5722 				   int is_reset, uint16_t idx)
5723 {
5724 	struct iscsi_cls_session *cls_sess;
5725 	struct iscsi_session *sess;
5726 	struct iscsi_cls_conn *cls_conn;
5727 	struct iscsi_endpoint *ep;
5728 	uint16_t cmds_max = 32;
5729 	uint16_t conn_id = 0;
5730 	uint32_t initial_cmdsn = 0;
5731 	int ret = QLA_SUCCESS;
5732 
5733 	struct ddb_entry *ddb_entry = NULL;
5734 
5735 	/* Create session object, with INVALID_ENTRY,
5736 	 * the targer_id would get set when we issue the login
5737 	 */
5738 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
5739 				       cmds_max, sizeof(struct ddb_entry),
5740 				       sizeof(struct ql4_task_data),
5741 				       initial_cmdsn, INVALID_ENTRY);
5742 	if (!cls_sess) {
5743 		ret = QLA_ERROR;
5744 		goto exit_setup;
5745 	}
5746 
5747 	/*
5748 	 * so calling module_put function to decrement the
5749 	 * reference count.
5750 	 **/
5751 	module_put(qla4xxx_iscsi_transport.owner);
5752 	sess = cls_sess->dd_data;
5753 	ddb_entry = sess->dd_data;
5754 	ddb_entry->sess = cls_sess;
5755 
5756 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
5757 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
5758 	       sizeof(struct dev_db_entry));
5759 
5760 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
5761 
5762 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
5763 
5764 	if (!cls_conn) {
5765 		ret = QLA_ERROR;
5766 		goto exit_setup;
5767 	}
5768 
5769 	ddb_entry->conn = cls_conn;
5770 
5771 	/* Setup ep, for displaying attributes in sysfs */
5772 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
5773 	if (ep) {
5774 		ep->conn = cls_conn;
5775 		cls_conn->ep = ep;
5776 	} else {
5777 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
5778 		ret = QLA_ERROR;
5779 		goto exit_setup;
5780 	}
5781 
5782 	/* Update sess/conn params */
5783 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
5784 	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
5785 
5786 	if (is_reset == RESET_ADAPTER) {
5787 		iscsi_block_session(cls_sess);
5788 		/* Use the relogin path to discover new devices
5789 		 *  by short-circuting the logic of setting
5790 		 *  timer to relogin - instead set the flags
5791 		 *  to initiate login right away.
5792 		 */
5793 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
5794 		set_bit(DF_RELOGIN, &ddb_entry->flags);
5795 	}
5796 
5797 exit_setup:
5798 	return ret;
5799 }
5800 
5801 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
5802 				       struct list_head *list_ddb,
5803 				       struct dev_db_entry *fw_ddb_entry)
5804 {
5805 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5806 	uint16_t ddb_link;
5807 
5808 	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5809 
5810 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5811 		if (ddb_idx->fw_ddb_idx == ddb_link) {
5812 			DEBUG2(ql4_printk(KERN_INFO, ha,
5813 					  "Updating NT parent idx from [%d] to [%d]\n",
5814 					  ddb_link, ddb_idx->flash_ddb_idx));
5815 			fw_ddb_entry->ddb_link =
5816 					    cpu_to_le16(ddb_idx->flash_ddb_idx);
5817 			return;
5818 		}
5819 	}
5820 }
5821 
5822 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5823 				  struct list_head *list_nt,
5824 				  struct list_head *list_st,
5825 				  int is_reset)
5826 {
5827 	struct dev_db_entry *fw_ddb_entry;
5828 	struct ddb_entry *ddb_entry = NULL;
5829 	dma_addr_t fw_ddb_dma;
5830 	int max_ddbs;
5831 	int fw_idx_size;
5832 	int ret;
5833 	uint32_t idx = 0, next_idx = 0;
5834 	uint32_t state = 0, conn_err = 0;
5835 	uint32_t ddb_idx = -1;
5836 	uint16_t conn_id = 0;
5837 	uint16_t ddb_link = -1;
5838 	struct qla_ddb_index  *nt_ddb_idx;
5839 
5840 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5841 				      &fw_ddb_dma);
5842 	if (fw_ddb_entry == NULL) {
5843 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5844 		goto exit_nt_list;
5845 	}
5846 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5847 				     MAX_DEV_DB_ENTRIES;
5848 	fw_idx_size = sizeof(struct qla_ddb_index);
5849 
5850 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5851 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5852 					      NULL, &next_idx, &state,
5853 					      &conn_err, NULL, &conn_id);
5854 		if (ret == QLA_ERROR)
5855 			break;
5856 
5857 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
5858 			goto continue_next_nt;
5859 
5860 		/* Check if NT, then add to list it */
5861 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
5862 			goto continue_next_nt;
5863 
5864 		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
5865 		if (ddb_link < max_ddbs)
5866 			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
5867 
5868 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
5869 		    state == DDB_DS_SESSION_FAILED) &&
5870 		    (is_reset == INIT_ADAPTER))
5871 			goto continue_next_nt;
5872 
5873 		DEBUG2(ql4_printk(KERN_INFO, ha,
5874 				  "Adding  DDB to session = 0x%x\n", idx));
5875 
5876 		if (is_reset == INIT_ADAPTER) {
5877 			nt_ddb_idx = vmalloc(fw_idx_size);
5878 			if (!nt_ddb_idx)
5879 				break;
5880 
5881 			nt_ddb_idx->fw_ddb_idx = idx;
5882 
5883 			/* Copy original isid as it may get updated in function
5884 			 * qla4xxx_update_isid(). We need original isid in
5885 			 * function qla4xxx_compare_tuple_ddb to find duplicate
5886 			 * target */
5887 			memcpy(&nt_ddb_idx->flash_isid[0],
5888 			       &fw_ddb_entry->isid[0],
5889 			       sizeof(nt_ddb_idx->flash_isid));
5890 
5891 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
5892 							  fw_ddb_entry);
5893 			if (ret == QLA_SUCCESS) {
5894 				/* free nt_ddb_idx and do not add to list_nt */
5895 				vfree(nt_ddb_idx);
5896 				goto continue_next_nt;
5897 			}
5898 
5899 			/* Copy updated isid */
5900 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
5901 			       sizeof(struct dev_db_entry));
5902 
5903 			list_add_tail(&nt_ddb_idx->list, list_nt);
5904 		} else if (is_reset == RESET_ADAPTER) {
5905 			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
5906 							&ddb_idx);
5907 			if (ret == QLA_SUCCESS) {
5908 				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
5909 								       ddb_idx);
5910 				if (ddb_entry != NULL)
5911 					qla4xxx_update_sess_disc_idx(ha,
5912 								     ddb_entry,
5913 								  fw_ddb_entry);
5914 				goto continue_next_nt;
5915 			}
5916 		}
5917 
5918 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5919 		if (ret == QLA_ERROR)
5920 			goto exit_nt_list;
5921 
5922 continue_next_nt:
5923 		if (next_idx == 0)
5924 			break;
5925 	}
5926 
5927 exit_nt_list:
5928 	if (fw_ddb_entry)
5929 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5930 }
5931 
5932 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
5933 				      struct list_head *list_nt,
5934 				      uint16_t target_id)
5935 {
5936 	struct dev_db_entry *fw_ddb_entry;
5937 	dma_addr_t fw_ddb_dma;
5938 	int max_ddbs;
5939 	int fw_idx_size;
5940 	int ret;
5941 	uint32_t idx = 0, next_idx = 0;
5942 	uint32_t state = 0, conn_err = 0;
5943 	uint16_t conn_id = 0;
5944 	struct qla_ddb_index  *nt_ddb_idx;
5945 
5946 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5947 				      &fw_ddb_dma);
5948 	if (fw_ddb_entry == NULL) {
5949 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5950 		goto exit_new_nt_list;
5951 	}
5952 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5953 				     MAX_DEV_DB_ENTRIES;
5954 	fw_idx_size = sizeof(struct qla_ddb_index);
5955 
5956 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5957 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5958 					      NULL, &next_idx, &state,
5959 					      &conn_err, NULL, &conn_id);
5960 		if (ret == QLA_ERROR)
5961 			break;
5962 
5963 		/* Check if NT, then add it to list */
5964 		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5965 			goto continue_next_new_nt;
5966 
5967 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
5968 			goto continue_next_new_nt;
5969 
5970 		DEBUG2(ql4_printk(KERN_INFO, ha,
5971 				  "Adding  DDB to session = 0x%x\n", idx));
5972 
5973 		nt_ddb_idx = vmalloc(fw_idx_size);
5974 		if (!nt_ddb_idx)
5975 			break;
5976 
5977 		nt_ddb_idx->fw_ddb_idx = idx;
5978 
5979 		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
5980 		if (ret == QLA_SUCCESS) {
5981 			/* free nt_ddb_idx and do not add to list_nt */
5982 			vfree(nt_ddb_idx);
5983 			goto continue_next_new_nt;
5984 		}
5985 
5986 		if (target_id < max_ddbs)
5987 			fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
5988 
5989 		list_add_tail(&nt_ddb_idx->list, list_nt);
5990 
5991 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5992 					      idx);
5993 		if (ret == QLA_ERROR)
5994 			goto exit_new_nt_list;
5995 
5996 continue_next_new_nt:
5997 		if (next_idx == 0)
5998 			break;
5999 	}
6000 
6001 exit_new_nt_list:
6002 	if (fw_ddb_entry)
6003 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6004 }
6005 
6006 /**
6007  * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
6008  * @dev: dev associated with the sysfs entry
6009  * @data: pointer to flashnode session object
6010  *
6011  * Returns:
6012  *	1: if flashnode entry is non-persistent
6013  *	0: if flashnode entry is persistent
6014  **/
6015 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
6016 {
6017 	struct iscsi_bus_flash_session *fnode_sess;
6018 
6019 	if (!iscsi_flashnode_bus_match(dev, NULL))
6020 		return 0;
6021 
6022 	fnode_sess = iscsi_dev_to_flash_session(dev);
6023 
6024 	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
6025 }
6026 
6027 /**
6028  * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
6029  * @ha: pointer to host
6030  * @fw_ddb_entry: flash ddb data
6031  * @idx: target index
6032  * @user: if set then this call is made from userland else from kernel
6033  *
6034  * Returns:
6035  * On sucess: QLA_SUCCESS
6036  * On failure: QLA_ERROR
6037  *
6038  * This create separate sysfs entries for session and connection attributes of
6039  * the given fw ddb entry.
6040  * If this is invoked as a result of a userspace call then the entry is marked
6041  * as nonpersistent using flash_state field.
6042  **/
6043 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
6044 					struct dev_db_entry *fw_ddb_entry,
6045 					uint16_t *idx, int user)
6046 {
6047 	struct iscsi_bus_flash_session *fnode_sess = NULL;
6048 	struct iscsi_bus_flash_conn *fnode_conn = NULL;
6049 	int rc = QLA_ERROR;
6050 
6051 	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
6052 						 &qla4xxx_iscsi_transport, 0);
6053 	if (!fnode_sess) {
6054 		ql4_printk(KERN_ERR, ha,
6055 			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
6056 			   __func__, *idx, ha->host_no);
6057 		goto exit_tgt_create;
6058 	}
6059 
6060 	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
6061 						 &qla4xxx_iscsi_transport, 0);
6062 	if (!fnode_conn) {
6063 		ql4_printk(KERN_ERR, ha,
6064 			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
6065 			   __func__, *idx, ha->host_no);
6066 		goto free_sess;
6067 	}
6068 
6069 	if (user) {
6070 		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
6071 	} else {
6072 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
6073 
6074 		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
6075 			fnode_sess->is_boot_target = 1;
6076 		else
6077 			fnode_sess->is_boot_target = 0;
6078 	}
6079 
6080 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
6081 					   fw_ddb_entry);
6082 
6083 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
6084 		   __func__, fnode_sess->dev.kobj.name);
6085 
6086 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
6087 		   __func__, fnode_conn->dev.kobj.name);
6088 
6089 	return QLA_SUCCESS;
6090 
6091 free_sess:
6092 	iscsi_destroy_flashnode_sess(fnode_sess);
6093 
6094 exit_tgt_create:
6095 	return QLA_ERROR;
6096 }
6097 
6098 /**
6099  * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
6100  * @shost: pointer to host
6101  * @buf: type of ddb entry (ipv4/ipv6)
6102  * @len: length of buf
6103  *
6104  * This creates new ddb entry in the flash by finding first free index and
6105  * storing default ddb there. And then create sysfs entry for the new ddb entry.
6106  **/
6107 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
6108 				 int len)
6109 {
6110 	struct scsi_qla_host *ha = to_qla_host(shost);
6111 	struct dev_db_entry *fw_ddb_entry = NULL;
6112 	dma_addr_t fw_ddb_entry_dma;
6113 	struct device *dev;
6114 	uint16_t idx = 0;
6115 	uint16_t max_ddbs = 0;
6116 	uint32_t options = 0;
6117 	uint32_t rval = QLA_ERROR;
6118 
6119 	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
6120 	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
6121 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
6122 				  __func__));
6123 		goto exit_ddb_add;
6124 	}
6125 
6126 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
6127 				     MAX_DEV_DB_ENTRIES;
6128 
6129 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6130 					  &fw_ddb_entry_dma, GFP_KERNEL);
6131 	if (!fw_ddb_entry) {
6132 		DEBUG2(ql4_printk(KERN_ERR, ha,
6133 				  "%s: Unable to allocate dma buffer\n",
6134 				  __func__));
6135 		goto exit_ddb_add;
6136 	}
6137 
6138 	dev = iscsi_find_flashnode_sess(ha->host, NULL,
6139 					qla4xxx_sysfs_ddb_is_non_persistent);
6140 	if (dev) {
6141 		ql4_printk(KERN_ERR, ha,
6142 			   "%s: A non-persistent entry %s found\n",
6143 			   __func__, dev->kobj.name);
6144 		put_device(dev);
6145 		goto exit_ddb_add;
6146 	}
6147 
6148 	/* Index 0 and 1 are reserved for boot target entries */
6149 	for (idx = 2; idx < max_ddbs; idx++) {
6150 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
6151 					     fw_ddb_entry_dma, idx))
6152 			break;
6153 	}
6154 
6155 	if (idx == max_ddbs)
6156 		goto exit_ddb_add;
6157 
6158 	if (!strncasecmp("ipv6", buf, 4))
6159 		options |= IPV6_DEFAULT_DDB_ENTRY;
6160 
6161 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6162 	if (rval == QLA_ERROR)
6163 		goto exit_ddb_add;
6164 
6165 	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
6166 
6167 exit_ddb_add:
6168 	if (fw_ddb_entry)
6169 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6170 				  fw_ddb_entry, fw_ddb_entry_dma);
6171 	if (rval == QLA_SUCCESS)
6172 		return idx;
6173 	else
6174 		return -EIO;
6175 }
6176 
6177 /**
6178  * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
6179  * @fnode_sess: pointer to session attrs of flash ddb entry
6180  * @fnode_conn: pointer to connection attrs of flash ddb entry
6181  *
6182  * This writes the contents of target ddb buffer to Flash with a valid cookie
6183  * value in order to make the ddb entry persistent.
6184  **/
6185 static int  qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
6186 				    struct iscsi_bus_flash_conn *fnode_conn)
6187 {
6188 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6189 	struct scsi_qla_host *ha = to_qla_host(shost);
6190 	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6191 	struct dev_db_entry *fw_ddb_entry = NULL;
6192 	dma_addr_t fw_ddb_entry_dma;
6193 	uint32_t options = 0;
6194 	int rval = 0;
6195 
6196 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6197 					  &fw_ddb_entry_dma, GFP_KERNEL);
6198 	if (!fw_ddb_entry) {
6199 		DEBUG2(ql4_printk(KERN_ERR, ha,
6200 				  "%s: Unable to allocate dma buffer\n",
6201 				  __func__));
6202 		rval = -ENOMEM;
6203 		goto exit_ddb_apply;
6204 	}
6205 
6206 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6207 		options |= IPV6_DEFAULT_DDB_ENTRY;
6208 
6209 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6210 	if (rval == QLA_ERROR)
6211 		goto exit_ddb_apply;
6212 
6213 	dev_db_start_offset += (fnode_sess->target_id *
6214 				sizeof(*fw_ddb_entry));
6215 
6216 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6217 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6218 
6219 	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6220 				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
6221 
6222 	if (rval == QLA_SUCCESS) {
6223 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
6224 		ql4_printk(KERN_INFO, ha,
6225 			   "%s: flash node %u of host %lu written to flash\n",
6226 			   __func__, fnode_sess->target_id, ha->host_no);
6227 	} else {
6228 		rval = -EIO;
6229 		ql4_printk(KERN_ERR, ha,
6230 			   "%s: Error while writing flash node %u of host %lu to flash\n",
6231 			   __func__, fnode_sess->target_id, ha->host_no);
6232 	}
6233 
6234 exit_ddb_apply:
6235 	if (fw_ddb_entry)
6236 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6237 				  fw_ddb_entry, fw_ddb_entry_dma);
6238 	return rval;
6239 }
6240 
6241 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
6242 					   struct dev_db_entry *fw_ddb_entry,
6243 					   uint16_t idx)
6244 {
6245 	struct dev_db_entry *ddb_entry = NULL;
6246 	dma_addr_t ddb_entry_dma;
6247 	unsigned long wtime;
6248 	uint32_t mbx_sts = 0;
6249 	uint32_t state = 0, conn_err = 0;
6250 	uint16_t tmo = 0;
6251 	int ret = 0;
6252 
6253 	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6254 				       &ddb_entry_dma, GFP_KERNEL);
6255 	if (!ddb_entry) {
6256 		DEBUG2(ql4_printk(KERN_ERR, ha,
6257 				  "%s: Unable to allocate dma buffer\n",
6258 				  __func__));
6259 		return QLA_ERROR;
6260 	}
6261 
6262 	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
6263 
6264 	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
6265 	if (ret != QLA_SUCCESS) {
6266 		DEBUG2(ql4_printk(KERN_ERR, ha,
6267 				  "%s: Unable to set ddb entry for index %d\n",
6268 				  __func__, idx));
6269 		goto exit_ddb_conn_open;
6270 	}
6271 
6272 	qla4xxx_conn_open(ha, idx);
6273 
6274 	/* To ensure that sendtargets is done, wait for at least 12 secs */
6275 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
6276 	       (ha->def_timeout < LOGIN_TOV * 10) ?
6277 	       ha->def_timeout : LOGIN_TOV);
6278 
6279 	DEBUG2(ql4_printk(KERN_INFO, ha,
6280 			  "Default time to wait for login to ddb %d\n", tmo));
6281 
6282 	wtime = jiffies + (HZ * tmo);
6283 	do {
6284 		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
6285 					      NULL, &state, &conn_err, NULL,
6286 					      NULL);
6287 		if (ret == QLA_ERROR)
6288 			continue;
6289 
6290 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6291 		    state == DDB_DS_SESSION_FAILED)
6292 			break;
6293 
6294 		schedule_timeout_uninterruptible(HZ / 10);
6295 	} while (time_after(wtime, jiffies));
6296 
6297 exit_ddb_conn_open:
6298 	if (ddb_entry)
6299 		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
6300 				  ddb_entry, ddb_entry_dma);
6301 	return ret;
6302 }
6303 
6304 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
6305 				struct dev_db_entry *fw_ddb_entry,
6306 				uint16_t target_id)
6307 {
6308 	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6309 	struct list_head list_nt;
6310 	uint16_t ddb_index;
6311 	int ret = 0;
6312 
6313 	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
6314 		ql4_printk(KERN_WARNING, ha,
6315 			   "%s: A discovery already in progress!\n", __func__);
6316 		return QLA_ERROR;
6317 	}
6318 
6319 	INIT_LIST_HEAD(&list_nt);
6320 
6321 	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6322 
6323 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
6324 	if (ret == QLA_ERROR)
6325 		goto exit_login_st_clr_bit;
6326 
6327 	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
6328 	if (ret == QLA_ERROR)
6329 		goto exit_login_st;
6330 
6331 	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
6332 
6333 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
6334 		list_del_init(&ddb_idx->list);
6335 		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
6336 		vfree(ddb_idx);
6337 	}
6338 
6339 exit_login_st:
6340 	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
6341 		ql4_printk(KERN_ERR, ha,
6342 			   "Unable to clear DDB index = 0x%x\n", ddb_index);
6343 	}
6344 
6345 	clear_bit(ddb_index, ha->ddb_idx_map);
6346 
6347 exit_login_st_clr_bit:
6348 	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
6349 	return ret;
6350 }
6351 
6352 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
6353 				struct dev_db_entry *fw_ddb_entry,
6354 				uint16_t idx)
6355 {
6356 	int ret = QLA_ERROR;
6357 
6358 	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
6359 	if (ret != QLA_SUCCESS)
6360 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
6361 					      idx);
6362 	else
6363 		ret = -EPERM;
6364 
6365 	return ret;
6366 }
6367 
6368 /**
6369  * qla4xxx_sysfs_ddb_login - Login to the specified target
6370  * @fnode_sess: pointer to session attrs of flash ddb entry
6371  * @fnode_conn: pointer to connection attrs of flash ddb entry
6372  *
6373  * This logs in to the specified target
6374  **/
6375 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
6376 				   struct iscsi_bus_flash_conn *fnode_conn)
6377 {
6378 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6379 	struct scsi_qla_host *ha = to_qla_host(shost);
6380 	struct dev_db_entry *fw_ddb_entry = NULL;
6381 	dma_addr_t fw_ddb_entry_dma;
6382 	uint32_t options = 0;
6383 	int ret = 0;
6384 
6385 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
6386 		ql4_printk(KERN_ERR, ha,
6387 			   "%s: Target info is not persistent\n", __func__);
6388 		ret = -EIO;
6389 		goto exit_ddb_login;
6390 	}
6391 
6392 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6393 					  &fw_ddb_entry_dma, GFP_KERNEL);
6394 	if (!fw_ddb_entry) {
6395 		DEBUG2(ql4_printk(KERN_ERR, ha,
6396 				  "%s: Unable to allocate dma buffer\n",
6397 				  __func__));
6398 		ret = -ENOMEM;
6399 		goto exit_ddb_login;
6400 	}
6401 
6402 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6403 		options |= IPV6_DEFAULT_DDB_ENTRY;
6404 
6405 	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
6406 	if (ret == QLA_ERROR)
6407 		goto exit_ddb_login;
6408 
6409 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
6410 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
6411 
6412 	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
6413 		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
6414 					   fnode_sess->target_id);
6415 	else
6416 		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
6417 					   fnode_sess->target_id);
6418 
6419 	if (ret > 0)
6420 		ret = -EIO;
6421 
6422 exit_ddb_login:
6423 	if (fw_ddb_entry)
6424 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6425 				  fw_ddb_entry, fw_ddb_entry_dma);
6426 	return ret;
6427 }
6428 
6429 /**
6430  * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
6431  * @cls_sess: pointer to session to be logged out
6432  *
6433  * This performs session log out from the specified target
6434  **/
6435 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
6436 {
6437 	struct iscsi_session *sess;
6438 	struct ddb_entry *ddb_entry = NULL;
6439 	struct scsi_qla_host *ha;
6440 	struct dev_db_entry *fw_ddb_entry = NULL;
6441 	dma_addr_t fw_ddb_entry_dma;
6442 	unsigned long flags;
6443 	unsigned long wtime;
6444 	uint32_t ddb_state;
6445 	int options;
6446 	int ret = 0;
6447 
6448 	sess = cls_sess->dd_data;
6449 	ddb_entry = sess->dd_data;
6450 	ha = ddb_entry->ha;
6451 
6452 	if (ddb_entry->ddb_type != FLASH_DDB) {
6453 		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
6454 			   __func__);
6455 		ret = -ENXIO;
6456 		goto exit_ddb_logout;
6457 	}
6458 
6459 	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6460 		ql4_printk(KERN_ERR, ha,
6461 			   "%s: Logout from boot target entry is not permitted.\n",
6462 			   __func__);
6463 		ret = -EPERM;
6464 		goto exit_ddb_logout;
6465 	}
6466 
6467 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6468 					  &fw_ddb_entry_dma, GFP_KERNEL);
6469 	if (!fw_ddb_entry) {
6470 		ql4_printk(KERN_ERR, ha,
6471 			   "%s: Unable to allocate dma buffer\n", __func__);
6472 		ret = -ENOMEM;
6473 		goto exit_ddb_logout;
6474 	}
6475 
6476 	if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
6477 		goto ddb_logout_init;
6478 
6479 	ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6480 				      fw_ddb_entry, fw_ddb_entry_dma,
6481 				      NULL, NULL, &ddb_state, NULL,
6482 				      NULL, NULL);
6483 	if (ret == QLA_ERROR)
6484 		goto ddb_logout_init;
6485 
6486 	if (ddb_state == DDB_DS_SESSION_ACTIVE)
6487 		goto ddb_logout_init;
6488 
6489 	/* wait until next relogin is triggered using DF_RELOGIN and
6490 	 * clear DF_RELOGIN to avoid invocation of further relogin
6491 	 */
6492 	wtime = jiffies + (HZ * RELOGIN_TOV);
6493 	do {
6494 		if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
6495 			goto ddb_logout_init;
6496 
6497 		schedule_timeout_uninterruptible(HZ);
6498 	} while ((time_after(wtime, jiffies)));
6499 
6500 ddb_logout_init:
6501 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6502 	atomic_set(&ddb_entry->relogin_timer, 0);
6503 
6504 	options = LOGOUT_OPTION_CLOSE_SESSION;
6505 	qla4xxx_session_logout_ddb(ha, ddb_entry, options);
6506 
6507 	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
6508 	wtime = jiffies + (HZ * LOGOUT_TOV);
6509 	do {
6510 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6511 					      fw_ddb_entry, fw_ddb_entry_dma,
6512 					      NULL, NULL, &ddb_state, NULL,
6513 					      NULL, NULL);
6514 		if (ret == QLA_ERROR)
6515 			goto ddb_logout_clr_sess;
6516 
6517 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
6518 		    (ddb_state == DDB_DS_SESSION_FAILED))
6519 			goto ddb_logout_clr_sess;
6520 
6521 		schedule_timeout_uninterruptible(HZ);
6522 	} while ((time_after(wtime, jiffies)));
6523 
6524 ddb_logout_clr_sess:
6525 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
6526 	/*
6527 	 * we have decremented the reference count of the driver
6528 	 * when we setup the session to have the driver unload
6529 	 * to be seamless without actually destroying the
6530 	 * session
6531 	 **/
6532 	try_module_get(qla4xxx_iscsi_transport.owner);
6533 	iscsi_destroy_endpoint(ddb_entry->conn->ep);
6534 
6535 	spin_lock_irqsave(&ha->hardware_lock, flags);
6536 	qla4xxx_free_ddb(ha, ddb_entry);
6537 	clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
6538 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6539 
6540 	iscsi_session_teardown(ddb_entry->sess);
6541 
6542 	clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
6543 	ret = QLA_SUCCESS;
6544 
6545 exit_ddb_logout:
6546 	if (fw_ddb_entry)
6547 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6548 				  fw_ddb_entry, fw_ddb_entry_dma);
6549 	return ret;
6550 }
6551 
6552 /**
6553  * qla4xxx_sysfs_ddb_logout - Logout from the specified target
6554  * @fnode_sess: pointer to session attrs of flash ddb entry
6555  * @fnode_conn: pointer to connection attrs of flash ddb entry
6556  *
6557  * This performs log out from the specified target
6558  **/
6559 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
6560 				    struct iscsi_bus_flash_conn *fnode_conn)
6561 {
6562 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6563 	struct scsi_qla_host *ha = to_qla_host(shost);
6564 	struct ql4_tuple_ddb *flash_tddb = NULL;
6565 	struct ql4_tuple_ddb *tmp_tddb = NULL;
6566 	struct dev_db_entry *fw_ddb_entry = NULL;
6567 	struct ddb_entry *ddb_entry = NULL;
6568 	dma_addr_t fw_ddb_dma;
6569 	uint32_t next_idx = 0;
6570 	uint32_t state = 0, conn_err = 0;
6571 	uint16_t conn_id = 0;
6572 	int idx, index;
6573 	int status, ret = 0;
6574 
6575 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6576 				      &fw_ddb_dma);
6577 	if (fw_ddb_entry == NULL) {
6578 		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6579 		ret = -ENOMEM;
6580 		goto exit_ddb_logout;
6581 	}
6582 
6583 	flash_tddb = vzalloc(sizeof(*flash_tddb));
6584 	if (!flash_tddb) {
6585 		ql4_printk(KERN_WARNING, ha,
6586 			   "%s:Memory Allocation failed.\n", __func__);
6587 		ret = -ENOMEM;
6588 		goto exit_ddb_logout;
6589 	}
6590 
6591 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6592 	if (!tmp_tddb) {
6593 		ql4_printk(KERN_WARNING, ha,
6594 			   "%s:Memory Allocation failed.\n", __func__);
6595 		ret = -ENOMEM;
6596 		goto exit_ddb_logout;
6597 	}
6598 
6599 	if (!fnode_sess->targetname) {
6600 		ql4_printk(KERN_ERR, ha,
6601 			   "%s:Cannot logout from SendTarget entry\n",
6602 			   __func__);
6603 		ret = -EPERM;
6604 		goto exit_ddb_logout;
6605 	}
6606 
6607 	if (fnode_sess->is_boot_target) {
6608 		ql4_printk(KERN_ERR, ha,
6609 			   "%s: Logout from boot target entry is not permitted.\n",
6610 			   __func__);
6611 		ret = -EPERM;
6612 		goto exit_ddb_logout;
6613 	}
6614 
6615 	strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6616 		ISCSI_NAME_SIZE);
6617 
6618 	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6619 		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6620 	else
6621 		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6622 
6623 	flash_tddb->tpgt = fnode_sess->tpgt;
6624 	flash_tddb->port = fnode_conn->port;
6625 
6626 	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6627 
6628 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6629 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6630 		if (ddb_entry == NULL)
6631 			continue;
6632 
6633 		if (ddb_entry->ddb_type != FLASH_DDB)
6634 			continue;
6635 
6636 		index = ddb_entry->sess->target_id;
6637 		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6638 						 fw_ddb_dma, NULL, &next_idx,
6639 						 &state, &conn_err, NULL,
6640 						 &conn_id);
6641 		if (status == QLA_ERROR) {
6642 			ret = -ENOMEM;
6643 			break;
6644 		}
6645 
6646 		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6647 
6648 		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6649 						   true);
6650 		if (status == QLA_SUCCESS) {
6651 			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6652 			break;
6653 		}
6654 	}
6655 
6656 	if (idx == MAX_DDB_ENTRIES)
6657 		ret = -ESRCH;
6658 
6659 exit_ddb_logout:
6660 	if (flash_tddb)
6661 		vfree(flash_tddb);
6662 	if (tmp_tddb)
6663 		vfree(tmp_tddb);
6664 	if (fw_ddb_entry)
6665 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6666 
6667 	return ret;
6668 }
6669 
6670 static int
6671 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6672 			    int param, char *buf)
6673 {
6674 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6675 	struct scsi_qla_host *ha = to_qla_host(shost);
6676 	struct iscsi_bus_flash_conn *fnode_conn;
6677 	struct ql4_chap_table chap_tbl;
6678 	struct device *dev;
6679 	int parent_type;
6680 	int rc = 0;
6681 
6682 	dev = iscsi_find_flashnode_conn(fnode_sess);
6683 	if (!dev)
6684 		return -EIO;
6685 
6686 	fnode_conn = iscsi_dev_to_flash_conn(dev);
6687 
6688 	switch (param) {
6689 	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6690 		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6691 		break;
6692 	case ISCSI_FLASHNODE_PORTAL_TYPE:
6693 		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6694 		break;
6695 	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6696 		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6697 		break;
6698 	case ISCSI_FLASHNODE_DISCOVERY_SESS:
6699 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6700 		break;
6701 	case ISCSI_FLASHNODE_ENTRY_EN:
6702 		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6703 		break;
6704 	case ISCSI_FLASHNODE_HDR_DGST_EN:
6705 		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6706 		break;
6707 	case ISCSI_FLASHNODE_DATA_DGST_EN:
6708 		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6709 		break;
6710 	case ISCSI_FLASHNODE_IMM_DATA_EN:
6711 		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6712 		break;
6713 	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6714 		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6715 		break;
6716 	case ISCSI_FLASHNODE_DATASEQ_INORDER:
6717 		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6718 		break;
6719 	case ISCSI_FLASHNODE_PDU_INORDER:
6720 		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6721 		break;
6722 	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6723 		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6724 		break;
6725 	case ISCSI_FLASHNODE_SNACK_REQ_EN:
6726 		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6727 		break;
6728 	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6729 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6730 		break;
6731 	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6732 		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6733 		break;
6734 	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6735 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6736 		break;
6737 	case ISCSI_FLASHNODE_ERL:
6738 		rc = sprintf(buf, "%u\n", fnode_sess->erl);
6739 		break;
6740 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6741 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6742 		break;
6743 	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6744 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6745 		break;
6746 	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6747 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6748 		break;
6749 	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6750 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6751 		break;
6752 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6753 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6754 		break;
6755 	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6756 		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6757 		break;
6758 	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6759 		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6760 		break;
6761 	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6762 		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6763 		break;
6764 	case ISCSI_FLASHNODE_FIRST_BURST:
6765 		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6766 		break;
6767 	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6768 		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6769 		break;
6770 	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6771 		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6772 		break;
6773 	case ISCSI_FLASHNODE_MAX_R2T:
6774 		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6775 		break;
6776 	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6777 		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6778 		break;
6779 	case ISCSI_FLASHNODE_ISID:
6780 		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6781 			     fnode_sess->isid[0], fnode_sess->isid[1],
6782 			     fnode_sess->isid[2], fnode_sess->isid[3],
6783 			     fnode_sess->isid[4], fnode_sess->isid[5]);
6784 		break;
6785 	case ISCSI_FLASHNODE_TSID:
6786 		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6787 		break;
6788 	case ISCSI_FLASHNODE_PORT:
6789 		rc = sprintf(buf, "%d\n", fnode_conn->port);
6790 		break;
6791 	case ISCSI_FLASHNODE_MAX_BURST:
6792 		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6793 		break;
6794 	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6795 		rc = sprintf(buf, "%u\n",
6796 			     fnode_sess->default_taskmgmt_timeout);
6797 		break;
6798 	case ISCSI_FLASHNODE_IPADDR:
6799 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6800 			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6801 		else
6802 			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6803 		break;
6804 	case ISCSI_FLASHNODE_ALIAS:
6805 		if (fnode_sess->targetalias)
6806 			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6807 		else
6808 			rc = sprintf(buf, "\n");
6809 		break;
6810 	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6811 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6812 			rc = sprintf(buf, "%pI6\n",
6813 				     fnode_conn->redirect_ipaddr);
6814 		else
6815 			rc = sprintf(buf, "%pI4\n",
6816 				     fnode_conn->redirect_ipaddr);
6817 		break;
6818 	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6819 		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6820 		break;
6821 	case ISCSI_FLASHNODE_LOCAL_PORT:
6822 		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6823 		break;
6824 	case ISCSI_FLASHNODE_IPV4_TOS:
6825 		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6826 		break;
6827 	case ISCSI_FLASHNODE_IPV6_TC:
6828 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6829 			rc = sprintf(buf, "%u\n",
6830 				     fnode_conn->ipv6_traffic_class);
6831 		else
6832 			rc = sprintf(buf, "\n");
6833 		break;
6834 	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6835 		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6836 		break;
6837 	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6838 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6839 			rc = sprintf(buf, "%pI6\n",
6840 				     fnode_conn->link_local_ipv6_addr);
6841 		else
6842 			rc = sprintf(buf, "\n");
6843 		break;
6844 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6845 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6846 		break;
6847 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6848 		if (fnode_sess->discovery_parent_type == DDB_ISNS)
6849 			parent_type = ISCSI_DISC_PARENT_ISNS;
6850 		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6851 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6852 		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6853 			parent_type = ISCSI_DISC_PARENT_SENDTGT;
6854 		else
6855 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6856 
6857 		rc = sprintf(buf, "%s\n",
6858 			     iscsi_get_discovery_parent_name(parent_type));
6859 		break;
6860 	case ISCSI_FLASHNODE_NAME:
6861 		if (fnode_sess->targetname)
6862 			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6863 		else
6864 			rc = sprintf(buf, "\n");
6865 		break;
6866 	case ISCSI_FLASHNODE_TPGT:
6867 		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6868 		break;
6869 	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6870 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6871 		break;
6872 	case ISCSI_FLASHNODE_TCP_RECV_WSF:
6873 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6874 		break;
6875 	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6876 		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6877 		break;
6878 	case ISCSI_FLASHNODE_USERNAME:
6879 		if (fnode_sess->chap_auth_en) {
6880 			qla4xxx_get_uni_chap_at_index(ha,
6881 						      chap_tbl.name,
6882 						      chap_tbl.secret,
6883 						      fnode_sess->chap_out_idx);
6884 			rc = sprintf(buf, "%s\n", chap_tbl.name);
6885 		} else {
6886 			rc = sprintf(buf, "\n");
6887 		}
6888 		break;
6889 	case ISCSI_FLASHNODE_PASSWORD:
6890 		if (fnode_sess->chap_auth_en) {
6891 			qla4xxx_get_uni_chap_at_index(ha,
6892 						      chap_tbl.name,
6893 						      chap_tbl.secret,
6894 						      fnode_sess->chap_out_idx);
6895 			rc = sprintf(buf, "%s\n", chap_tbl.secret);
6896 		} else {
6897 			rc = sprintf(buf, "\n");
6898 		}
6899 		break;
6900 	case ISCSI_FLASHNODE_STATSN:
6901 		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6902 		break;
6903 	case ISCSI_FLASHNODE_EXP_STATSN:
6904 		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6905 		break;
6906 	case ISCSI_FLASHNODE_IS_BOOT_TGT:
6907 		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6908 		break;
6909 	default:
6910 		rc = -ENOSYS;
6911 		break;
6912 	}
6913 
6914 	put_device(dev);
6915 	return rc;
6916 }
6917 
6918 /**
6919  * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6920  * @fnode_sess: pointer to session attrs of flash ddb entry
6921  * @fnode_conn: pointer to connection attrs of flash ddb entry
6922  * @data: Parameters and their values to update
6923  * @len: len of data
6924  *
6925  * This sets the parameter of flash ddb entry and writes them to flash
6926  **/
6927 static int
6928 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6929 			    struct iscsi_bus_flash_conn *fnode_conn,
6930 			    void *data, int len)
6931 {
6932 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6933 	struct scsi_qla_host *ha = to_qla_host(shost);
6934 	struct iscsi_flashnode_param_info *fnode_param;
6935 	struct ql4_chap_table chap_tbl;
6936 	struct nlattr *attr;
6937 	uint16_t chap_out_idx = INVALID_ENTRY;
6938 	int rc = QLA_ERROR;
6939 	uint32_t rem = len;
6940 
6941 	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
6942 	nla_for_each_attr(attr, data, len, rem) {
6943 		fnode_param = nla_data(attr);
6944 
6945 		switch (fnode_param->param) {
6946 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6947 			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6948 			break;
6949 		case ISCSI_FLASHNODE_PORTAL_TYPE:
6950 			memcpy(fnode_sess->portal_type, fnode_param->value,
6951 			       strlen(fnode_sess->portal_type));
6952 			break;
6953 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6954 			fnode_sess->auto_snd_tgt_disable =
6955 							fnode_param->value[0];
6956 			break;
6957 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
6958 			fnode_sess->discovery_sess = fnode_param->value[0];
6959 			break;
6960 		case ISCSI_FLASHNODE_ENTRY_EN:
6961 			fnode_sess->entry_state = fnode_param->value[0];
6962 			break;
6963 		case ISCSI_FLASHNODE_HDR_DGST_EN:
6964 			fnode_conn->hdrdgst_en = fnode_param->value[0];
6965 			break;
6966 		case ISCSI_FLASHNODE_DATA_DGST_EN:
6967 			fnode_conn->datadgst_en = fnode_param->value[0];
6968 			break;
6969 		case ISCSI_FLASHNODE_IMM_DATA_EN:
6970 			fnode_sess->imm_data_en = fnode_param->value[0];
6971 			break;
6972 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6973 			fnode_sess->initial_r2t_en = fnode_param->value[0];
6974 			break;
6975 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
6976 			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6977 			break;
6978 		case ISCSI_FLASHNODE_PDU_INORDER:
6979 			fnode_sess->pdu_inorder_en = fnode_param->value[0];
6980 			break;
6981 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6982 			fnode_sess->chap_auth_en = fnode_param->value[0];
6983 			/* Invalidate chap index if chap auth is disabled */
6984 			if (!fnode_sess->chap_auth_en)
6985 				fnode_sess->chap_out_idx = INVALID_ENTRY;
6986 
6987 			break;
6988 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
6989 			fnode_conn->snack_req_en = fnode_param->value[0];
6990 			break;
6991 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6992 			fnode_sess->discovery_logout_en = fnode_param->value[0];
6993 			break;
6994 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6995 			fnode_sess->bidi_chap_en = fnode_param->value[0];
6996 			break;
6997 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6998 			fnode_sess->discovery_auth_optional =
6999 							fnode_param->value[0];
7000 			break;
7001 		case ISCSI_FLASHNODE_ERL:
7002 			fnode_sess->erl = fnode_param->value[0];
7003 			break;
7004 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
7005 			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
7006 			break;
7007 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
7008 			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
7009 			break;
7010 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
7011 			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
7012 			break;
7013 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
7014 			fnode_conn->tcp_timer_scale = fnode_param->value[0];
7015 			break;
7016 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
7017 			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
7018 			break;
7019 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
7020 			fnode_conn->fragment_disable = fnode_param->value[0];
7021 			break;
7022 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
7023 			fnode_conn->max_recv_dlength =
7024 					*(unsigned *)fnode_param->value;
7025 			break;
7026 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
7027 			fnode_conn->max_xmit_dlength =
7028 					*(unsigned *)fnode_param->value;
7029 			break;
7030 		case ISCSI_FLASHNODE_FIRST_BURST:
7031 			fnode_sess->first_burst =
7032 					*(unsigned *)fnode_param->value;
7033 			break;
7034 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7035 			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
7036 			break;
7037 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7038 			fnode_sess->time2retain =
7039 						*(uint16_t *)fnode_param->value;
7040 			break;
7041 		case ISCSI_FLASHNODE_MAX_R2T:
7042 			fnode_sess->max_r2t =
7043 					*(uint16_t *)fnode_param->value;
7044 			break;
7045 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7046 			fnode_conn->keepalive_timeout =
7047 				*(uint16_t *)fnode_param->value;
7048 			break;
7049 		case ISCSI_FLASHNODE_ISID:
7050 			memcpy(fnode_sess->isid, fnode_param->value,
7051 			       sizeof(fnode_sess->isid));
7052 			break;
7053 		case ISCSI_FLASHNODE_TSID:
7054 			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
7055 			break;
7056 		case ISCSI_FLASHNODE_PORT:
7057 			fnode_conn->port = *(uint16_t *)fnode_param->value;
7058 			break;
7059 		case ISCSI_FLASHNODE_MAX_BURST:
7060 			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
7061 			break;
7062 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7063 			fnode_sess->default_taskmgmt_timeout =
7064 						*(uint16_t *)fnode_param->value;
7065 			break;
7066 		case ISCSI_FLASHNODE_IPADDR:
7067 			memcpy(fnode_conn->ipaddress, fnode_param->value,
7068 			       IPv6_ADDR_LEN);
7069 			break;
7070 		case ISCSI_FLASHNODE_ALIAS:
7071 			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
7072 						    (char *)fnode_param->value);
7073 			break;
7074 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7075 			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
7076 			       IPv6_ADDR_LEN);
7077 			break;
7078 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7079 			fnode_conn->max_segment_size =
7080 					*(unsigned *)fnode_param->value;
7081 			break;
7082 		case ISCSI_FLASHNODE_LOCAL_PORT:
7083 			fnode_conn->local_port =
7084 						*(uint16_t *)fnode_param->value;
7085 			break;
7086 		case ISCSI_FLASHNODE_IPV4_TOS:
7087 			fnode_conn->ipv4_tos = fnode_param->value[0];
7088 			break;
7089 		case ISCSI_FLASHNODE_IPV6_TC:
7090 			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
7091 			break;
7092 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
7093 			fnode_conn->ipv6_flow_label = fnode_param->value[0];
7094 			break;
7095 		case ISCSI_FLASHNODE_NAME:
7096 			rc = iscsi_switch_str_param(&fnode_sess->targetname,
7097 						    (char *)fnode_param->value);
7098 			break;
7099 		case ISCSI_FLASHNODE_TPGT:
7100 			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
7101 			break;
7102 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
7103 			memcpy(fnode_conn->link_local_ipv6_addr,
7104 			       fnode_param->value, IPv6_ADDR_LEN);
7105 			break;
7106 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
7107 			fnode_sess->discovery_parent_idx =
7108 						*(uint16_t *)fnode_param->value;
7109 			break;
7110 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
7111 			fnode_conn->tcp_xmit_wsf =
7112 						*(uint8_t *)fnode_param->value;
7113 			break;
7114 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
7115 			fnode_conn->tcp_recv_wsf =
7116 						*(uint8_t *)fnode_param->value;
7117 			break;
7118 		case ISCSI_FLASHNODE_STATSN:
7119 			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
7120 			break;
7121 		case ISCSI_FLASHNODE_EXP_STATSN:
7122 			fnode_conn->exp_statsn =
7123 						*(uint32_t *)fnode_param->value;
7124 			break;
7125 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
7126 			chap_out_idx = *(uint16_t *)fnode_param->value;
7127 			if (!qla4xxx_get_uni_chap_at_index(ha,
7128 							   chap_tbl.name,
7129 							   chap_tbl.secret,
7130 							   chap_out_idx)) {
7131 				fnode_sess->chap_out_idx = chap_out_idx;
7132 				/* Enable chap auth if chap index is valid */
7133 				fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
7134 			}
7135 			break;
7136 		default:
7137 			ql4_printk(KERN_ERR, ha,
7138 				   "%s: No such sysfs attribute\n", __func__);
7139 			rc = -ENOSYS;
7140 			goto exit_set_param;
7141 		}
7142 	}
7143 
7144 	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
7145 
7146 exit_set_param:
7147 	return rc;
7148 }
7149 
7150 /**
7151  * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
7152  * @fnode_sess: pointer to session attrs of flash ddb entry
7153  *
7154  * This invalidates the flash ddb entry at the given index
7155  **/
7156 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
7157 {
7158 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7159 	struct scsi_qla_host *ha = to_qla_host(shost);
7160 	uint32_t dev_db_start_offset;
7161 	uint32_t dev_db_end_offset;
7162 	struct dev_db_entry *fw_ddb_entry = NULL;
7163 	dma_addr_t fw_ddb_entry_dma;
7164 	uint16_t *ddb_cookie = NULL;
7165 	size_t ddb_size = 0;
7166 	void *pddb = NULL;
7167 	int target_id;
7168 	int rc = 0;
7169 
7170 	if (fnode_sess->is_boot_target) {
7171 		rc = -EPERM;
7172 		DEBUG2(ql4_printk(KERN_ERR, ha,
7173 				  "%s: Deletion of boot target entry is not permitted.\n",
7174 				  __func__));
7175 		goto exit_ddb_del;
7176 	}
7177 
7178 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
7179 		goto sysfs_ddb_del;
7180 
7181 	if (is_qla40XX(ha)) {
7182 		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7183 		dev_db_end_offset = FLASH_OFFSET_DB_END;
7184 		dev_db_start_offset += (fnode_sess->target_id *
7185 				       sizeof(*fw_ddb_entry));
7186 		ddb_size = sizeof(*fw_ddb_entry);
7187 	} else {
7188 		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
7189 				      (ha->hw.flt_region_ddb << 2);
7190 		/* flt_ddb_size is DDB table size for both ports
7191 		 * so divide it by 2 to calculate the offset for second port
7192 		 */
7193 		if (ha->port_num == 1)
7194 			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
7195 
7196 		dev_db_end_offset = dev_db_start_offset +
7197 				    (ha->hw.flt_ddb_size / 2);
7198 
7199 		dev_db_start_offset += (fnode_sess->target_id *
7200 				       sizeof(*fw_ddb_entry));
7201 		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
7202 
7203 		ddb_size = sizeof(*ddb_cookie);
7204 	}
7205 
7206 	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
7207 			  __func__, dev_db_start_offset, dev_db_end_offset));
7208 
7209 	if (dev_db_start_offset > dev_db_end_offset) {
7210 		rc = -EIO;
7211 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
7212 				  __func__, fnode_sess->target_id));
7213 		goto exit_ddb_del;
7214 	}
7215 
7216 	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
7217 				  &fw_ddb_entry_dma, GFP_KERNEL);
7218 	if (!pddb) {
7219 		rc = -ENOMEM;
7220 		DEBUG2(ql4_printk(KERN_ERR, ha,
7221 				  "%s: Unable to allocate dma buffer\n",
7222 				  __func__));
7223 		goto exit_ddb_del;
7224 	}
7225 
7226 	if (is_qla40XX(ha)) {
7227 		fw_ddb_entry = pddb;
7228 		memset(fw_ddb_entry, 0, ddb_size);
7229 		ddb_cookie = &fw_ddb_entry->cookie;
7230 	} else {
7231 		ddb_cookie = pddb;
7232 	}
7233 
7234 	/* invalidate the cookie */
7235 	*ddb_cookie = 0xFFEE;
7236 	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7237 			  ddb_size, FLASH_OPT_RMW_COMMIT);
7238 
7239 sysfs_ddb_del:
7240 	target_id = fnode_sess->target_id;
7241 	iscsi_destroy_flashnode_sess(fnode_sess);
7242 	ql4_printk(KERN_INFO, ha,
7243 		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
7244 		   __func__, target_id, ha->host_no);
7245 exit_ddb_del:
7246 	if (pddb)
7247 		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
7248 				  fw_ddb_entry_dma);
7249 	return rc;
7250 }
7251 
7252 /**
7253  * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
7254  * @ha: pointer to adapter structure
7255  *
7256  * Export the firmware DDB for all send targets and normal targets to sysfs.
7257  **/
7258 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
7259 {
7260 	struct dev_db_entry *fw_ddb_entry = NULL;
7261 	dma_addr_t fw_ddb_entry_dma;
7262 	uint16_t max_ddbs;
7263 	uint16_t idx = 0;
7264 	int ret = QLA_SUCCESS;
7265 
7266 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
7267 					  sizeof(*fw_ddb_entry),
7268 					  &fw_ddb_entry_dma, GFP_KERNEL);
7269 	if (!fw_ddb_entry) {
7270 		DEBUG2(ql4_printk(KERN_ERR, ha,
7271 				  "%s: Unable to allocate dma buffer\n",
7272 				  __func__));
7273 		return -ENOMEM;
7274 	}
7275 
7276 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7277 				     MAX_DEV_DB_ENTRIES;
7278 
7279 	for (idx = 0; idx < max_ddbs; idx++) {
7280 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
7281 					     idx))
7282 			continue;
7283 
7284 		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
7285 		if (ret) {
7286 			ret = -EIO;
7287 			break;
7288 		}
7289 	}
7290 
7291 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
7292 			  fw_ddb_entry_dma);
7293 
7294 	return ret;
7295 }
7296 
7297 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
7298 {
7299 	iscsi_destroy_all_flashnode(ha->host);
7300 }
7301 
7302 /**
7303  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
7304  * @ha: pointer to adapter structure
7305  * @is_reset: Is this init path or reset path
7306  *
7307  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
7308  * using connection open, then create the list of normal targets (nt)
7309  * from firmware DDBs. Based on the list of nt setup session and connection
7310  * objects.
7311  **/
7312 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
7313 {
7314 	uint16_t tmo = 0;
7315 	struct list_head list_st, list_nt;
7316 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
7317 	unsigned long wtime;
7318 
7319 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
7320 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
7321 		ha->is_reset = is_reset;
7322 		return;
7323 	}
7324 
7325 	INIT_LIST_HEAD(&list_st);
7326 	INIT_LIST_HEAD(&list_nt);
7327 
7328 	qla4xxx_build_st_list(ha, &list_st);
7329 
7330 	/* Before issuing conn open mbox, ensure all IPs states are configured
7331 	 * Note, conn open fails if IPs are not configured
7332 	 */
7333 	qla4xxx_wait_for_ip_configuration(ha);
7334 
7335 	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
7336 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
7337 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
7338 	}
7339 
7340 	/* Wait to ensure all sendtargets are done for min 12 sec wait */
7341 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
7342 	       (ha->def_timeout < LOGIN_TOV * 10) ?
7343 	       ha->def_timeout : LOGIN_TOV);
7344 
7345 	DEBUG2(ql4_printk(KERN_INFO, ha,
7346 			  "Default time to wait for build ddb %d\n", tmo));
7347 
7348 	wtime = jiffies + (HZ * tmo);
7349 	do {
7350 		if (list_empty(&list_st))
7351 			break;
7352 
7353 		qla4xxx_remove_failed_ddb(ha, &list_st);
7354 		schedule_timeout_uninterruptible(HZ / 10);
7355 	} while (time_after(wtime, jiffies));
7356 
7357 
7358 	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
7359 
7360 	qla4xxx_free_ddb_list(&list_st);
7361 	qla4xxx_free_ddb_list(&list_nt);
7362 
7363 	qla4xxx_free_ddb_index(ha);
7364 }
7365 
7366 /**
7367  * qla4xxx_wait_login_resp_boot_tgt -  Wait for iSCSI boot target login
7368  * response.
7369  * @ha: pointer to adapter structure
7370  *
7371  * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
7372  * set in DDB and we will wait for login response of boot targets during
7373  * probe.
7374  **/
7375 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
7376 {
7377 	struct ddb_entry *ddb_entry;
7378 	struct dev_db_entry *fw_ddb_entry = NULL;
7379 	dma_addr_t fw_ddb_entry_dma;
7380 	unsigned long wtime;
7381 	uint32_t ddb_state;
7382 	int max_ddbs, idx, ret;
7383 
7384 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7385 				     MAX_DEV_DB_ENTRIES;
7386 
7387 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7388 					  &fw_ddb_entry_dma, GFP_KERNEL);
7389 	if (!fw_ddb_entry) {
7390 		ql4_printk(KERN_ERR, ha,
7391 			   "%s: Unable to allocate dma buffer\n", __func__);
7392 		goto exit_login_resp;
7393 	}
7394 
7395 	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
7396 
7397 	for (idx = 0; idx < max_ddbs; idx++) {
7398 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7399 		if (ddb_entry == NULL)
7400 			continue;
7401 
7402 		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7403 			DEBUG2(ql4_printk(KERN_INFO, ha,
7404 					  "%s: DDB index [%d]\n", __func__,
7405 					  ddb_entry->fw_ddb_index));
7406 			do {
7407 				ret = qla4xxx_get_fwddb_entry(ha,
7408 						ddb_entry->fw_ddb_index,
7409 						fw_ddb_entry, fw_ddb_entry_dma,
7410 						NULL, NULL, &ddb_state, NULL,
7411 						NULL, NULL);
7412 				if (ret == QLA_ERROR)
7413 					goto exit_login_resp;
7414 
7415 				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
7416 				    (ddb_state == DDB_DS_SESSION_FAILED))
7417 					break;
7418 
7419 				schedule_timeout_uninterruptible(HZ);
7420 
7421 			} while ((time_after(wtime, jiffies)));
7422 
7423 			if (!time_after(wtime, jiffies)) {
7424 				DEBUG2(ql4_printk(KERN_INFO, ha,
7425 						  "%s: Login response wait timer expired\n",
7426 						  __func__));
7427 				 goto exit_login_resp;
7428 			}
7429 		}
7430 	}
7431 
7432 exit_login_resp:
7433 	if (fw_ddb_entry)
7434 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7435 				  fw_ddb_entry, fw_ddb_entry_dma);
7436 }
7437 
7438 /**
7439  * qla4xxx_probe_adapter - callback function to probe HBA
7440  * @pdev: pointer to pci_dev structure
7441  * @pci_device_id: pointer to pci_device entry
7442  *
7443  * This routine will probe for Qlogic 4xxx iSCSI host adapters.
7444  * It returns zero if successful. It also initializes all data necessary for
7445  * the driver.
7446  **/
7447 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
7448 				 const struct pci_device_id *ent)
7449 {
7450 	int ret = -ENODEV, status;
7451 	struct Scsi_Host *host;
7452 	struct scsi_qla_host *ha;
7453 	uint8_t init_retry_count = 0;
7454 	char buf[34];
7455 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
7456 	uint32_t dev_state;
7457 
7458 	if (pci_enable_device(pdev))
7459 		return -1;
7460 
7461 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
7462 	if (host == NULL) {
7463 		printk(KERN_WARNING
7464 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
7465 		goto probe_disable_device;
7466 	}
7467 
7468 	/* Clear our data area */
7469 	ha = to_qla_host(host);
7470 	memset(ha, 0, sizeof(*ha));
7471 
7472 	/* Save the information from PCI BIOS.	*/
7473 	ha->pdev = pdev;
7474 	ha->host = host;
7475 	ha->host_no = host->host_no;
7476 	ha->func_num = PCI_FUNC(ha->pdev->devfn);
7477 
7478 	pci_enable_pcie_error_reporting(pdev);
7479 
7480 	/* Setup Runtime configurable options */
7481 	if (is_qla8022(ha)) {
7482 		ha->isp_ops = &qla4_82xx_isp_ops;
7483 		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
7484 		ha->qdr_sn_window = -1;
7485 		ha->ddr_mn_window = -1;
7486 		ha->curr_window = 255;
7487 		nx_legacy_intr = &legacy_intr[ha->func_num];
7488 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
7489 		ha->nx_legacy_intr.tgt_status_reg =
7490 			nx_legacy_intr->tgt_status_reg;
7491 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
7492 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
7493 	} else if (is_qla8032(ha) || is_qla8042(ha)) {
7494 		ha->isp_ops = &qla4_83xx_isp_ops;
7495 		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
7496 	} else {
7497 		ha->isp_ops = &qla4xxx_isp_ops;
7498 	}
7499 
7500 	if (is_qla80XX(ha)) {
7501 		rwlock_init(&ha->hw_lock);
7502 		ha->pf_bit = ha->func_num << 16;
7503 		/* Set EEH reset type to fundamental if required by hba */
7504 		pdev->needs_freset = 1;
7505 	}
7506 
7507 	/* Configure PCI I/O space. */
7508 	ret = ha->isp_ops->iospace_config(ha);
7509 	if (ret)
7510 		goto probe_failed_ioconfig;
7511 
7512 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
7513 		   pdev->device, pdev->irq, ha->reg);
7514 
7515 	qla4xxx_config_dma_addressing(ha);
7516 
7517 	/* Initialize lists and spinlocks. */
7518 	INIT_LIST_HEAD(&ha->free_srb_q);
7519 
7520 	mutex_init(&ha->mbox_sem);
7521 	mutex_init(&ha->chap_sem);
7522 	init_completion(&ha->mbx_intr_comp);
7523 	init_completion(&ha->disable_acb_comp);
7524 
7525 	spin_lock_init(&ha->hardware_lock);
7526 	spin_lock_init(&ha->work_lock);
7527 
7528 	/* Initialize work list */
7529 	INIT_LIST_HEAD(&ha->work_list);
7530 
7531 	/* Allocate dma buffers */
7532 	if (qla4xxx_mem_alloc(ha)) {
7533 		ql4_printk(KERN_WARNING, ha,
7534 		    "[ERROR] Failed to allocate memory for adapter\n");
7535 
7536 		ret = -ENOMEM;
7537 		goto probe_failed;
7538 	}
7539 
7540 	host->cmd_per_lun = 3;
7541 	host->max_channel = 0;
7542 	host->max_lun = MAX_LUNS - 1;
7543 	host->max_id = MAX_TARGETS;
7544 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
7545 	host->can_queue = MAX_SRBS ;
7546 	host->transportt = qla4xxx_scsi_transport;
7547 
7548 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
7549 	if (ret) {
7550 		ql4_printk(KERN_WARNING, ha,
7551 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
7552 		goto probe_failed;
7553 	}
7554 
7555 	pci_set_drvdata(pdev, ha);
7556 
7557 	ret = scsi_add_host(host, &pdev->dev);
7558 	if (ret)
7559 		goto probe_failed;
7560 
7561 	if (is_qla80XX(ha))
7562 		qla4_8xxx_get_flash_info(ha);
7563 
7564 	if (is_qla8032(ha) || is_qla8042(ha)) {
7565 		qla4_83xx_read_reset_template(ha);
7566 		/*
7567 		 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
7568 		 * If DONRESET_BIT0 is set, drivers should not set dev_state
7569 		 * to NEED_RESET. But if NEED_RESET is set, drivers should
7570 		 * should honor the reset.
7571 		 */
7572 		if (ql4xdontresethba == 1)
7573 			qla4_83xx_set_idc_dontreset(ha);
7574 	}
7575 
7576 	/*
7577 	 * Initialize the Host adapter request/response queues and
7578 	 * firmware
7579 	 * NOTE: interrupts enabled upon successful completion
7580 	 */
7581 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7582 
7583 	/* Dont retry adapter initialization if IRQ allocation failed */
7584 	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7585 		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7586 			   __func__);
7587 		goto skip_retry_init;
7588 	}
7589 
7590 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7591 	    init_retry_count++ < MAX_INIT_RETRIES) {
7592 
7593 		if (is_qla80XX(ha)) {
7594 			ha->isp_ops->idc_lock(ha);
7595 			dev_state = qla4_8xxx_rd_direct(ha,
7596 							QLA8XXX_CRB_DEV_STATE);
7597 			ha->isp_ops->idc_unlock(ha);
7598 			if (dev_state == QLA8XXX_DEV_FAILED) {
7599 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7600 				    "initialize adapter. H/W is in failed state\n",
7601 				    __func__);
7602 				break;
7603 			}
7604 		}
7605 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
7606 			      "(%d)\n", __func__, init_retry_count));
7607 
7608 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7609 			continue;
7610 
7611 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7612 	}
7613 
7614 skip_retry_init:
7615 	if (!test_bit(AF_ONLINE, &ha->flags)) {
7616 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7617 
7618 		if ((is_qla8022(ha) && ql4xdontresethba) ||
7619 		    ((is_qla8032(ha) || is_qla8042(ha)) &&
7620 		     qla4_83xx_idc_dontreset(ha))) {
7621 			/* Put the device in failed state. */
7622 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7623 			ha->isp_ops->idc_lock(ha);
7624 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7625 					    QLA8XXX_DEV_FAILED);
7626 			ha->isp_ops->idc_unlock(ha);
7627 		}
7628 		ret = -ENODEV;
7629 		goto remove_host;
7630 	}
7631 
7632 	/* Startup the kernel thread for this host adapter. */
7633 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
7634 		      "qla4xxx_dpc\n", __func__));
7635 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7636 	ha->dpc_thread = create_singlethread_workqueue(buf);
7637 	if (!ha->dpc_thread) {
7638 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7639 		ret = -ENODEV;
7640 		goto remove_host;
7641 	}
7642 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7643 
7644 	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
7645 				      ha->host_no);
7646 	if (!ha->task_wq) {
7647 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7648 		ret = -ENODEV;
7649 		goto remove_host;
7650 	}
7651 
7652 	/*
7653 	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7654 	 * (which is called indirectly by qla4xxx_initialize_adapter),
7655 	 * so that irqs will be registered after crbinit but before
7656 	 * mbx_intr_enable.
7657 	 */
7658 	if (is_qla40XX(ha)) {
7659 		ret = qla4xxx_request_irqs(ha);
7660 		if (ret) {
7661 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7662 			    "interrupt %d already in use.\n", pdev->irq);
7663 			goto remove_host;
7664 		}
7665 	}
7666 
7667 	pci_save_state(ha->pdev);
7668 	ha->isp_ops->enable_intrs(ha);
7669 
7670 	/* Start timer thread. */
7671 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7672 
7673 	set_bit(AF_INIT_DONE, &ha->flags);
7674 
7675 	qla4_8xxx_alloc_sysfs_attr(ha);
7676 
7677 	printk(KERN_INFO
7678 	       " QLogic iSCSI HBA Driver version: %s\n"
7679 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7680 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7681 	       ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7682 	       ha->fw_info.fw_patch, ha->fw_info.fw_build);
7683 
7684 	/* Set the driver version */
7685 	if (is_qla80XX(ha))
7686 		qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7687 
7688 	if (qla4xxx_setup_boot_info(ha))
7689 		ql4_printk(KERN_ERR, ha,
7690 			   "%s: No iSCSI boot target configured\n", __func__);
7691 
7692 	if (qla4xxx_sysfs_ddb_export(ha))
7693 		ql4_printk(KERN_ERR, ha,
7694 			   "%s: Error exporting ddb to sysfs\n", __func__);
7695 
7696 		/* Perform the build ddb list and login to each */
7697 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7698 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7699 	qla4xxx_wait_login_resp_boot_tgt(ha);
7700 
7701 	qla4xxx_create_chap_list(ha);
7702 
7703 	qla4xxx_create_ifaces(ha);
7704 	return 0;
7705 
7706 remove_host:
7707 	scsi_remove_host(ha->host);
7708 
7709 probe_failed:
7710 	qla4xxx_free_adapter(ha);
7711 
7712 probe_failed_ioconfig:
7713 	pci_disable_pcie_error_reporting(pdev);
7714 	scsi_host_put(ha->host);
7715 
7716 probe_disable_device:
7717 	pci_disable_device(pdev);
7718 
7719 	return ret;
7720 }
7721 
7722 /**
7723  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
7724  * @ha: pointer to adapter structure
7725  *
7726  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7727  * so that the other port will not re-initialize while in the process of
7728  * removing the ha due to driver unload or hba hotplug.
7729  **/
7730 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7731 {
7732 	struct scsi_qla_host *other_ha = NULL;
7733 	struct pci_dev *other_pdev = NULL;
7734 	int fn = ISP4XXX_PCI_FN_2;
7735 
7736 	/*iscsi function numbers for ISP4xxx is 1 and 3*/
7737 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7738 		fn = ISP4XXX_PCI_FN_1;
7739 
7740 	other_pdev =
7741 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7742 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7743 		fn));
7744 
7745 	/* Get other_ha if other_pdev is valid and state is enable*/
7746 	if (other_pdev) {
7747 		if (atomic_read(&other_pdev->enable_cnt)) {
7748 			other_ha = pci_get_drvdata(other_pdev);
7749 			if (other_ha) {
7750 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
7751 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7752 				    "Prevent %s reinit\n", __func__,
7753 				    dev_name(&other_ha->pdev->dev)));
7754 			}
7755 		}
7756 		pci_dev_put(other_pdev);
7757 	}
7758 }
7759 
7760 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
7761 {
7762 	struct ddb_entry *ddb_entry;
7763 	int options;
7764 	int idx;
7765 
7766 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7767 
7768 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7769 		if ((ddb_entry != NULL) &&
7770 		    (ddb_entry->ddb_type == FLASH_DDB)) {
7771 
7772 			options = LOGOUT_OPTION_CLOSE_SESSION;
7773 			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
7774 			    == QLA_ERROR)
7775 				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
7776 					   __func__);
7777 
7778 			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7779 			/*
7780 			 * we have decremented the reference count of the driver
7781 			 * when we setup the session to have the driver unload
7782 			 * to be seamless without actually destroying the
7783 			 * session
7784 			 **/
7785 			try_module_get(qla4xxx_iscsi_transport.owner);
7786 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
7787 			qla4xxx_free_ddb(ha, ddb_entry);
7788 			iscsi_session_teardown(ddb_entry->sess);
7789 		}
7790 	}
7791 }
7792 /**
7793  * qla4xxx_remove_adapter - callback function to remove adapter.
7794  * @pci_dev: PCI device pointer
7795  **/
7796 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7797 {
7798 	struct scsi_qla_host *ha;
7799 
7800 	/*
7801 	 * If the PCI device is disabled then it means probe_adapter had
7802 	 * failed and resources already cleaned up on probe_adapter exit.
7803 	 */
7804 	if (!pci_is_enabled(pdev))
7805 		return;
7806 
7807 	ha = pci_get_drvdata(pdev);
7808 
7809 	if (is_qla40XX(ha))
7810 		qla4xxx_prevent_other_port_reinit(ha);
7811 
7812 	/* destroy iface from sysfs */
7813 	qla4xxx_destroy_ifaces(ha);
7814 
7815 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7816 		iscsi_boot_destroy_kset(ha->boot_kset);
7817 
7818 	qla4xxx_destroy_fw_ddb_session(ha);
7819 	qla4_8xxx_free_sysfs_attr(ha);
7820 
7821 	qla4xxx_sysfs_ddb_remove(ha);
7822 	scsi_remove_host(ha->host);
7823 
7824 	qla4xxx_free_adapter(ha);
7825 
7826 	scsi_host_put(ha->host);
7827 
7828 	pci_disable_pcie_error_reporting(pdev);
7829 	pci_disable_device(pdev);
7830 }
7831 
7832 /**
7833  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7834  * @ha: HA context
7835  *
7836  * At exit, the @ha's flags.enable_64bit_addressing set to indicated
7837  * supported addressing method.
7838  */
7839 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7840 {
7841 	int retval;
7842 
7843 	/* Update our PCI device dma_mask for full 64 bit mask */
7844 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7845 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7846 			dev_dbg(&ha->pdev->dev,
7847 				  "Failed to set 64 bit PCI consistent mask; "
7848 				   "using 32 bit.\n");
7849 			retval = pci_set_consistent_dma_mask(ha->pdev,
7850 							     DMA_BIT_MASK(32));
7851 		}
7852 	} else
7853 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7854 }
7855 
7856 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7857 {
7858 	struct iscsi_cls_session *cls_sess;
7859 	struct iscsi_session *sess;
7860 	struct ddb_entry *ddb;
7861 	int queue_depth = QL4_DEF_QDEPTH;
7862 
7863 	cls_sess = starget_to_session(sdev->sdev_target);
7864 	sess = cls_sess->dd_data;
7865 	ddb = sess->dd_data;
7866 
7867 	sdev->hostdata = ddb;
7868 	sdev->tagged_supported = 1;
7869 
7870 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7871 		queue_depth = ql4xmaxqdepth;
7872 
7873 	scsi_activate_tcq(sdev, queue_depth);
7874 	return 0;
7875 }
7876 
7877 static int qla4xxx_slave_configure(struct scsi_device *sdev)
7878 {
7879 	sdev->tagged_supported = 1;
7880 	return 0;
7881 }
7882 
7883 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
7884 {
7885 	scsi_deactivate_tcq(sdev, 1);
7886 }
7887 
7888 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7889 				      int reason)
7890 {
7891 	if (!ql4xqfulltracking)
7892 		return -EOPNOTSUPP;
7893 
7894 	return iscsi_change_queue_depth(sdev, qdepth, reason);
7895 }
7896 
7897 /**
7898  * qla4xxx_del_from_active_array - returns an active srb
7899  * @ha: Pointer to host adapter structure.
7900  * @index: index into the active_array
7901  *
7902  * This routine removes and returns the srb at the specified index
7903  **/
7904 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7905     uint32_t index)
7906 {
7907 	struct srb *srb = NULL;
7908 	struct scsi_cmnd *cmd = NULL;
7909 
7910 	cmd = scsi_host_find_tag(ha->host, index);
7911 	if (!cmd)
7912 		return srb;
7913 
7914 	srb = (struct srb *)CMD_SP(cmd);
7915 	if (!srb)
7916 		return srb;
7917 
7918 	/* update counters */
7919 	if (srb->flags & SRB_DMA_VALID) {
7920 		ha->iocb_cnt -= srb->iocb_cnt;
7921 		if (srb->cmd)
7922 			srb->cmd->host_scribble =
7923 				(unsigned char *)(unsigned long) MAX_SRBS;
7924 	}
7925 	return srb;
7926 }
7927 
7928 /**
7929  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7930  * @ha: Pointer to host adapter structure.
7931  * @cmd: Scsi Command to wait on.
7932  *
7933  * This routine waits for the command to be returned by the Firmware
7934  * for some max time.
7935  **/
7936 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7937 				      struct scsi_cmnd *cmd)
7938 {
7939 	int done = 0;
7940 	struct srb *rp;
7941 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7942 	int ret = SUCCESS;
7943 
7944 	/* Dont wait on command if PCI error is being handled
7945 	 * by PCI AER driver
7946 	 */
7947 	if (unlikely(pci_channel_offline(ha->pdev)) ||
7948 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
7949 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7950 		    ha->host_no, __func__);
7951 		return ret;
7952 	}
7953 
7954 	do {
7955 		/* Checking to see if its returned to OS */
7956 		rp = (struct srb *) CMD_SP(cmd);
7957 		if (rp == NULL) {
7958 			done++;
7959 			break;
7960 		}
7961 
7962 		msleep(2000);
7963 	} while (max_wait_time--);
7964 
7965 	return done;
7966 }
7967 
7968 /**
7969  * qla4xxx_wait_for_hba_online - waits for HBA to come online
7970  * @ha: Pointer to host adapter structure
7971  **/
7972 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7973 {
7974 	unsigned long wait_online;
7975 
7976 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7977 	while (time_before(jiffies, wait_online)) {
7978 
7979 		if (adapter_up(ha))
7980 			return QLA_SUCCESS;
7981 
7982 		msleep(2000);
7983 	}
7984 
7985 	return QLA_ERROR;
7986 }
7987 
7988 /**
7989  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7990  * @ha: pointer to HBA
7991  * @t: target id
7992  * @l: lun id
7993  *
7994  * This function waits for all outstanding commands to a lun to complete. It
7995  * returns 0 if all pending commands are returned and 1 otherwise.
7996  **/
7997 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7998 					struct scsi_target *stgt,
7999 					struct scsi_device *sdev)
8000 {
8001 	int cnt;
8002 	int status = 0;
8003 	struct scsi_cmnd *cmd;
8004 
8005 	/*
8006 	 * Waiting for all commands for the designated target or dev
8007 	 * in the active array
8008 	 */
8009 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
8010 		cmd = scsi_host_find_tag(ha->host, cnt);
8011 		if (cmd && stgt == scsi_target(cmd->device) &&
8012 		    (!sdev || sdev == cmd->device)) {
8013 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
8014 				status++;
8015 				break;
8016 			}
8017 		}
8018 	}
8019 	return status;
8020 }
8021 
8022 /**
8023  * qla4xxx_eh_abort - callback for abort task.
8024  * @cmd: Pointer to Linux's SCSI command structure
8025  *
8026  * This routine is called by the Linux OS to abort the specified
8027  * command.
8028  **/
8029 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
8030 {
8031 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
8032 	unsigned int id = cmd->device->id;
8033 	unsigned int lun = cmd->device->lun;
8034 	unsigned long flags;
8035 	struct srb *srb = NULL;
8036 	int ret = SUCCESS;
8037 	int wait = 0;
8038 
8039 	ql4_printk(KERN_INFO, ha,
8040 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
8041 	    ha->host_no, id, lun, cmd);
8042 
8043 	spin_lock_irqsave(&ha->hardware_lock, flags);
8044 	srb = (struct srb *) CMD_SP(cmd);
8045 	if (!srb) {
8046 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
8047 		return SUCCESS;
8048 	}
8049 	kref_get(&srb->srb_ref);
8050 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
8051 
8052 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
8053 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
8054 		    ha->host_no, id, lun));
8055 		ret = FAILED;
8056 	} else {
8057 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
8058 		    ha->host_no, id, lun));
8059 		wait = 1;
8060 	}
8061 
8062 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
8063 
8064 	/* Wait for command to complete */
8065 	if (wait) {
8066 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
8067 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
8068 			    ha->host_no, id, lun));
8069 			ret = FAILED;
8070 		}
8071 	}
8072 
8073 	ql4_printk(KERN_INFO, ha,
8074 	    "scsi%ld:%d:%d: Abort command - %s\n",
8075 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
8076 
8077 	return ret;
8078 }
8079 
8080 /**
8081  * qla4xxx_eh_device_reset - callback for target reset.
8082  * @cmd: Pointer to Linux's SCSI command structure
8083  *
8084  * This routine is called by the Linux OS to reset all luns on the
8085  * specified target.
8086  **/
8087 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
8088 {
8089 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
8090 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
8091 	int ret = FAILED, stat;
8092 
8093 	if (!ddb_entry)
8094 		return ret;
8095 
8096 	ret = iscsi_block_scsi_eh(cmd);
8097 	if (ret)
8098 		return ret;
8099 	ret = FAILED;
8100 
8101 	ql4_printk(KERN_INFO, ha,
8102 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
8103 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
8104 
8105 	DEBUG2(printk(KERN_INFO
8106 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
8107 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
8108 		      cmd, jiffies, cmd->request->timeout / HZ,
8109 		      ha->dpc_flags, cmd->result, cmd->allowed));
8110 
8111 	/* FIXME: wait for hba to go online */
8112 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
8113 	if (stat != QLA_SUCCESS) {
8114 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
8115 		goto eh_dev_reset_done;
8116 	}
8117 
8118 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
8119 					 cmd->device)) {
8120 		ql4_printk(KERN_INFO, ha,
8121 			   "DEVICE RESET FAILED - waiting for "
8122 			   "commands.\n");
8123 		goto eh_dev_reset_done;
8124 	}
8125 
8126 	/* Send marker. */
8127 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
8128 		MM_LUN_RESET) != QLA_SUCCESS)
8129 		goto eh_dev_reset_done;
8130 
8131 	ql4_printk(KERN_INFO, ha,
8132 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
8133 		   ha->host_no, cmd->device->channel, cmd->device->id,
8134 		   cmd->device->lun);
8135 
8136 	ret = SUCCESS;
8137 
8138 eh_dev_reset_done:
8139 
8140 	return ret;
8141 }
8142 
8143 /**
8144  * qla4xxx_eh_target_reset - callback for target reset.
8145  * @cmd: Pointer to Linux's SCSI command structure
8146  *
8147  * This routine is called by the Linux OS to reset the target.
8148  **/
8149 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
8150 {
8151 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
8152 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
8153 	int stat, ret;
8154 
8155 	if (!ddb_entry)
8156 		return FAILED;
8157 
8158 	ret = iscsi_block_scsi_eh(cmd);
8159 	if (ret)
8160 		return ret;
8161 
8162 	starget_printk(KERN_INFO, scsi_target(cmd->device),
8163 		       "WARM TARGET RESET ISSUED.\n");
8164 
8165 	DEBUG2(printk(KERN_INFO
8166 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
8167 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
8168 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
8169 		      ha->dpc_flags, cmd->result, cmd->allowed));
8170 
8171 	stat = qla4xxx_reset_target(ha, ddb_entry);
8172 	if (stat != QLA_SUCCESS) {
8173 		starget_printk(KERN_INFO, scsi_target(cmd->device),
8174 			       "WARM TARGET RESET FAILED.\n");
8175 		return FAILED;
8176 	}
8177 
8178 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
8179 					 NULL)) {
8180 		starget_printk(KERN_INFO, scsi_target(cmd->device),
8181 			       "WARM TARGET DEVICE RESET FAILED - "
8182 			       "waiting for commands.\n");
8183 		return FAILED;
8184 	}
8185 
8186 	/* Send marker. */
8187 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
8188 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
8189 		starget_printk(KERN_INFO, scsi_target(cmd->device),
8190 			       "WARM TARGET DEVICE RESET FAILED - "
8191 			       "marker iocb failed.\n");
8192 		return FAILED;
8193 	}
8194 
8195 	starget_printk(KERN_INFO, scsi_target(cmd->device),
8196 		       "WARM TARGET RESET SUCCEEDED.\n");
8197 	return SUCCESS;
8198 }
8199 
8200 /**
8201  * qla4xxx_is_eh_active - check if error handler is running
8202  * @shost: Pointer to SCSI Host struct
8203  *
8204  * This routine finds that if reset host is called in EH
8205  * scenario or from some application like sg_reset
8206  **/
8207 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
8208 {
8209 	if (shost->shost_state == SHOST_RECOVERY)
8210 		return 1;
8211 	return 0;
8212 }
8213 
8214 /**
8215  * qla4xxx_eh_host_reset - kernel callback
8216  * @cmd: Pointer to Linux's SCSI command structure
8217  *
8218  * This routine is invoked by the Linux kernel to perform fatal error
8219  * recovery on the specified adapter.
8220  **/
8221 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
8222 {
8223 	int return_status = FAILED;
8224 	struct scsi_qla_host *ha;
8225 
8226 	ha = to_qla_host(cmd->device->host);
8227 
8228 	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
8229 		qla4_83xx_set_idc_dontreset(ha);
8230 
8231 	/*
8232 	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
8233 	 * protocol drivers, we should not set device_state to NEED_RESET
8234 	 */
8235 	if (ql4xdontresethba ||
8236 	    ((is_qla8032(ha) || is_qla8042(ha)) &&
8237 	     qla4_83xx_idc_dontreset(ha))) {
8238 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
8239 		     ha->host_no, __func__));
8240 
8241 		/* Clear outstanding srb in queues */
8242 		if (qla4xxx_is_eh_active(cmd->device->host))
8243 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
8244 
8245 		return FAILED;
8246 	}
8247 
8248 	ql4_printk(KERN_INFO, ha,
8249 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
8250 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
8251 
8252 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
8253 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
8254 			      "DEAD.\n", ha->host_no, cmd->device->channel,
8255 			      __func__));
8256 
8257 		return FAILED;
8258 	}
8259 
8260 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8261 		if (is_qla80XX(ha))
8262 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
8263 		else
8264 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
8265 	}
8266 
8267 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
8268 		return_status = SUCCESS;
8269 
8270 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
8271 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
8272 
8273 	return return_status;
8274 }
8275 
8276 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
8277 {
8278 	uint32_t mbox_cmd[MBOX_REG_COUNT];
8279 	uint32_t mbox_sts[MBOX_REG_COUNT];
8280 	struct addr_ctrl_blk_def *acb = NULL;
8281 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
8282 	int rval = QLA_SUCCESS;
8283 	dma_addr_t acb_dma;
8284 
8285 	acb = dma_alloc_coherent(&ha->pdev->dev,
8286 				 sizeof(struct addr_ctrl_blk_def),
8287 				 &acb_dma, GFP_KERNEL);
8288 	if (!acb) {
8289 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
8290 			   __func__);
8291 		rval = -ENOMEM;
8292 		goto exit_port_reset;
8293 	}
8294 
8295 	memset(acb, 0, acb_len);
8296 
8297 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
8298 	if (rval != QLA_SUCCESS) {
8299 		rval = -EIO;
8300 		goto exit_free_acb;
8301 	}
8302 
8303 	rval = qla4xxx_disable_acb(ha);
8304 	if (rval != QLA_SUCCESS) {
8305 		rval = -EIO;
8306 		goto exit_free_acb;
8307 	}
8308 
8309 	wait_for_completion_timeout(&ha->disable_acb_comp,
8310 				    DISABLE_ACB_TOV * HZ);
8311 
8312 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
8313 	if (rval != QLA_SUCCESS) {
8314 		rval = -EIO;
8315 		goto exit_free_acb;
8316 	}
8317 
8318 exit_free_acb:
8319 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
8320 			  acb, acb_dma);
8321 exit_port_reset:
8322 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
8323 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
8324 	return rval;
8325 }
8326 
8327 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
8328 {
8329 	struct scsi_qla_host *ha = to_qla_host(shost);
8330 	int rval = QLA_SUCCESS;
8331 	uint32_t idc_ctrl;
8332 
8333 	if (ql4xdontresethba) {
8334 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
8335 				  __func__));
8336 		rval = -EPERM;
8337 		goto exit_host_reset;
8338 	}
8339 
8340 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
8341 		goto recover_adapter;
8342 
8343 	switch (reset_type) {
8344 	case SCSI_ADAPTER_RESET:
8345 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
8346 		break;
8347 	case SCSI_FIRMWARE_RESET:
8348 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8349 			if (is_qla80XX(ha))
8350 				/* set firmware context reset */
8351 				set_bit(DPC_RESET_HA_FW_CONTEXT,
8352 					&ha->dpc_flags);
8353 			else {
8354 				rval = qla4xxx_context_reset(ha);
8355 				goto exit_host_reset;
8356 			}
8357 		}
8358 		break;
8359 	}
8360 
8361 recover_adapter:
8362 	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
8363 	 * reset is issued by application */
8364 	if ((is_qla8032(ha) || is_qla8042(ha)) &&
8365 	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
8366 		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
8367 		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
8368 				 (idc_ctrl | GRACEFUL_RESET_BIT1));
8369 	}
8370 
8371 	rval = qla4xxx_recover_adapter(ha);
8372 	if (rval != QLA_SUCCESS) {
8373 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
8374 				  __func__));
8375 		rval = -EIO;
8376 	}
8377 
8378 exit_host_reset:
8379 	return rval;
8380 }
8381 
8382 /* PCI AER driver recovers from all correctable errors w/o
8383  * driver intervention. For uncorrectable errors PCI AER
8384  * driver calls the following device driver's callbacks
8385  *
8386  * - Fatal Errors - link_reset
8387  * - Non-Fatal Errors - driver's pci_error_detected() which
8388  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
8389  *
8390  * PCI AER driver calls
8391  * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
8392  *               returns RECOVERED or NEED_RESET if fw_hung
8393  * NEED_RESET - driver's slot_reset()
8394  * DISCONNECT - device is dead & cannot recover
8395  * RECOVERED - driver's pci_resume()
8396  */
8397 static pci_ers_result_t
8398 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8399 {
8400 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8401 
8402 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
8403 	    ha->host_no, __func__, state);
8404 
8405 	if (!is_aer_supported(ha))
8406 		return PCI_ERS_RESULT_NONE;
8407 
8408 	switch (state) {
8409 	case pci_channel_io_normal:
8410 		clear_bit(AF_EEH_BUSY, &ha->flags);
8411 		return PCI_ERS_RESULT_CAN_RECOVER;
8412 	case pci_channel_io_frozen:
8413 		set_bit(AF_EEH_BUSY, &ha->flags);
8414 		qla4xxx_mailbox_premature_completion(ha);
8415 		qla4xxx_free_irqs(ha);
8416 		pci_disable_device(pdev);
8417 		/* Return back all IOs */
8418 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
8419 		return PCI_ERS_RESULT_NEED_RESET;
8420 	case pci_channel_io_perm_failure:
8421 		set_bit(AF_EEH_BUSY, &ha->flags);
8422 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
8423 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
8424 		return PCI_ERS_RESULT_DISCONNECT;
8425 	}
8426 	return PCI_ERS_RESULT_NEED_RESET;
8427 }
8428 
8429 /**
8430  * qla4xxx_pci_mmio_enabled() gets called if
8431  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
8432  * and read/write to the device still works.
8433  **/
8434 static pci_ers_result_t
8435 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
8436 {
8437 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8438 
8439 	if (!is_aer_supported(ha))
8440 		return PCI_ERS_RESULT_NONE;
8441 
8442 	return PCI_ERS_RESULT_RECOVERED;
8443 }
8444 
8445 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
8446 {
8447 	uint32_t rval = QLA_ERROR;
8448 	int fn;
8449 	struct pci_dev *other_pdev = NULL;
8450 
8451 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
8452 
8453 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8454 
8455 	if (test_bit(AF_ONLINE, &ha->flags)) {
8456 		clear_bit(AF_ONLINE, &ha->flags);
8457 		clear_bit(AF_LINK_UP, &ha->flags);
8458 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
8459 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
8460 	}
8461 
8462 	fn = PCI_FUNC(ha->pdev->devfn);
8463 	while (fn > 0) {
8464 		fn--;
8465 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
8466 		    "func %x\n", ha->host_no, __func__, fn);
8467 		/* Get the pci device given the domain, bus,
8468 		 * slot/function number */
8469 		other_pdev =
8470 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8471 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8472 		    fn));
8473 
8474 		if (!other_pdev)
8475 			continue;
8476 
8477 		if (atomic_read(&other_pdev->enable_cnt)) {
8478 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
8479 			    "func in enabled state%x\n", ha->host_no,
8480 			    __func__, fn);
8481 			pci_dev_put(other_pdev);
8482 			break;
8483 		}
8484 		pci_dev_put(other_pdev);
8485 	}
8486 
8487 	/* The first function on the card, the reset owner will
8488 	 * start & initialize the firmware. The other functions
8489 	 * on the card will reset the firmware context
8490 	 */
8491 	if (!fn) {
8492 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
8493 		    "0x%x is the owner\n", ha->host_no, __func__,
8494 		    ha->pdev->devfn);
8495 
8496 		ha->isp_ops->idc_lock(ha);
8497 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8498 				    QLA8XXX_DEV_COLD);
8499 		ha->isp_ops->idc_unlock(ha);
8500 
8501 		rval = qla4_8xxx_update_idc_reg(ha);
8502 		if (rval == QLA_ERROR) {
8503 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
8504 				   ha->host_no, __func__);
8505 			ha->isp_ops->idc_lock(ha);
8506 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8507 					    QLA8XXX_DEV_FAILED);
8508 			ha->isp_ops->idc_unlock(ha);
8509 			goto exit_error_recovery;
8510 		}
8511 
8512 		clear_bit(AF_FW_RECOVERY, &ha->flags);
8513 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8514 
8515 		if (rval != QLA_SUCCESS) {
8516 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8517 			    "FAILED\n", ha->host_no, __func__);
8518 			ha->isp_ops->idc_lock(ha);
8519 			qla4_8xxx_clear_drv_active(ha);
8520 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8521 					    QLA8XXX_DEV_FAILED);
8522 			ha->isp_ops->idc_unlock(ha);
8523 		} else {
8524 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
8525 			    "READY\n", ha->host_no, __func__);
8526 			ha->isp_ops->idc_lock(ha);
8527 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8528 					    QLA8XXX_DEV_READY);
8529 			/* Clear driver state register */
8530 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
8531 			qla4_8xxx_set_drv_active(ha);
8532 			ha->isp_ops->idc_unlock(ha);
8533 			ha->isp_ops->enable_intrs(ha);
8534 		}
8535 	} else {
8536 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
8537 		    "the reset owner\n", ha->host_no, __func__,
8538 		    ha->pdev->devfn);
8539 		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
8540 		     QLA8XXX_DEV_READY)) {
8541 			clear_bit(AF_FW_RECOVERY, &ha->flags);
8542 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
8543 			if (rval == QLA_SUCCESS)
8544 				ha->isp_ops->enable_intrs(ha);
8545 
8546 			ha->isp_ops->idc_lock(ha);
8547 			qla4_8xxx_set_drv_active(ha);
8548 			ha->isp_ops->idc_unlock(ha);
8549 		}
8550 	}
8551 exit_error_recovery:
8552 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
8553 	return rval;
8554 }
8555 
8556 static pci_ers_result_t
8557 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
8558 {
8559 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
8560 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8561 	int rc;
8562 
8563 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
8564 	    ha->host_no, __func__);
8565 
8566 	if (!is_aer_supported(ha))
8567 		return PCI_ERS_RESULT_NONE;
8568 
8569 	/* Restore the saved state of PCIe device -
8570 	 * BAR registers, PCI Config space, PCIX, MSI,
8571 	 * IOV states
8572 	 */
8573 	pci_restore_state(pdev);
8574 
8575 	/* pci_restore_state() clears the saved_state flag of the device
8576 	 * save restored state which resets saved_state flag
8577 	 */
8578 	pci_save_state(pdev);
8579 
8580 	/* Initialize device or resume if in suspended state */
8581 	rc = pci_enable_device(pdev);
8582 	if (rc) {
8583 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8584 		    "device after reset\n", ha->host_no, __func__);
8585 		goto exit_slot_reset;
8586 	}
8587 
8588 	ha->isp_ops->disable_intrs(ha);
8589 
8590 	if (is_qla80XX(ha)) {
8591 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8592 			ret = PCI_ERS_RESULT_RECOVERED;
8593 			goto exit_slot_reset;
8594 		} else
8595 			goto exit_slot_reset;
8596 	}
8597 
8598 exit_slot_reset:
8599 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
8600 	    "device after reset\n", ha->host_no, __func__, ret);
8601 	return ret;
8602 }
8603 
8604 static void
8605 qla4xxx_pci_resume(struct pci_dev *pdev)
8606 {
8607 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8608 	int ret;
8609 
8610 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8611 	    ha->host_no, __func__);
8612 
8613 	ret = qla4xxx_wait_for_hba_online(ha);
8614 	if (ret != QLA_SUCCESS) {
8615 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8616 		    "resume I/O from slot/link_reset\n", ha->host_no,
8617 		     __func__);
8618 	}
8619 
8620 	pci_cleanup_aer_uncorrect_error_status(pdev);
8621 	clear_bit(AF_EEH_BUSY, &ha->flags);
8622 }
8623 
8624 static const struct pci_error_handlers qla4xxx_err_handler = {
8625 	.error_detected = qla4xxx_pci_error_detected,
8626 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
8627 	.slot_reset = qla4xxx_pci_slot_reset,
8628 	.resume = qla4xxx_pci_resume,
8629 };
8630 
8631 static struct pci_device_id qla4xxx_pci_tbl[] = {
8632 	{
8633 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8634 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
8635 		.subvendor	= PCI_ANY_ID,
8636 		.subdevice	= PCI_ANY_ID,
8637 	},
8638 	{
8639 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8640 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
8641 		.subvendor	= PCI_ANY_ID,
8642 		.subdevice	= PCI_ANY_ID,
8643 	},
8644 	{
8645 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8646 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
8647 		.subvendor	= PCI_ANY_ID,
8648 		.subdevice	= PCI_ANY_ID,
8649 	},
8650 	{
8651 		.vendor         = PCI_VENDOR_ID_QLOGIC,
8652 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
8653 		.subvendor      = PCI_ANY_ID,
8654 		.subdevice      = PCI_ANY_ID,
8655 	},
8656 	{
8657 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8658 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
8659 		.subvendor	= PCI_ANY_ID,
8660 		.subdevice	= PCI_ANY_ID,
8661 	},
8662 	{
8663 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8664 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8042,
8665 		.subvendor	= PCI_ANY_ID,
8666 		.subdevice	= PCI_ANY_ID,
8667 	},
8668 	{0, 0},
8669 };
8670 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8671 
8672 static struct pci_driver qla4xxx_pci_driver = {
8673 	.name		= DRIVER_NAME,
8674 	.id_table	= qla4xxx_pci_tbl,
8675 	.probe		= qla4xxx_probe_adapter,
8676 	.remove		= qla4xxx_remove_adapter,
8677 	.err_handler = &qla4xxx_err_handler,
8678 };
8679 
8680 static int __init qla4xxx_module_init(void)
8681 {
8682 	int ret;
8683 
8684 	/* Allocate cache for SRBs. */
8685 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8686 				       SLAB_HWCACHE_ALIGN, NULL);
8687 	if (srb_cachep == NULL) {
8688 		printk(KERN_ERR
8689 		       "%s: Unable to allocate SRB cache..."
8690 		       "Failing load!\n", DRIVER_NAME);
8691 		ret = -ENOMEM;
8692 		goto no_srp_cache;
8693 	}
8694 
8695 	/* Derive version string. */
8696 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8697 	if (ql4xextended_error_logging)
8698 		strcat(qla4xxx_version_str, "-debug");
8699 
8700 	qla4xxx_scsi_transport =
8701 		iscsi_register_transport(&qla4xxx_iscsi_transport);
8702 	if (!qla4xxx_scsi_transport){
8703 		ret = -ENODEV;
8704 		goto release_srb_cache;
8705 	}
8706 
8707 	ret = pci_register_driver(&qla4xxx_pci_driver);
8708 	if (ret)
8709 		goto unregister_transport;
8710 
8711 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8712 	return 0;
8713 
8714 unregister_transport:
8715 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8716 release_srb_cache:
8717 	kmem_cache_destroy(srb_cachep);
8718 no_srp_cache:
8719 	return ret;
8720 }
8721 
8722 static void __exit qla4xxx_module_exit(void)
8723 {
8724 	pci_unregister_driver(&qla4xxx_pci_driver);
8725 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8726 	kmem_cache_destroy(srb_cachep);
8727 }
8728 
8729 module_init(qla4xxx_module_init);
8730 module_exit(qla4xxx_module_exit);
8731 
8732 MODULE_AUTHOR("QLogic Corporation");
8733 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
8734 MODULE_LICENSE("GPL");
8735 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
8736