xref: /openbmc/linux/drivers/scsi/be2iscsi/be_main.c (revision 293d5b43)
1 /**
2  * Copyright (C) 2005 - 2015 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
11  *
12  * Contact Information:
13  * linux-drivers@avagotech.com
14  *
15  * Emulex
16  * 3333 Susan Street
17  * Costa Mesa, CA 92626
18  */
19 
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 #include <linux/iscsi_boot_sysfs.h>
30 #include <linux/module.h>
31 #include <linux/bsg-lib.h>
32 #include <linux/irq_poll.h>
33 
34 #include <scsi/libiscsi.h>
35 #include <scsi/scsi_bsg_iscsi.h>
36 #include <scsi/scsi_netlink.h>
37 #include <scsi/scsi_transport_iscsi.h>
38 #include <scsi/scsi_transport.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi.h>
43 #include "be_main.h"
44 #include "be_iscsi.h"
45 #include "be_mgmt.h"
46 #include "be_cmds.h"
47 
/*
 * Module tunables. Only be_max_phys_size is user-documented; the other two
 * are registered with permissions 0 (settable at load time, not via sysfs).
 */
static unsigned int be_iopoll_budget = 10;	/* iterations per irq_poll cycle */
static unsigned int be_max_phys_size = 64;	/* max contiguous alloc, in KB */
static unsigned int enable_msix = 1;		/* use MSI-X when available */

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
/* NOTE(review): be_iopoll_budget/enable_msix are declared unsigned but
 * registered with the 'int' param type -- confirm param_check tolerates it. */
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
62 
/*
 * beiscsi_disp_param() - generate the sysfs "show" handler for a driver
 * attribute. The expanded function prints phba->attr_<name> as a decimal
 * value followed by a newline into @buf, bounded by PAGE_SIZE.
 *
 * Fix: param_val was previously assigned but never used (set-but-unused
 * warning); it is now the value actually passed to snprintf. Output is
 * unchanged.
 */
#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			param_val);\
}
75 
/*
 * beiscsi_change_param() - generate the runtime range-checked setter for a
 * driver attribute. The expanded function stores @val in phba->attr_<name>
 * and returns 0 when @val lies within [_minval, _maxval]; otherwise it logs
 * the permitted range and returns -EINVAL, leaving the attribute unchanged.
 * _defaval is accepted for signature symmetry with beiscsi_init_param() but
 * is not used here.
 */
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
		return -EINVAL;\
}
94 
/*
 * beiscsi_store_param() - generate the sysfs "store" handler for a driver
 * attribute. The expanded function rejects input not starting with a digit,
 * parses it with sscanf("%i") (decimal, octal or hex), and delegates the
 * range check and assignment to beiscsi_<name>_change(). Returns the input
 * length on success, -EINVAL otherwise.
 */
#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			 struct device_attribute *attr, const char *buf,\
			 size_t count) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}
113 
/*
 * beiscsi_init_param() - generate the driver-load-time initializer for a
 * driver attribute. Behaves like beiscsi_<name>_change() except that an
 * out-of-range @val makes the attribute fall back to _defval (the call
 * still returns -EINVAL in that case).
 */
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}
129 
/*
 * BEISCSI_RW_ATTR() - declare everything needed for one read/write driver
 * attribute in a single line: the backing module parameter with its
 * description, the sysfs show/store handlers, the runtime and init-time
 * setters, and the DEVICE_ATTR instance wiring show/store together.
 */
#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	      beiscsi_##_name##_disp, beiscsi_##_name##_store)
140 
/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
/* Bit-mask attribute controlling beiscsi_log() verbosity (module param
 * beiscsi_log_enable plus a read/write sysfs attribute). */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events		: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling		: 0x08\n"
		"\t\t\t\tIO Path Events		: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n"
		"\t\t\t\tiSCSI Protocol		: 0x40\n");
154 
/* Read-only adapter information attributes; the *_disp show handlers are
 * defined elsewhere in the driver. */
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	     beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	     beiscsi_free_session_disp, NULL);
/* NULL-terminated attribute list installed on the host through
 * beiscsi_sht.shost_attrs. */
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};
173 
/* Human-readable names for completion-queue entry codes used in log
 * messages.
 * NOTE(review): presumably indexed directly by the hardware completion
 * code, so the order must match the firmware definitions -- do not
 * reorder or insert entries; confirm against the CQE code enum. */
static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};
212 
213 static int beiscsi_slave_configure(struct scsi_device *sdev)
214 {
215 	blk_queue_max_segment_size(sdev->request_queue, 65536);
216 	return 0;
217 }
218 
219 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
220 {
221 	struct iscsi_cls_session *cls_session;
222 	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
223 	struct beiscsi_io_task *aborted_io_task;
224 	struct iscsi_conn *conn;
225 	struct beiscsi_conn *beiscsi_conn;
226 	struct beiscsi_hba *phba;
227 	struct iscsi_session *session;
228 	struct invalidate_command_table *inv_tbl;
229 	struct be_dma_mem nonemb_cmd;
230 	unsigned int cid, tag, num_invalidate;
231 	int rc;
232 
233 	cls_session = starget_to_session(scsi_target(sc->device));
234 	session = cls_session->dd_data;
235 
236 	spin_lock_bh(&session->frwd_lock);
237 	if (!aborted_task || !aborted_task->sc) {
238 		/* we raced */
239 		spin_unlock_bh(&session->frwd_lock);
240 		return SUCCESS;
241 	}
242 
243 	aborted_io_task = aborted_task->dd_data;
244 	if (!aborted_io_task->scsi_cmnd) {
245 		/* raced or invalid command */
246 		spin_unlock_bh(&session->frwd_lock);
247 		return SUCCESS;
248 	}
249 	spin_unlock_bh(&session->frwd_lock);
250 	/* Invalidate WRB Posted for this Task */
251 	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
252 		      aborted_io_task->pwrb_handle->pwrb,
253 		      1);
254 
255 	conn = aborted_task->conn;
256 	beiscsi_conn = conn->dd_data;
257 	phba = beiscsi_conn->phba;
258 
259 	/* invalidate iocb */
260 	cid = beiscsi_conn->beiscsi_conn_cid;
261 	inv_tbl = phba->inv_tbl;
262 	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
263 	inv_tbl->cid = cid;
264 	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
265 	num_invalidate = 1;
266 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
267 				sizeof(struct invalidate_commands_params_in),
268 				&nonemb_cmd.dma);
269 	if (nonemb_cmd.va == NULL) {
270 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
271 			    "BM_%d : Failed to allocate memory for"
272 			    "mgmt_invalidate_icds\n");
273 		return FAILED;
274 	}
275 	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
276 
277 	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
278 				   cid, &nonemb_cmd);
279 	if (!tag) {
280 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
281 			    "BM_%d : mgmt_invalidate_icds could not be"
282 			    "submitted\n");
283 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
284 				    nonemb_cmd.va, nonemb_cmd.dma);
285 
286 		return FAILED;
287 	}
288 
289 	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
290 	if (rc != -EBUSY)
291 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
292 				    nonemb_cmd.va, nonemb_cmd.dma);
293 
294 	return iscsi_eh_abort(sc);
295 }
296 
/**
 * beiscsi_eh_device_reset - SCSI error-handler LUN reset
 * @sc: SCSI command identifying the device (LUN) to reset
 *
 * Walks every task of the session's leading connection, marks the WRB of
 * each active task addressed to the same LUN as invalid and collects its
 * ICD, asks the FW to invalidate them all in one mgmt_invalidate_icds()
 * call, then hands the reset to libiscsi's iscsi_eh_device_reset().
 */
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	/* per-hba scratch table, sized for BE2_CMDS_PER_CXN entries */
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		/* only tasks targeting the same LUN are invalidated */
		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		/* Invalidate WRB Posted for this Task */
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb,
			      1);

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->frwd_lock);
	inv_tbl = phba->inv_tbl;

	/* NOTE(review): the FW command below is issued even when
	 * num_invalidate == 0 -- confirm that is intended. */
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for"
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	/* on -EBUSY the buffer is presumably released by the MCC
	 * completion path, so it must not be freed here -- TODO confirm */
	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}
376 
377 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
378 {
379 	struct beiscsi_hba *phba = data;
380 	struct mgmt_session_info *boot_sess = &phba->boot_sess;
381 	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
382 	char *str = buf;
383 	int rc;
384 
385 	switch (type) {
386 	case ISCSI_BOOT_TGT_NAME:
387 		rc = sprintf(buf, "%.*s\n",
388 			    (int)strlen(boot_sess->target_name),
389 			    (char *)&boot_sess->target_name);
390 		break;
391 	case ISCSI_BOOT_TGT_IP_ADDR:
392 		if (boot_conn->dest_ipaddr.ip_type == 0x1)
393 			rc = sprintf(buf, "%pI4\n",
394 				(char *)&boot_conn->dest_ipaddr.addr);
395 		else
396 			rc = sprintf(str, "%pI6\n",
397 				(char *)&boot_conn->dest_ipaddr.addr);
398 		break;
399 	case ISCSI_BOOT_TGT_PORT:
400 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
401 		break;
402 
403 	case ISCSI_BOOT_TGT_CHAP_NAME:
404 		rc = sprintf(str,  "%.*s\n",
405 			     boot_conn->negotiated_login_options.auth_data.chap.
406 			     target_chap_name_length,
407 			     (char *)&boot_conn->negotiated_login_options.
408 			     auth_data.chap.target_chap_name);
409 		break;
410 	case ISCSI_BOOT_TGT_CHAP_SECRET:
411 		rc = sprintf(str,  "%.*s\n",
412 			     boot_conn->negotiated_login_options.auth_data.chap.
413 			     target_secret_length,
414 			     (char *)&boot_conn->negotiated_login_options.
415 			     auth_data.chap.target_secret);
416 		break;
417 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
418 		rc = sprintf(str,  "%.*s\n",
419 			     boot_conn->negotiated_login_options.auth_data.chap.
420 			     intr_chap_name_length,
421 			     (char *)&boot_conn->negotiated_login_options.
422 			     auth_data.chap.intr_chap_name);
423 		break;
424 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
425 		rc = sprintf(str,  "%.*s\n",
426 			     boot_conn->negotiated_login_options.auth_data.chap.
427 			     intr_secret_length,
428 			     (char *)&boot_conn->negotiated_login_options.
429 			     auth_data.chap.intr_secret);
430 		break;
431 	case ISCSI_BOOT_TGT_FLAGS:
432 		rc = sprintf(str, "2\n");
433 		break;
434 	case ISCSI_BOOT_TGT_NIC_ASSOC:
435 		rc = sprintf(str, "0\n");
436 		break;
437 	default:
438 		rc = -ENOSYS;
439 		break;
440 	}
441 	return rc;
442 }
443 
444 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
445 {
446 	struct beiscsi_hba *phba = data;
447 	char *str = buf;
448 	int rc;
449 
450 	switch (type) {
451 	case ISCSI_BOOT_INI_INITIATOR_NAME:
452 		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
453 		break;
454 	default:
455 		rc = -ENOSYS;
456 		break;
457 	}
458 	return rc;
459 }
460 
461 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
462 {
463 	struct beiscsi_hba *phba = data;
464 	char *str = buf;
465 	int rc;
466 
467 	switch (type) {
468 	case ISCSI_BOOT_ETH_FLAGS:
469 		rc = sprintf(str, "2\n");
470 		break;
471 	case ISCSI_BOOT_ETH_INDEX:
472 		rc = sprintf(str, "0\n");
473 		break;
474 	case ISCSI_BOOT_ETH_MAC:
475 		rc  = beiscsi_get_macaddr(str, phba);
476 		break;
477 	default:
478 		rc = -ENOSYS;
479 		break;
480 	}
481 	return rc;
482 }
483 
484 
485 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
486 {
487 	umode_t rc;
488 
489 	switch (type) {
490 	case ISCSI_BOOT_TGT_NAME:
491 	case ISCSI_BOOT_TGT_IP_ADDR:
492 	case ISCSI_BOOT_TGT_PORT:
493 	case ISCSI_BOOT_TGT_CHAP_NAME:
494 	case ISCSI_BOOT_TGT_CHAP_SECRET:
495 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
496 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
497 	case ISCSI_BOOT_TGT_NIC_ASSOC:
498 	case ISCSI_BOOT_TGT_FLAGS:
499 		rc = S_IRUGO;
500 		break;
501 	default:
502 		rc = 0;
503 		break;
504 	}
505 	return rc;
506 }
507 
508 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
509 {
510 	umode_t rc;
511 
512 	switch (type) {
513 	case ISCSI_BOOT_INI_INITIATOR_NAME:
514 		rc = S_IRUGO;
515 		break;
516 	default:
517 		rc = 0;
518 		break;
519 	}
520 	return rc;
521 }
522 
523 
524 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
525 {
526 	umode_t rc;
527 
528 	switch (type) {
529 	case ISCSI_BOOT_ETH_FLAGS:
530 	case ISCSI_BOOT_ETH_MAC:
531 	case ISCSI_BOOT_ETH_INDEX:
532 		rc = S_IRUGO;
533 		break;
534 	default:
535 		rc = 0;
536 		break;
537 	}
538 	return rc;
539 }
540 
/*------------------- PCI Driver operations and data ----------------- */
/* PCI device IDs this driver binds to; the vendor/device constants come
 * from the driver headers. */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
552 
553 
/* SCSI host template. Most entry points are serviced by libiscsi /
 * scsi-core helpers; only the error handlers and slave_configure are
 * driver-specific. */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

/* iSCSI transport template used by beiscsi_hba_alloc(); presumably
 * assigned during module init -- not visible in this part of the file. */
static struct scsi_transport_template *beiscsi_scsi_transport;
577 
578 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
579 {
580 	struct beiscsi_hba *phba;
581 	struct Scsi_Host *shost;
582 
583 	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
584 	if (!shost) {
585 		dev_err(&pcidev->dev,
586 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
587 		return NULL;
588 	}
589 	shost->max_id = BE2_MAX_SESSIONS;
590 	shost->max_channel = 0;
591 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
592 	shost->max_lun = BEISCSI_NUM_MAX_LUN;
593 	shost->transportt = beiscsi_scsi_transport;
594 	phba = iscsi_host_priv(shost);
595 	memset(phba, 0, sizeof(*phba));
596 	phba->shost = shost;
597 	phba->pcidev = pci_dev_get(pcidev);
598 	pci_set_drvdata(pcidev, phba);
599 	phba->interface_handle = 0xFFFFFFFF;
600 
601 	return phba;
602 }
603 
/*
 * beiscsi_unmap_pci_function() - undo beiscsi_map_pci_bars(): unmap
 * whichever of the CSR, doorbell and PCI-config BAR mappings exist and
 * clear the cached pointers so the teardown is safe to call repeatedly.
 */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
619 
/*
 * beiscsi_map_pci_bars() - ioremap the BARs the driver uses:
 *   BAR 2 - controller CSR space,
 *   BAR 4 - doorbell space (only the first 128 KB is mapped),
 *   BAR 1 (BE_GEN2) or BAR 0 (other generations) - PCI config/mailbox.
 * Both the virtual and bus addresses are cached in @phba/@phba->ctrl.
 * Returns 0, or -ENOMEM after unmapping anything already mapped.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* only the first 128 KB of the doorbell BAR is mapped */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* the config BAR index differs between controller generations */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
660 
661 static int beiscsi_enable_pci(struct pci_dev *pcidev)
662 {
663 	int ret;
664 
665 	ret = pci_enable_device(pcidev);
666 	if (ret) {
667 		dev_err(&pcidev->dev,
668 			"beiscsi_enable_pci - enable device failed\n");
669 		return ret;
670 	}
671 
672 	ret = pci_request_regions(pcidev, DRV_NAME);
673 	if (ret) {
674 		dev_err(&pcidev->dev,
675 				"beiscsi_enable_pci - request region failed\n");
676 		goto pci_dev_disable;
677 	}
678 
679 	pci_set_master(pcidev);
680 	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
681 	if (ret) {
682 		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
683 		if (ret) {
684 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
685 			goto pci_region_release;
686 		} else {
687 			ret = pci_set_consistent_dma_mask(pcidev,
688 							  DMA_BIT_MASK(32));
689 		}
690 	} else {
691 		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
692 		if (ret) {
693 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
694 			goto pci_region_release;
695 		}
696 	}
697 	return 0;
698 
699 pci_region_release:
700 	pci_release_regions(pcidev);
701 pci_dev_disable:
702 	pci_disable_device(pcidev);
703 
704 	return ret;
705 }
706 
/*
 * be_ctrl_init() - initialize the controller-access layer: map the PCI
 * BARs, allocate the MCC mailbox DMA buffer (over-allocated by 16 bytes
 * so a 16-byte-aligned view can be carved out of it) and initialize the
 * mailbox mutex and MCC spinlock. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 bytes of padding so the aligned view always fits */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	/* aligned view of the mailbox inside the padded allocation */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}
736 
/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr  device priv structure
 *
 * Derives the driver's per-controller limits from the FW-reported CID and
 * ICD ranges. For the first supported ULP, the ICD start/count are aligned
 * to the per-page posting granularity (BE2_SGE SGEs per ICD); ICDs lost to
 * alignment are subtracted from the usable count.
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				/* round up to the next page boundary */
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						    ~(align_mask));
				phba->fw_config.
					iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			/* round the count down to a whole number of pages */
			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					"BM_%d : Aligned ICD values\n"
					"\t ICD Start : %d\n"
					"\t ICD Count : %d\n"
					"\t ICD Discarded : %d\n",
					phba->fw_config.
					iscsi_icd_start[ulp_num],
					phba->fw_config.
					iscsi_icd_count[ulp_num],
					icd_count_unavailable);
			/* only the first supported ULP is considered */
			break;
		}
	}

	/* ulp_num still indexes the ULP the loop stopped at.
	 * NOTE(review): if no ULP is supported the loop finishes with
	 * ulp_num == BEISCSI_ULP_COUNT -- confirm iscsi_icd_count has a
	 * valid slot at that index. */
	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}
818 
/*
 * hwi_ring_eq_db() - ring the event-queue doorbell for EQ @id:
 * acknowledge @num_processed consumed entries and optionally clear the
 * interrupt (@clr_interrupt), re-arm the EQ (@rearm) and set the event
 * bit (@event). The EQ id is split across low and high register fields.
 */
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		  DB_EQ_RING_ID_HIGH_MASK)
		  << DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
844 
/**
 * be_isr_mcc - interrupt handler for the dedicated MCC event queue
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj registered for this vector
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	/* dev_id is the be_eq_obj registered for the MCC vector */
	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba =  pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	/* consume all valid EQ entries, flagging MCC CQ work whenever an
	 * entry's resource id matches the MCC completion queue */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		/* invalidate the entry and advance the tail */
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	/* MCC CQ processing is deferred to the workqueue */
	if (pbe_eq->todo_mcc_cq)
		queue_work(phba->wq, &pbe_eq->work_cqs);
	/* acknowledge the consumed entries and re-arm the EQ */
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
889 
/**
 * be_isr_msix - interrupt handler for a per-CPU I/O event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj registered for this vector
 */
895 static irqreturn_t be_isr_msix(int irq, void *dev_id)
896 {
897 	struct beiscsi_hba *phba;
898 	struct be_queue_info *eq;
899 	struct be_eq_obj *pbe_eq;
900 
901 	pbe_eq = dev_id;
902 	eq = &pbe_eq->q;
903 
904 	phba = pbe_eq->phba;
905 
906 	/* disable interrupt till iopoll completes */
907 	hwi_ring_eq_db(phba, eq->id, 1,	0, 0, 1);
908 	irq_poll_sched(&pbe_eq->iopoll);
909 
910 	return IRQ_HANDLED;
911 }
912 
913 /**
914  * be_isr - The isr routine of the driver.
915  * @irq: Not used
916  * @dev_id: Pointer to host adapter structure
917  */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	/* INTx may be shared: read this function's interrupt status
	 * register to check whether we actually raised the interrupt */
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	/* in INTx mode only EQ 0 is used */
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	/* drain the EQ: MCC completions are deferred to the workqueue,
	 * I/O completions are handed to irq_poll */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			num_mcceq_processed++;
		} else {
			irq_poll_sched(&pbe_eq->iopoll);
			num_ioeq_processed++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (num_ioeq_processed || num_mcceq_processed) {
		if (pbe_eq->todo_mcc_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);

		/* re-arm (rearm=1) only when no I/O work was scheduled to
		 * irq_poll; otherwise just acknowledge (rearm=0) -- the
		 * poll path presumably re-arms when it finishes (TODO
		 * confirm against the iopoll handler) */
		if ((num_mcceq_processed) && (!num_ioeq_processed))
			hwi_ring_eq_db(phba, eq->id, 0,
				      (num_ioeq_processed +
				       num_mcceq_processed) , 1, 1);
		else
			hwi_ring_eq_db(phba, eq->id, 0,
				       (num_ioeq_processed +
					num_mcceq_processed), 0, 1);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
984 
985 
986 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
987 {
988 	struct pci_dev *pcidev = phba->pcidev;
989 	struct hwi_controller *phwi_ctrlr;
990 	struct hwi_context_memory *phwi_context;
991 	int ret, msix_vec, i, j;
992 
993 	phwi_ctrlr = phba->phwi_ctrlr;
994 	phwi_context = phwi_ctrlr->phwi_ctxt;
995 
996 	if (phba->msix_enabled) {
997 		for (i = 0; i < phba->num_cpus; i++) {
998 			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
999 						    GFP_KERNEL);
1000 			if (!phba->msi_name[i]) {
1001 				ret = -ENOMEM;
1002 				goto free_msix_irqs;
1003 			}
1004 
1005 			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
1006 				phba->shost->host_no, i);
1007 			msix_vec = phba->msix_entries[i].vector;
1008 			ret = request_irq(msix_vec, be_isr_msix, 0,
1009 					  phba->msi_name[i],
1010 					  &phwi_context->be_eq[i]);
1011 			if (ret) {
1012 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1013 					    "BM_%d : beiscsi_init_irqs-Failed to"
1014 					    "register msix for i = %d\n",
1015 					    i);
1016 				kfree(phba->msi_name[i]);
1017 				goto free_msix_irqs;
1018 			}
1019 		}
1020 		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
1021 		if (!phba->msi_name[i]) {
1022 			ret = -ENOMEM;
1023 			goto free_msix_irqs;
1024 		}
1025 		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
1026 			phba->shost->host_no);
1027 		msix_vec = phba->msix_entries[i].vector;
1028 		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
1029 				  &phwi_context->be_eq[i]);
1030 		if (ret) {
1031 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
1032 				    "BM_%d : beiscsi_init_irqs-"
1033 				    "Failed to register beiscsi_msix_mcc\n");
1034 			kfree(phba->msi_name[i]);
1035 			goto free_msix_irqs;
1036 		}
1037 
1038 	} else {
1039 		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
1040 				  "beiscsi", phba);
1041 		if (ret) {
1042 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1043 				    "BM_%d : beiscsi_init_irqs-"
1044 				    "Failed to register irq\\n");
1045 			return ret;
1046 		}
1047 	}
1048 	return 0;
1049 free_msix_irqs:
1050 	for (j = i - 1; j >= 0; j--) {
1051 		kfree(phba->msi_name[j]);
1052 		msix_vec = phba->msix_entries[j].vector;
1053 		free_irq(msix_vec, &phwi_context->be_eq[j]);
1054 	}
1055 	return ret;
1056 }
1057 
1058 void hwi_ring_cq_db(struct beiscsi_hba *phba,
1059 			   unsigned int id, unsigned int num_processed,
1060 			   unsigned char rearm)
1061 {
1062 	u32 val = 0;
1063 
1064 	if (rearm)
1065 		val |= 1 << DB_CQ_REARM_SHIFT;
1066 
1067 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
1068 
1069 	/* Setting lower order CQ_ID Bits */
1070 	val |= (id & DB_CQ_RING_ID_LOW_MASK);
1071 
1072 	/* Setting Higher order CQ_ID Bits */
1073 	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
1074 		  DB_CQ_RING_ID_HIGH_MASK)
1075 		  << DB_CQ_HIGH_SET_SHIFT);
1076 
1077 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
1078 }
1079 
1080 static unsigned int
1081 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1082 			  struct beiscsi_hba *phba,
1083 			  struct pdu_base *ppdu,
1084 			  unsigned long pdu_len,
1085 			  void *pbuffer, unsigned long buf_len)
1086 {
1087 	struct iscsi_conn *conn = beiscsi_conn->conn;
1088 	struct iscsi_session *session = conn->session;
1089 	struct iscsi_task *task;
1090 	struct beiscsi_io_task *io_task;
1091 	struct iscsi_hdr *login_hdr;
1092 
1093 	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1094 						PDUBASE_OPCODE_MASK) {
1095 	case ISCSI_OP_NOOP_IN:
1096 		pbuffer = NULL;
1097 		buf_len = 0;
1098 		break;
1099 	case ISCSI_OP_ASYNC_EVENT:
1100 		break;
1101 	case ISCSI_OP_REJECT:
1102 		WARN_ON(!pbuffer);
1103 		WARN_ON(!(buf_len == 48));
1104 		beiscsi_log(phba, KERN_ERR,
1105 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1106 			    "BM_%d : In ISCSI_OP_REJECT\n");
1107 		break;
1108 	case ISCSI_OP_LOGIN_RSP:
1109 	case ISCSI_OP_TEXT_RSP:
1110 		task = conn->login_task;
1111 		io_task = task->dd_data;
1112 		login_hdr = (struct iscsi_hdr *)ppdu;
1113 		login_hdr->itt = io_task->libiscsi_itt;
1114 		break;
1115 	default:
1116 		beiscsi_log(phba, KERN_WARNING,
1117 			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1118 			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1119 			    (ppdu->
1120 			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
1121 			     & PDUBASE_OPCODE_MASK));
1122 		return 1;
1123 	}
1124 
1125 	spin_lock_bh(&session->back_lock);
1126 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1127 	spin_unlock_bh(&session->back_lock);
1128 	return 0;
1129 }
1130 
1131 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1132 {
1133 	struct sgl_handle *psgl_handle;
1134 
1135 	spin_lock_bh(&phba->io_sgl_lock);
1136 	if (phba->io_sgl_hndl_avbl) {
1137 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1138 			    "BM_%d : In alloc_io_sgl_handle,"
1139 			    " io_sgl_alloc_index=%d\n",
1140 			    phba->io_sgl_alloc_index);
1141 
1142 		psgl_handle = phba->io_sgl_hndl_base[phba->
1143 						io_sgl_alloc_index];
1144 		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
1145 		phba->io_sgl_hndl_avbl--;
1146 		if (phba->io_sgl_alloc_index == (phba->params.
1147 						 ios_per_ctrl - 1))
1148 			phba->io_sgl_alloc_index = 0;
1149 		else
1150 			phba->io_sgl_alloc_index++;
1151 	} else
1152 		psgl_handle = NULL;
1153 	spin_unlock_bh(&phba->io_sgl_lock);
1154 	return psgl_handle;
1155 }
1156 
1157 static void
1158 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1159 {
1160 	spin_lock_bh(&phba->io_sgl_lock);
1161 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1162 		    "BM_%d : In free_,io_sgl_free_index=%d\n",
1163 		    phba->io_sgl_free_index);
1164 
1165 	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
1166 		/*
1167 		 * this can happen if clean_task is called on a task that
1168 		 * failed in xmit_task or alloc_pdu.
1169 		 */
1170 		 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1171 			     "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
1172 			     "value there=%p\n", phba->io_sgl_free_index,
1173 			     phba->io_sgl_hndl_base
1174 			     [phba->io_sgl_free_index]);
1175 		 spin_unlock_bh(&phba->io_sgl_lock);
1176 		return;
1177 	}
1178 	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
1179 	phba->io_sgl_hndl_avbl++;
1180 	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
1181 		phba->io_sgl_free_index = 0;
1182 	else
1183 		phba->io_sgl_free_index++;
1184 	spin_unlock_bh(&phba->io_sgl_lock);
1185 }
1186 
1187 static inline struct wrb_handle *
1188 beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
1189 		       unsigned int wrbs_per_cxn)
1190 {
1191 	struct wrb_handle *pwrb_handle;
1192 
1193 	spin_lock_bh(&pwrb_context->wrb_lock);
1194 	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
1195 	pwrb_context->wrb_handles_available--;
1196 	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
1197 		pwrb_context->alloc_index = 0;
1198 	else
1199 		pwrb_context->alloc_index++;
1200 	spin_unlock_bh(&pwrb_context->wrb_lock);
1201 
1202 	return pwrb_handle;
1203 }
1204 
1205 /**
1206  * alloc_wrb_handle - To allocate a wrb handle
1207  * @phba: The hba pointer
1208  * @cid: The cid to use for allocation
1209  * @pwrb_context: ptr to ptr to wrb context
1210  *
1211  * This happens under session_lock until submission to chip
1212  */
1213 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
1214 				    struct hwi_wrb_context **pcontext)
1215 {
1216 	struct hwi_wrb_context *pwrb_context;
1217 	struct hwi_controller *phwi_ctrlr;
1218 	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1219 
1220 	phwi_ctrlr = phba->phwi_ctrlr;
1221 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1222 	/* return the context address */
1223 	*pcontext = pwrb_context;
1224 	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
1225 }
1226 
1227 static inline void
1228 beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
1229 		       struct wrb_handle *pwrb_handle,
1230 		       unsigned int wrbs_per_cxn)
1231 {
1232 	spin_lock_bh(&pwrb_context->wrb_lock);
1233 	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1234 	pwrb_context->wrb_handles_available++;
1235 	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
1236 		pwrb_context->free_index = 0;
1237 	else
1238 		pwrb_context->free_index++;
1239 	spin_unlock_bh(&pwrb_context->wrb_lock);
1240 }
1241 
1242 /**
1243  * free_wrb_handle - To free the wrb handle back to pool
1244  * @phba: The hba pointer
1245  * @pwrb_context: The context to free from
1246  * @pwrb_handle: The wrb_handle to free
1247  *
1248  * This happens under session_lock until submission to chip
1249  */
1250 static void
1251 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1252 		struct wrb_handle *pwrb_handle)
1253 {
1254 	beiscsi_put_wrb_handle(pwrb_context,
1255 			       pwrb_handle,
1256 			       phba->params.wrbs_per_cxn);
1257 	beiscsi_log(phba, KERN_INFO,
1258 		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1259 		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
1260 		    "wrb_handles_available=%d\n",
1261 		    pwrb_handle, pwrb_context->free_index,
1262 		    pwrb_context->wrb_handles_available);
1263 }
1264 
1265 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1266 {
1267 	struct sgl_handle *psgl_handle;
1268 
1269 	spin_lock_bh(&phba->mgmt_sgl_lock);
1270 	if (phba->eh_sgl_hndl_avbl) {
1271 		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1272 		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1273 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1274 			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1275 			    phba->eh_sgl_alloc_index,
1276 			    phba->eh_sgl_alloc_index);
1277 
1278 		phba->eh_sgl_hndl_avbl--;
1279 		if (phba->eh_sgl_alloc_index ==
1280 		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1281 		     1))
1282 			phba->eh_sgl_alloc_index = 0;
1283 		else
1284 			phba->eh_sgl_alloc_index++;
1285 	} else
1286 		psgl_handle = NULL;
1287 	spin_unlock_bh(&phba->mgmt_sgl_lock);
1288 	return psgl_handle;
1289 }
1290 
1291 void
1292 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1293 {
1294 	spin_lock_bh(&phba->mgmt_sgl_lock);
1295 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1296 		    "BM_%d : In  free_mgmt_sgl_handle,"
1297 		    "eh_sgl_free_index=%d\n",
1298 		    phba->eh_sgl_free_index);
1299 
1300 	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1301 		/*
1302 		 * this can happen if clean_task is called on a task that
1303 		 * failed in xmit_task or alloc_pdu.
1304 		 */
1305 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1306 			    "BM_%d : Double Free in eh SGL ,"
1307 			    "eh_sgl_free_index=%d\n",
1308 			    phba->eh_sgl_free_index);
1309 		spin_unlock_bh(&phba->mgmt_sgl_lock);
1310 		return;
1311 	}
1312 	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1313 	phba->eh_sgl_hndl_avbl++;
1314 	if (phba->eh_sgl_free_index ==
1315 	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1316 		phba->eh_sgl_free_index = 0;
1317 	else
1318 		phba->eh_sgl_free_index++;
1319 	spin_unlock_bh(&phba->mgmt_sgl_lock);
1320 }
1321 
/*
 * be_complete_io - complete a SCSI command from a solicited CQE
 *
 * Translates the normalized CQE fields (response, status, flags, residual)
 * into the midlayer result, copies sense data on CHECK CONDITION, unmaps
 * the command's DMA and completes the task. Called with the session
 * back_lock held by hwi_complete_cmd().
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* pull the response fields out of the normalized CQE copy */
	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	/* no midlayer command attached: only release the DMA mapping */
	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			/* short transfer below the midlayer's underflow
			 * threshold is treated as an error
			 */
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	/* sense data follows a big-endian 16-bit length in the status BHS */
	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
1389 
1390 static void
1391 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1392 		    struct iscsi_task *task,
1393 		    struct common_sol_cqe *csol_cqe)
1394 {
1395 	struct iscsi_logout_rsp *hdr;
1396 	struct beiscsi_io_task *io_task = task->dd_data;
1397 	struct iscsi_conn *conn = beiscsi_conn->conn;
1398 
1399 	hdr = (struct iscsi_logout_rsp *)task->hdr;
1400 	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1401 	hdr->t2wait = 5;
1402 	hdr->t2retain = 0;
1403 	hdr->flags = csol_cqe->i_flags;
1404 	hdr->response = csol_cqe->i_resp;
1405 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1406 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1407 				     csol_cqe->cmd_wnd - 1);
1408 
1409 	hdr->dlength[0] = 0;
1410 	hdr->dlength[1] = 0;
1411 	hdr->dlength[2] = 0;
1412 	hdr->hlength = 0;
1413 	hdr->itt = io_task->libiscsi_itt;
1414 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1415 }
1416 
1417 static void
1418 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1419 		 struct iscsi_task *task,
1420 		 struct common_sol_cqe *csol_cqe)
1421 {
1422 	struct iscsi_tm_rsp *hdr;
1423 	struct iscsi_conn *conn = beiscsi_conn->conn;
1424 	struct beiscsi_io_task *io_task = task->dd_data;
1425 
1426 	hdr = (struct iscsi_tm_rsp *)task->hdr;
1427 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1428 	hdr->flags = csol_cqe->i_flags;
1429 	hdr->response = csol_cqe->i_resp;
1430 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1431 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1432 				     csol_cqe->cmd_wnd - 1);
1433 
1434 	hdr->itt = io_task->libiscsi_itt;
1435 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1436 }
1437 
/*
 * hwi_complete_drvr_msgs - complete a driver-internal message CQE
 *
 * Decodes the wrb index and cid from the it_dmsg CQE (the field layout
 * differs between BE2/BE3-R and later chips), looks up the originating
 * task through the wrb handle, clears its WRB and drops the task
 * reference.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	uint16_t wrb_index, cid, cri_index;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* chip generation selects the CQE field layout */
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	/* scrub the WRB before the task (and handle) are recycled */
	memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
	iscsi_put_task(task);
}
1471 
1472 static void
1473 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1474 			struct iscsi_task *task,
1475 			struct common_sol_cqe *csol_cqe)
1476 {
1477 	struct iscsi_nopin *hdr;
1478 	struct iscsi_conn *conn = beiscsi_conn->conn;
1479 	struct beiscsi_io_task *io_task = task->dd_data;
1480 
1481 	hdr = (struct iscsi_nopin *)task->hdr;
1482 	hdr->flags = csol_cqe->i_flags;
1483 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1484 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1485 				     csol_cqe->cmd_wnd - 1);
1486 
1487 	hdr->opcode = ISCSI_OP_NOOP_IN;
1488 	hdr->itt = io_task->libiscsi_itt;
1489 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1490 }
1491 
/*
 * adapter_get_sol_cqe - normalize a solicited CQE into common_sol_cqe
 *
 * The solicited CQE layout differs between BE2/BE3-R and later chips;
 * this copies each field into a chip-independent structure so the
 * completion paths need only one format.
 */
static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
		struct sol_cqe *psol,
		struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		/* NOTE(review): in the v2 layout the i_sts field appears to
		 * be dual-purpose - read as status when cmd_cmpl is set,
		 * otherwise as the response code; confirm against the chip
		 * CQE definition.
		 */
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		/* v2 reports under/overflow as single bits, not flag bytes */
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}
1544 
1545 
/*
 * hwi_complete_cmd - dispatch a solicited completion to its handler
 *
 * Normalizes the CQE, resolves the wrb handle (and thus the task) from
 * the cid/wrb_index it carries, then completes the task per its wrb_type
 * under the session back_lock.
 */
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	uint16_t cri_index = 0;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	spin_lock_bh(&session->back_lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		/* a NOP-Out submitted on the I/O path completes as NOP-In */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		/* TMFs share the logout wrb_type; distinguish by opcode */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		/* login completions arrive on the unsolicited path instead */
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d"
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}
1616 
1617 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1618 					  *pasync_ctx, unsigned int is_header,
1619 					  unsigned int host_write_ptr)
1620 {
1621 	if (is_header)
1622 		return &pasync_ctx->async_entry[host_write_ptr].
1623 		    header_busy_list;
1624 	else
1625 		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1626 }
1627 
/*
 * hwi_get_async_handle - map an unsolicited CQE back to its pdu handle
 *
 * Reconstructs the buffer's bus address from the CQE (the CQE reports
 * the address past the data, so dpl is subtracted back out), then scans
 * the matching busy list for the handle with that address. Fills in the
 * handle's cri, header flag and length, and returns the ring index via
 * *pcq_index. Returns NULL on an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;
	unsigned int index, dpl;

	/* chip generation selects the CQE field layout */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      index, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      index, pdpdu_cqe);
	}

	/* CQE carries the end-of-data address; back up by dpl bytes */
	phys_addr.u.a32.address_lo =
		(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					db_addr_lo) / 32] - dpl);
	phys_addr.u.a32.address_hi =
		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
				       db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						      is_header, index);
		break;
	case UNSOL_DATA_NOTIFY:
		 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						      is_header, index);
		break;
	default:
		pbusy_list = NULL;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unexpected code=%d\n",
			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			    code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* find the posted handle whose bus address matches the CQE */
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = dpl;
	*pcq_index = index;

	return pasync_handle;
}
1700 
/*
 * hwi_update_async_writables - advance the firmware read pointer shadow
 *
 * Walks the header or data ring's ep_read_ptr forward (with wrap) until
 * it reaches cq_index, marking the first handle on each traversed busy
 * list as consumed and counting the slots that became writable. A zero
 * advance means a duplicate notification and triggers a WARN.
 *
 * Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
			    struct hwi_async_pdu_context *pasync_ctx,
			    unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	/* select the header or data ring's bookkeeping */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	/* catch the shadow read pointer up to the index from the CQE */
	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
			    cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1751 
1752 static void hwi_free_async_msg(struct beiscsi_hba *phba,
1753 			       struct hwi_async_pdu_context *pasync_ctx,
1754 			       unsigned int cri)
1755 {
1756 	struct async_pdu_handle *pasync_handle, *tmp_handle;
1757 	struct list_head *plist;
1758 
1759 	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1760 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1761 		list_del(&pasync_handle->link);
1762 
1763 		if (pasync_handle->is_header) {
1764 			list_add_tail(&pasync_handle->link,
1765 				      &pasync_ctx->async_header.free_list);
1766 			pasync_ctx->async_header.free_entries++;
1767 		} else {
1768 			list_add_tail(&pasync_handle->link,
1769 				      &pasync_ctx->async_data.free_list);
1770 			pasync_ctx->async_data.free_entries++;
1771 		}
1772 	}
1773 
1774 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1775 	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1776 	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1777 }
1778 
1779 static struct phys_addr *
1780 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1781 		     unsigned int is_header, unsigned int host_write_ptr)
1782 {
1783 	struct phys_addr *pasync_sge = NULL;
1784 
1785 	if (is_header)
1786 		pasync_sge = pasync_ctx->async_header.ring_base;
1787 	else
1788 		pasync_sge = pasync_ctx->async_data.ring_base;
1789 
1790 	return pasync_sge + host_write_ptr;
1791 }
1792 
/*
 * hwi_post_async_buffers - repost free async pdu buffers to the chip
 *
 * Moves up to min(writables, free_entries) handles - rounded down to a
 * multiple of 8 - from the free list onto the busy lists, writes their
 * bus addresses into the ring SGEs, updates the ring bookkeeping and
 * rings the default-pdu doorbell for the given ULP.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				    unsigned int is_header, uint8_t ulp_num)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num, doorbell_offset;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	num_entries = pasync_ctx->num_entries;

	/* select header or data ring state and its doorbell */
	if (is_header) {
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
				  doorbell_offset;
	} else {
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
				  doorbell_offset;
	}

	/* post in chunks of 8 buffers */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): hi is written from address_lo and lo
			 * from address_hi - looks swapped relative to the
			 * field names; confirm against the SGE definition.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* commit the new write pointer and counters */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* tell the chip how many buffers were posted */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + doorbell_offset);
	}
}
1877 
/*
 * hwi_flush_default_pdu_buffer - drop a default-pdu data buffer
 *
 * Resolves the data handle for the CQE (must not be a header buffer -
 * BUG otherwise), updates the ring writables if the handle was not yet
 * consumed, frees everything queued for the connection's CRI and reposts
 * buffers to the chip.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
		     cri_index));

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/* flush applies only to data buffers, never headers */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(phba, pasync_ctx,
					   pasync_handle->is_header, cq_index);

	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header,
			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
			       cri_index));
}
1906 
/*
 * hwi_fwd_async_msg - reassemble and deliver a queued async PDU
 *
 * The first handle on the CRI's wait queue is the header; subsequent
 * handles carry data segments, which are concatenated into the first
 * data buffer. The assembled PDU is passed to beiscsi_process_async_pdu()
 * and the handles are then recycled.
 *
 * NOTE(review): the status from beiscsi_process_async_pdu() is captured
 * but the function unconditionally returns 0 - confirm callers do not
 * rely on the delivery result.
 */
static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			/* first entry is the PDU header */
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			/* data segments are packed into the first data buffer */
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					    phdr, hdr_len, pfirst_buffer,
					    offset);

	hwi_free_async_msg(phba, pasync_ctx, cri);
	return 0;
}
1946 
1947 static unsigned int
1948 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1949 		     struct beiscsi_hba *phba,
1950 		     struct async_pdu_handle *pasync_handle)
1951 {
1952 	struct hwi_async_pdu_context *pasync_ctx;
1953 	struct hwi_controller *phwi_ctrlr;
1954 	unsigned int bytes_needed = 0, status = 0;
1955 	unsigned short cri = pasync_handle->cri;
1956 	struct pdu_base *ppdu;
1957 
1958 	phwi_ctrlr = phba->phwi_ctrlr;
1959 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1960 		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1961 		     BE_GET_CRI_FROM_CID(beiscsi_conn->
1962 				 beiscsi_conn_cid)));
1963 
1964 	list_del(&pasync_handle->link);
1965 	if (pasync_handle->is_header) {
1966 		pasync_ctx->async_header.busy_entries--;
1967 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1968 			hwi_free_async_msg(phba, pasync_ctx, cri);
1969 			BUG();
1970 		}
1971 
1972 		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1973 		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1974 		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1975 				(unsigned short)pasync_handle->buffer_len;
1976 		list_add_tail(&pasync_handle->link,
1977 			      &pasync_ctx->async_entry[cri].wait_queue.list);
1978 
1979 		ppdu = pasync_handle->pbuffer;
1980 		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1981 			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1982 			0xFFFF0000) | ((be16_to_cpu((ppdu->
1983 			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1984 			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1985 
1986 		if (status == 0) {
1987 			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1988 			    bytes_needed;
1989 
1990 			if (bytes_needed == 0)
1991 				status = hwi_fwd_async_msg(beiscsi_conn, phba,
1992 							   pasync_ctx, cri);
1993 		}
1994 	} else {
1995 		pasync_ctx->async_data.busy_entries--;
1996 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1997 			list_add_tail(&pasync_handle->link,
1998 				      &pasync_ctx->async_entry[cri].wait_queue.
1999 				      list);
2000 			pasync_ctx->async_entry[cri].wait_queue.
2001 				bytes_received +=
2002 				(unsigned short)pasync_handle->buffer_len;
2003 
2004 			if (pasync_ctx->async_entry[cri].wait_queue.
2005 			    bytes_received >=
2006 			    pasync_ctx->async_entry[cri].wait_queue.
2007 			    bytes_needed)
2008 				status = hwi_fwd_async_msg(beiscsi_conn, phba,
2009 							   pasync_ctx, cri);
2010 		}
2011 	}
2012 	return status;
2013 }
2014 
2015 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
2016 					 struct beiscsi_hba *phba,
2017 					 struct i_t_dpdu_cqe *pdpdu_cqe)
2018 {
2019 	struct hwi_controller *phwi_ctrlr;
2020 	struct hwi_async_pdu_context *pasync_ctx;
2021 	struct async_pdu_handle *pasync_handle = NULL;
2022 	unsigned int cq_index = -1;
2023 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
2024 			     beiscsi_conn->beiscsi_conn_cid);
2025 
2026 	phwi_ctrlr = phba->phwi_ctrlr;
2027 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
2028 		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
2029 		     cri_index));
2030 
2031 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
2032 					     pdpdu_cqe, &cq_index);
2033 
2034 	if (pasync_handle->consumed == 0)
2035 		hwi_update_async_writables(phba, pasync_ctx,
2036 					   pasync_handle->is_header, cq_index);
2037 
2038 	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
2039 	hwi_post_async_buffers(phba, pasync_handle->is_header,
2040 			       BEISCSI_GET_ULP_FROM_CRI(
2041 			       phwi_ctrlr, cri_index));
2042 }
2043 
2044 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
2045 {
2046 	struct be_queue_info *mcc_cq;
2047 	struct  be_mcc_compl *mcc_compl;
2048 	unsigned int num_processed = 0;
2049 
2050 	mcc_cq = &phba->ctrl.mcc_obj.cq;
2051 	mcc_compl = queue_tail_node(mcc_cq);
2052 	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2053 	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
2054 		if (num_processed >= 32) {
2055 			hwi_ring_cq_db(phba, mcc_cq->id,
2056 					num_processed, 0);
2057 			num_processed = 0;
2058 		}
2059 		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
2060 			beiscsi_process_async_event(phba, mcc_compl);
2061 		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
2062 			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
2063 		}
2064 
2065 		mcc_compl->flags = 0;
2066 		queue_tail_inc(mcc_cq);
2067 		mcc_compl = queue_tail_node(mcc_cq);
2068 		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2069 		num_processed++;
2070 	}
2071 
2072 	if (num_processed > 0)
2073 		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
2074 }
2075 
2076 /**
2077  * beiscsi_process_cq()- Process the Completion Queue
2078  * @pbe_eq: Event Q on which the Completion has come
2079  * @budget: Max number of events to processed
2080  *
2081  * return
2082  *     Number of Completion Entries processed.
2083  **/
2084 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
2085 {
2086 	struct be_queue_info *cq;
2087 	struct sol_cqe *sol;
2088 	struct dmsg_cqe *dmsg;
2089 	unsigned int total = 0;
2090 	unsigned int num_processed = 0;
2091 	unsigned short code = 0, cid = 0;
2092 	uint16_t cri_index = 0;
2093 	struct beiscsi_conn *beiscsi_conn;
2094 	struct beiscsi_endpoint *beiscsi_ep;
2095 	struct iscsi_endpoint *ep;
2096 	struct beiscsi_hba *phba;
2097 
2098 	cq = pbe_eq->cq;
2099 	sol = queue_tail_node(cq);
2100 	phba = pbe_eq->phba;
2101 
2102 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
2103 	       CQE_VALID_MASK) {
2104 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2105 
2106 		 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
2107 			 32] & CQE_CODE_MASK);
2108 
2109 		 /* Get the CID */
2110 		if (is_chip_be2_be3r(phba)) {
2111 			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2112 		} else {
2113 			if ((code == DRIVERMSG_NOTIFY) ||
2114 			    (code == UNSOL_HDR_NOTIFY) ||
2115 			    (code == UNSOL_DATA_NOTIFY))
2116 				cid = AMAP_GET_BITS(
2117 						    struct amap_i_t_dpdu_cqe_v2,
2118 						    cid, sol);
2119 			 else
2120 				 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2121 						     cid, sol);
2122 		}
2123 
2124 		cri_index = BE_GET_CRI_FROM_CID(cid);
2125 		ep = phba->ep_array[cri_index];
2126 
2127 		if (ep == NULL) {
2128 			/* connection has already been freed
2129 			 * just move on to next one
2130 			 */
2131 			beiscsi_log(phba, KERN_WARNING,
2132 				    BEISCSI_LOG_INIT,
2133 				    "BM_%d : proc cqe of disconn ep: cid %d\n",
2134 				    cid);
2135 			goto proc_next_cqe;
2136 		}
2137 
2138 		beiscsi_ep = ep->dd_data;
2139 		beiscsi_conn = beiscsi_ep->conn;
2140 
2141 		/* replenish cq */
2142 		if (num_processed == 32) {
2143 			hwi_ring_cq_db(phba, cq->id, 32, 0);
2144 			num_processed = 0;
2145 		}
2146 		total++;
2147 
2148 		switch (code) {
2149 		case SOL_CMD_COMPLETE:
2150 			hwi_complete_cmd(beiscsi_conn, phba, sol);
2151 			break;
2152 		case DRIVERMSG_NOTIFY:
2153 			beiscsi_log(phba, KERN_INFO,
2154 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2155 				    "BM_%d : Received %s[%d] on CID : %d\n",
2156 				    cqe_desc[code], code, cid);
2157 
2158 			dmsg = (struct dmsg_cqe *)sol;
2159 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2160 			break;
2161 		case UNSOL_HDR_NOTIFY:
2162 			beiscsi_log(phba, KERN_INFO,
2163 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2164 				    "BM_%d : Received %s[%d] on CID : %d\n",
2165 				    cqe_desc[code], code, cid);
2166 
2167 			spin_lock_bh(&phba->async_pdu_lock);
2168 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
2169 					     (struct i_t_dpdu_cqe *)sol);
2170 			spin_unlock_bh(&phba->async_pdu_lock);
2171 			break;
2172 		case UNSOL_DATA_NOTIFY:
2173 			beiscsi_log(phba, KERN_INFO,
2174 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2175 				    "BM_%d : Received %s[%d] on CID : %d\n",
2176 				    cqe_desc[code], code, cid);
2177 
2178 			spin_lock_bh(&phba->async_pdu_lock);
2179 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
2180 					     (struct i_t_dpdu_cqe *)sol);
2181 			spin_unlock_bh(&phba->async_pdu_lock);
2182 			break;
2183 		case CXN_INVALIDATE_INDEX_NOTIFY:
2184 		case CMD_INVALIDATED_NOTIFY:
2185 		case CXN_INVALIDATE_NOTIFY:
2186 			beiscsi_log(phba, KERN_ERR,
2187 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2188 				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
2189 				    cqe_desc[code], code, cid);
2190 			break;
2191 		case CXN_KILLED_HDR_DIGEST_ERR:
2192 		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2193 			beiscsi_log(phba, KERN_ERR,
2194 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2195 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2196 				    cqe_desc[code], code,  cid);
2197 			break;
2198 		case CMD_KILLED_INVALID_STATSN_RCVD:
2199 		case CMD_KILLED_INVALID_R2T_RCVD:
2200 		case CMD_CXN_KILLED_LUN_INVALID:
2201 		case CMD_CXN_KILLED_ICD_INVALID:
2202 		case CMD_CXN_KILLED_ITT_INVALID:
2203 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2204 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
2205 			beiscsi_log(phba, KERN_ERR,
2206 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2207 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2208 				    cqe_desc[code], code,  cid);
2209 			break;
2210 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2211 			beiscsi_log(phba, KERN_ERR,
2212 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2213 				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
2214 				    cqe_desc[code], code, cid);
2215 			spin_lock_bh(&phba->async_pdu_lock);
2216 			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2217 					     (struct i_t_dpdu_cqe *) sol);
2218 			spin_unlock_bh(&phba->async_pdu_lock);
2219 			break;
2220 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2221 		case CXN_KILLED_BURST_LEN_MISMATCH:
2222 		case CXN_KILLED_AHS_RCVD:
2223 		case CXN_KILLED_UNKNOWN_HDR:
2224 		case CXN_KILLED_STALE_ITT_TTT_RCVD:
2225 		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2226 		case CXN_KILLED_TIMED_OUT:
2227 		case CXN_KILLED_FIN_RCVD:
2228 		case CXN_KILLED_RST_SENT:
2229 		case CXN_KILLED_RST_RCVD:
2230 		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2231 		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2232 		case CXN_KILLED_OVER_RUN_RESIDUAL:
2233 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
2234 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2235 			beiscsi_log(phba, KERN_ERR,
2236 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2237 				    "BM_%d : Event %s[%d] received on CID : %d\n",
2238 				    cqe_desc[code], code, cid);
2239 			if (beiscsi_conn)
2240 				iscsi_conn_failure(beiscsi_conn->conn,
2241 						   ISCSI_ERR_CONN_FAILED);
2242 			break;
2243 		default:
2244 			beiscsi_log(phba, KERN_ERR,
2245 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2246 				    "BM_%d : Invalid CQE Event Received Code : %d"
2247 				    "CID 0x%x...\n",
2248 				    code, cid);
2249 			break;
2250 		}
2251 
2252 proc_next_cqe:
2253 		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2254 		queue_tail_inc(cq);
2255 		sol = queue_tail_node(cq);
2256 		num_processed++;
2257 		if (total == budget)
2258 			break;
2259 	}
2260 
2261 	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
2262 	return total;
2263 }
2264 
2265 void beiscsi_process_all_cqs(struct work_struct *work)
2266 {
2267 	unsigned long flags;
2268 	struct hwi_controller *phwi_ctrlr;
2269 	struct hwi_context_memory *phwi_context;
2270 	struct beiscsi_hba *phba;
2271 	struct be_eq_obj *pbe_eq =
2272 	    container_of(work, struct be_eq_obj, work_cqs);
2273 
2274 	phba = pbe_eq->phba;
2275 	phwi_ctrlr = phba->phwi_ctrlr;
2276 	phwi_context = phwi_ctrlr->phwi_ctxt;
2277 
2278 	if (pbe_eq->todo_mcc_cq) {
2279 		spin_lock_irqsave(&phba->isr_lock, flags);
2280 		pbe_eq->todo_mcc_cq = false;
2281 		spin_unlock_irqrestore(&phba->isr_lock, flags);
2282 		beiscsi_process_mcc_cq(phba);
2283 	}
2284 
2285 	if (pbe_eq->todo_cq) {
2286 		spin_lock_irqsave(&phba->isr_lock, flags);
2287 		pbe_eq->todo_cq = false;
2288 		spin_unlock_irqrestore(&phba->isr_lock, flags);
2289 		beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
2290 	}
2291 
2292 	/* rearm EQ for further interrupts */
2293 	hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2294 }
2295 
2296 static int be_iopoll(struct irq_poll *iop, int budget)
2297 {
2298 	unsigned int ret, num_eq_processed;
2299 	struct beiscsi_hba *phba;
2300 	struct be_eq_obj *pbe_eq;
2301 	struct be_eq_entry *eqe = NULL;
2302 	struct be_queue_info *eq;
2303 
2304 	num_eq_processed = 0;
2305 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2306 	phba = pbe_eq->phba;
2307 	eq = &pbe_eq->q;
2308 	eqe = queue_tail_node(eq);
2309 
2310 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
2311 			EQE_VALID_MASK) {
2312 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2313 		queue_tail_inc(eq);
2314 		eqe = queue_tail_node(eq);
2315 		num_eq_processed++;
2316 	}
2317 
2318 	hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
2319 
2320 	ret = beiscsi_process_cq(pbe_eq, budget);
2321 	pbe_eq->cq_count += ret;
2322 	if (ret < budget) {
2323 		irq_poll_complete(iop);
2324 		beiscsi_log(phba, KERN_INFO,
2325 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2326 			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
2327 			    pbe_eq->q.id, ret);
2328 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2329 	}
2330 	return ret;
2331 }
2332 
/**
 * hwi_write_sgl_v2()- populate a v2 (SKH-R) WRB and SGL from a scatterlist
 * @pwrb: work request block to fill
 * @sg: DMA-mapped scatterlist for the I/O
 * @num_sg: number of mapped entries in @sg
 * @io_task: driver task carrying the BHS address and the SGL fragment page
 *
 * Same layout job as hwi_write_sgl() but using the v2 WRB field map.
 * The first two scatterlist entries are written inline into the WRB
 * (sge0/sge1) and the entire list is then written into the task's SGL
 * page with the final entry flagged as last_sge.
 */
static void
hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
		  unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	/* inline at most the first two entries into the WRB itself */
	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			/* sge1 records where sge0 ended (r2t offset) */
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	/* SGE 0 of the fragment page describes the BHS */
	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	/* flag whichever inline WRB SGE is the final one, if any */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      1);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	}

	/* skip the two leading SGEs, then write the full list */
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* mark the final SGE written above as the last one */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2427 
2428 static void
2429 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2430 	      unsigned int num_sg, struct beiscsi_io_task *io_task)
2431 {
2432 	struct iscsi_sge *psgl;
2433 	unsigned int sg_len, index;
2434 	unsigned int sge_len = 0;
2435 	unsigned long long addr;
2436 	struct scatterlist *l_sg;
2437 	unsigned int offset;
2438 
2439 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2440 				      io_task->bhs_pa.u.a32.address_lo);
2441 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2442 				      io_task->bhs_pa.u.a32.address_hi);
2443 
2444 	l_sg = sg;
2445 	for (index = 0; (index < num_sg) && (index < 2); index++,
2446 							 sg = sg_next(sg)) {
2447 		if (index == 0) {
2448 			sg_len = sg_dma_len(sg);
2449 			addr = (u64) sg_dma_address(sg);
2450 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2451 						((u32)(addr & 0xFFFFFFFF)));
2452 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2453 							((u32)(addr >> 32)));
2454 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2455 							sg_len);
2456 			sge_len = sg_len;
2457 		} else {
2458 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2459 							pwrb, sge_len);
2460 			sg_len = sg_dma_len(sg);
2461 			addr = (u64) sg_dma_address(sg);
2462 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2463 						((u32)(addr & 0xFFFFFFFF)));
2464 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2465 							((u32)(addr >> 32)));
2466 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2467 							sg_len);
2468 		}
2469 	}
2470 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2471 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2472 
2473 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2474 
2475 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2476 			io_task->bhs_pa.u.a32.address_hi);
2477 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2478 			io_task->bhs_pa.u.a32.address_lo);
2479 
2480 	if (num_sg == 1) {
2481 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2482 								1);
2483 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2484 								0);
2485 	} else if (num_sg == 2) {
2486 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2487 								0);
2488 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2489 								1);
2490 	} else {
2491 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2492 								0);
2493 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2494 								0);
2495 	}
2496 	sg = l_sg;
2497 	psgl++;
2498 	psgl++;
2499 	offset = 0;
2500 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2501 		sg_len = sg_dma_len(sg);
2502 		addr = (u64) sg_dma_address(sg);
2503 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2504 						(addr & 0xFFFFFFFF));
2505 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2506 						(addr >> 32));
2507 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2508 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2509 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2510 		offset += sg_len;
2511 	}
2512 	psgl--;
2513 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2514 }
2515 
2516 /**
2517  * hwi_write_buffer()- Populate the WRB with task info
2518  * @pwrb: ptr to the WRB entry
2519  * @task: iscsi task which is to be executed
2520  **/
2521 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2522 {
2523 	struct iscsi_sge *psgl;
2524 	struct beiscsi_io_task *io_task = task->dd_data;
2525 	struct beiscsi_conn *beiscsi_conn = io_task->conn;
2526 	struct beiscsi_hba *phba = beiscsi_conn->phba;
2527 	uint8_t dsp_value = 0;
2528 
2529 	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2530 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2531 				io_task->bhs_pa.u.a32.address_lo);
2532 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2533 				io_task->bhs_pa.u.a32.address_hi);
2534 
2535 	if (task->data) {
2536 
2537 		/* Check for the data_count */
2538 		dsp_value = (task->data_count) ? 1 : 0;
2539 
2540 		if (is_chip_be2_be3r(phba))
2541 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2542 				      pwrb, dsp_value);
2543 		else
2544 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2545 				      pwrb, dsp_value);
2546 
2547 		/* Map addr only if there is data_count */
2548 		if (dsp_value) {
2549 			io_task->mtask_addr = pci_map_single(phba->pcidev,
2550 							     task->data,
2551 							     task->data_count,
2552 							     PCI_DMA_TODEVICE);
2553 			if (pci_dma_mapping_error(phba->pcidev,
2554 						  io_task->mtask_addr))
2555 				return -ENOMEM;
2556 			io_task->mtask_data_count = task->data_count;
2557 		} else
2558 			io_task->mtask_addr = 0;
2559 
2560 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2561 			      lower_32_bits(io_task->mtask_addr));
2562 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2563 			      upper_32_bits(io_task->mtask_addr));
2564 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2565 						task->data_count);
2566 
2567 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2568 	} else {
2569 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2570 		io_task->mtask_addr = 0;
2571 	}
2572 
2573 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2574 
2575 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2576 
2577 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2578 		      io_task->bhs_pa.u.a32.address_hi);
2579 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2580 		      io_task->bhs_pa.u.a32.address_lo);
2581 	if (task->data) {
2582 		psgl++;
2583 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2584 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2585 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2586 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2587 		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2588 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2589 
2590 		psgl++;
2591 		if (task->data) {
2592 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2593 				      lower_32_bits(io_task->mtask_addr));
2594 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2595 				      upper_32_bits(io_task->mtask_addr));
2596 		}
2597 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2598 	}
2599 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2600 	return 0;
2601 }
2602 
2603 /**
2604  * beiscsi_find_mem_req()- Find mem needed
2605  * @phba: ptr to HBA struct
2606  **/
2607 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2608 {
2609 	uint8_t mem_descr_index, ulp_num;
2610 	unsigned int num_cq_pages, num_async_pdu_buf_pages;
2611 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2612 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2613 
2614 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2615 				      sizeof(struct sol_cqe));
2616 
2617 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2618 
2619 	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2620 						 BE_ISCSI_PDU_HEADER_SIZE;
2621 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2622 					    sizeof(struct hwi_context_memory);
2623 
2624 
2625 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2626 	    * (phba->params.wrbs_per_cxn)
2627 	    * phba->params.cxns_per_ctrl;
2628 	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2629 				 (phba->params.wrbs_per_cxn);
2630 	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2631 				phba->params.cxns_per_ctrl);
2632 
2633 	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2634 		phba->params.icds_per_ctrl;
2635 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2636 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2637 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2638 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2639 
2640 			num_async_pdu_buf_sgl_pages =
2641 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2642 					       phba, ulp_num) *
2643 					       sizeof(struct phys_addr));
2644 
2645 			num_async_pdu_buf_pages =
2646 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2647 					       phba, ulp_num) *
2648 					       phba->params.defpdu_hdr_sz);
2649 
2650 			num_async_pdu_data_pages =
2651 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2652 					       phba, ulp_num) *
2653 					       phba->params.defpdu_data_sz);
2654 
2655 			num_async_pdu_data_sgl_pages =
2656 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2657 					       phba, ulp_num) *
2658 					       sizeof(struct phys_addr));
2659 
2660 			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2661 					  (ulp_num * MEM_DESCR_OFFSET));
2662 			phba->mem_req[mem_descr_index] =
2663 					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2664 					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2665 
2666 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2667 					  (ulp_num * MEM_DESCR_OFFSET));
2668 			phba->mem_req[mem_descr_index] =
2669 					  num_async_pdu_buf_pages *
2670 					  PAGE_SIZE;
2671 
2672 			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2673 					  (ulp_num * MEM_DESCR_OFFSET));
2674 			phba->mem_req[mem_descr_index] =
2675 					  num_async_pdu_data_pages *
2676 					  PAGE_SIZE;
2677 
2678 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2679 					  (ulp_num * MEM_DESCR_OFFSET));
2680 			phba->mem_req[mem_descr_index] =
2681 					  num_async_pdu_buf_sgl_pages *
2682 					  PAGE_SIZE;
2683 
2684 			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2685 					  (ulp_num * MEM_DESCR_OFFSET));
2686 			phba->mem_req[mem_descr_index] =
2687 					  num_async_pdu_data_sgl_pages *
2688 					  PAGE_SIZE;
2689 
2690 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2691 					  (ulp_num * MEM_DESCR_OFFSET));
2692 			phba->mem_req[mem_descr_index] =
2693 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2694 					  sizeof(struct async_pdu_handle);
2695 
2696 			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2697 					  (ulp_num * MEM_DESCR_OFFSET));
2698 			phba->mem_req[mem_descr_index] =
2699 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2700 					  sizeof(struct async_pdu_handle);
2701 
2702 			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2703 					  (ulp_num * MEM_DESCR_OFFSET));
2704 			phba->mem_req[mem_descr_index] =
2705 					  sizeof(struct hwi_async_pdu_context) +
2706 					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2707 					  sizeof(struct hwi_async_entry));
2708 		}
2709 	}
2710 }
2711 
2712 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2713 {
2714 	dma_addr_t bus_add;
2715 	struct hwi_controller *phwi_ctrlr;
2716 	struct be_mem_descriptor *mem_descr;
2717 	struct mem_array *mem_arr, *mem_arr_orig;
2718 	unsigned int i, j, alloc_size, curr_alloc_size;
2719 
2720 	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2721 	if (!phba->phwi_ctrlr)
2722 		return -ENOMEM;
2723 
2724 	/* Allocate memory for wrb_context */
2725 	phwi_ctrlr = phba->phwi_ctrlr;
2726 	phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2727 					  phba->params.cxns_per_ctrl,
2728 					  GFP_KERNEL);
2729 	if (!phwi_ctrlr->wrb_context) {
2730 		kfree(phba->phwi_ctrlr);
2731 		return -ENOMEM;
2732 	}
2733 
2734 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2735 				 GFP_KERNEL);
2736 	if (!phba->init_mem) {
2737 		kfree(phwi_ctrlr->wrb_context);
2738 		kfree(phba->phwi_ctrlr);
2739 		return -ENOMEM;
2740 	}
2741 
2742 	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2743 			       GFP_KERNEL);
2744 	if (!mem_arr_orig) {
2745 		kfree(phba->init_mem);
2746 		kfree(phwi_ctrlr->wrb_context);
2747 		kfree(phba->phwi_ctrlr);
2748 		return -ENOMEM;
2749 	}
2750 
2751 	mem_descr = phba->init_mem;
2752 	for (i = 0; i < SE_MEM_MAX; i++) {
2753 		if (!phba->mem_req[i]) {
2754 			mem_descr->mem_array = NULL;
2755 			mem_descr++;
2756 			continue;
2757 		}
2758 
2759 		j = 0;
2760 		mem_arr = mem_arr_orig;
2761 		alloc_size = phba->mem_req[i];
2762 		memset(mem_arr, 0, sizeof(struct mem_array) *
2763 		       BEISCSI_MAX_FRAGS_INIT);
2764 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2765 		do {
2766 			mem_arr->virtual_address = pci_alloc_consistent(
2767 							phba->pcidev,
2768 							curr_alloc_size,
2769 							&bus_add);
2770 			if (!mem_arr->virtual_address) {
2771 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2772 					goto free_mem;
2773 				if (curr_alloc_size -
2774 					rounddown_pow_of_two(curr_alloc_size))
2775 					curr_alloc_size = rounddown_pow_of_two
2776 							     (curr_alloc_size);
2777 				else
2778 					curr_alloc_size = curr_alloc_size / 2;
2779 			} else {
2780 				mem_arr->bus_address.u.
2781 				    a64.address = (__u64) bus_add;
2782 				mem_arr->size = curr_alloc_size;
2783 				alloc_size -= curr_alloc_size;
2784 				curr_alloc_size = min(be_max_phys_size *
2785 						      1024, alloc_size);
2786 				j++;
2787 				mem_arr++;
2788 			}
2789 		} while (alloc_size);
2790 		mem_descr->num_elements = j;
2791 		mem_descr->size_in_bytes = phba->mem_req[i];
2792 		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2793 					       GFP_KERNEL);
2794 		if (!mem_descr->mem_array)
2795 			goto free_mem;
2796 
2797 		memcpy(mem_descr->mem_array, mem_arr_orig,
2798 		       sizeof(struct mem_array) * j);
2799 		mem_descr++;
2800 	}
2801 	kfree(mem_arr_orig);
2802 	return 0;
2803 free_mem:
2804 	mem_descr->num_elements = j;
2805 	while ((i) || (j)) {
2806 		for (j = mem_descr->num_elements; j > 0; j--) {
2807 			pci_free_consistent(phba->pcidev,
2808 					    mem_descr->mem_array[j - 1].size,
2809 					    mem_descr->mem_array[j - 1].
2810 					    virtual_address,
2811 					    (unsigned long)mem_descr->
2812 					    mem_array[j - 1].
2813 					    bus_address.u.a64.address);
2814 		}
2815 		if (i) {
2816 			i--;
2817 			kfree(mem_descr->mem_array);
2818 			mem_descr--;
2819 		}
2820 	}
2821 	kfree(mem_arr_orig);
2822 	kfree(phba->init_mem);
2823 	kfree(phba->phwi_ctrlr->wrb_context);
2824 	kfree(phba->phwi_ctrlr);
2825 	return -ENOMEM;
2826 }
2827 
/**
 * beiscsi_get_memory()- size and allocate all driver memory regions
 * @phba: ptr to HBA struct
 *
 * Computes phba->mem_req[] first, then allocates regions to match.
 * Returns 0 on success or a negative errno from the allocator.
 */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);
	return status;
}
2833 
2834 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2835 {
2836 	struct pdu_data_out *pdata_out;
2837 	struct pdu_nop_out *pnop_out;
2838 	struct be_mem_descriptor *mem_descr;
2839 
2840 	mem_descr = phba->init_mem;
2841 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2842 	pdata_out =
2843 	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2844 	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2845 
2846 	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2847 		      IIOC_SCSI_DATA);
2848 
2849 	pnop_out =
2850 	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2851 				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2852 
2853 	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2854 	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2855 	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2856 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2857 }
2858 
2859 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2860 {
2861 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2862 	struct hwi_context_memory *phwi_ctxt;
2863 	struct wrb_handle *pwrb_handle = NULL;
2864 	struct hwi_controller *phwi_ctrlr;
2865 	struct hwi_wrb_context *pwrb_context;
2866 	struct iscsi_wrb *pwrb = NULL;
2867 	unsigned int num_cxn_wrbh = 0;
2868 	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2869 
2870 	mem_descr_wrbh = phba->init_mem;
2871 	mem_descr_wrbh += HWI_MEM_WRBH;
2872 
2873 	mem_descr_wrb = phba->init_mem;
2874 	mem_descr_wrb += HWI_MEM_WRB;
2875 	phwi_ctrlr = phba->phwi_ctrlr;
2876 
2877 	/* Allocate memory for WRBQ */
2878 	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2879 	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2880 				     phba->params.cxns_per_ctrl,
2881 				     GFP_KERNEL);
2882 	if (!phwi_ctxt->be_wrbq) {
2883 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2884 			    "BM_%d : WRBQ Mem Alloc Failed\n");
2885 		return -ENOMEM;
2886 	}
2887 
2888 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2889 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2890 		pwrb_context->pwrb_handle_base =
2891 				kzalloc(sizeof(struct wrb_handle *) *
2892 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2893 		if (!pwrb_context->pwrb_handle_base) {
2894 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2895 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2896 			goto init_wrb_hndl_failed;
2897 		}
2898 		pwrb_context->pwrb_handle_basestd =
2899 				kzalloc(sizeof(struct wrb_handle *) *
2900 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2901 		if (!pwrb_context->pwrb_handle_basestd) {
2902 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2903 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2904 			goto init_wrb_hndl_failed;
2905 		}
2906 		if (!num_cxn_wrbh) {
2907 			pwrb_handle =
2908 				mem_descr_wrbh->mem_array[idx].virtual_address;
2909 			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2910 					((sizeof(struct wrb_handle)) *
2911 					 phba->params.wrbs_per_cxn));
2912 			idx++;
2913 		}
2914 		pwrb_context->alloc_index = 0;
2915 		pwrb_context->wrb_handles_available = 0;
2916 		pwrb_context->free_index = 0;
2917 
2918 		if (num_cxn_wrbh) {
2919 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2920 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2921 				pwrb_context->pwrb_handle_basestd[j] =
2922 								pwrb_handle;
2923 				pwrb_context->wrb_handles_available++;
2924 				pwrb_handle->wrb_index = j;
2925 				pwrb_handle++;
2926 			}
2927 			num_cxn_wrbh--;
2928 		}
2929 		spin_lock_init(&pwrb_context->wrb_lock);
2930 	}
2931 	idx = 0;
2932 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2933 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2934 		if (!num_cxn_wrb) {
2935 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2936 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2937 				((sizeof(struct iscsi_wrb) *
2938 				  phba->params.wrbs_per_cxn));
2939 			idx++;
2940 		}
2941 
2942 		if (num_cxn_wrb) {
2943 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2944 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2945 				pwrb_handle->pwrb = pwrb;
2946 				pwrb++;
2947 			}
2948 			num_cxn_wrb--;
2949 		}
2950 	}
2951 	return 0;
2952 init_wrb_hndl_failed:
2953 	for (j = index; j > 0; j--) {
2954 		pwrb_context = &phwi_ctrlr->wrb_context[j];
2955 		kfree(pwrb_context->pwrb_handle_base);
2956 		kfree(pwrb_context->pwrb_handle_basestd);
2957 	}
2958 	return -ENOMEM;
2959 }
2960 
/**
 * hwi_init_async_pdu_ctx()- Build the default-PDU (async) context per ULP.
 * @phba: pointer to the driver's private hba structure
 *
 * For each supported ULP, overlays a hwi_async_pdu_context on the
 * preallocated HWI_MEM_ASYNC_PDU_CONTEXT region, wires up the header
 * and data buffer/ring/handle regions carved out earlier, and threads
 * every header and data handle onto the respective free lists.
 *
 * Always returns 0 (missing virtual addresses are only logged).
 */
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	uint8_t ulp_num;
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			/* The context itself lives in preallocated init_mem. */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET));

			phwi_ctrlr = phba->phwi_ctrlr;
			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
				(struct hwi_async_pdu_context *)
				 mem_descr->mem_array[0].virtual_address;

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
			memset(pasync_ctx, 0, sizeof(*pasync_ctx));

			/* The async_entry array follows the context struct
			 * in the same allocation.
			 */
			pasync_ctx->async_entry =
					(struct hwi_async_entry *)
					((long unsigned int)pasync_ctx +
					sizeof(struct hwi_async_pdu_context));

			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
						  ulp_num);
			pasync_ctx->buffer_size = p->defpdu_hdr_sz;

			/* Default PDU header buffers. */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
				(ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.va_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx->async_header.pa_base.u.a64.address =
				mem_descr->mem_array[0].
				bus_address.u.a64.address;

			/* Default PDU header ring (posted to hardware). */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* Handles used to track the header buffers. */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.handle_base =
				mem_descr->mem_array[0].virtual_address;
			pasync_ctx->async_header.writables = 0;
			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

			/* Default PDU data ring (posted to hardware). */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* Handles used to track the data buffers. */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (!mem_descr->mem_array[0].virtual_address)
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.handle_base =
				mem_descr->mem_array[0].virtual_address;
			pasync_ctx->async_data.writables = 0;
			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

			pasync_header_h =
				(struct async_pdu_handle *)
				pasync_ctx->async_header.handle_base;
			pasync_data_h =
				(struct async_pdu_handle *)
				pasync_ctx->async_data.handle_base;

			/* Default PDU data buffers; may span several
			 * mem_array chunks (see idx advance below).
			 */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			idx = 0;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					phba->params.defpdu_data_sz);
			num_per_mem = 0;

			/* One header + one data handle per CID. */
			for (index = 0;	index < BEISCSI_GET_CID_COUNT
					(phba, ulp_num); index++) {
				pasync_header_h->cri = -1;
				pasync_header_h->index = (char)index;
				INIT_LIST_HEAD(&pasync_header_h->link);
				pasync_header_h->pbuffer =
					(void *)((unsigned long)
						 (pasync_ctx->
						  async_header.va_base) +
						 (p->defpdu_hdr_sz * index));

				pasync_header_h->pa.u.a64.address =
					pasync_ctx->async_header.pa_base.u.a64.
					address + (p->defpdu_hdr_sz * index);

				list_add_tail(&pasync_header_h->link,
					      &pasync_ctx->async_header.
					      free_list);
				pasync_header_h++;
				pasync_ctx->async_header.free_entries++;
				pasync_ctx->async_header.writables++;

				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       wait_queue.list);
				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       header_busy_list);
				pasync_data_h->cri = -1;
				pasync_data_h->index = (char)index;
				INIT_LIST_HEAD(&pasync_data_h->link);

				/* Current data chunk exhausted: move the
				 * va/pa base to the next mem_array chunk.
				 */
				if (!num_async_data) {
					num_per_mem = 0;
					idx++;
					pasync_ctx->async_data.va_base =
						mem_descr->mem_array[idx].
						virtual_address;
					pasync_ctx->async_data.pa_base.u.
						a64.address =
						mem_descr->mem_array[idx].
						bus_address.u.a64.address;
					num_async_data =
						((mem_descr->mem_array[idx].
						  size) /
						 phba->params.defpdu_data_sz);
				}
				pasync_data_h->pbuffer =
					(void *)((unsigned long)
					(pasync_ctx->async_data.va_base) +
					(p->defpdu_data_sz * num_per_mem));

				pasync_data_h->pa.u.a64.address =
					pasync_ctx->async_data.pa_base.u.a64.
					address + (p->defpdu_data_sz *
					num_per_mem);
				num_per_mem++;
				num_async_data--;

				list_add_tail(&pasync_data_h->link,
					      &pasync_ctx->async_data.
					      free_list);
				pasync_data_h++;
				pasync_ctx->async_data.free_entries++;
				pasync_ctx->async_data.writables++;

				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       data_busy_list);
			}

			pasync_ctx->async_header.host_write_ptr = 0;
			pasync_ctx->async_header.ep_read_ptr = -1;
			pasync_ctx->async_data.host_write_ptr = 0;
			pasync_ctx->async_data.ep_read_ptr = -1;
		}
	}

	return 0;
}
3202 
3203 static int
3204 be_sgl_create_contiguous(void *virtual_address,
3205 			 u64 physical_address, u32 length,
3206 			 struct be_dma_mem *sgl)
3207 {
3208 	WARN_ON(!virtual_address);
3209 	WARN_ON(!physical_address);
3210 	WARN_ON(!length);
3211 	WARN_ON(!sgl);
3212 
3213 	sgl->va = virtual_address;
3214 	sgl->dma = (unsigned long)physical_address;
3215 	sgl->size = length;
3216 
3217 	return 0;
3218 }
3219 
3220 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
3221 {
3222 	memset(sgl, 0, sizeof(*sgl));
3223 }
3224 
3225 static void
3226 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
3227 		     struct mem_array *pmem, struct be_dma_mem *sgl)
3228 {
3229 	if (sgl->va)
3230 		be_sgl_destroy_contiguous(sgl);
3231 
3232 	be_sgl_create_contiguous(pmem->virtual_address,
3233 				 pmem->bus_address.u.a64.address,
3234 				 pmem->size, sgl);
3235 }
3236 
3237 static void
3238 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3239 			   struct mem_array *pmem, struct be_dma_mem *sgl)
3240 {
3241 	if (sgl->va)
3242 		be_sgl_destroy_contiguous(sgl);
3243 
3244 	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3245 				 pmem->bus_address.u.a64.address,
3246 				 pmem->size, sgl);
3247 }
3248 
3249 static int be_fill_queue(struct be_queue_info *q,
3250 		u16 len, u16 entry_size, void *vaddress)
3251 {
3252 	struct be_dma_mem *mem = &q->dma_mem;
3253 
3254 	memset(q, 0, sizeof(*q));
3255 	q->len = len;
3256 	q->entry_size = entry_size;
3257 	mem->size = len * entry_size;
3258 	mem->va = vaddress;
3259 	if (!mem->va)
3260 		return -ENOMEM;
3261 	memset(mem->va, 0, mem->size);
3262 	return 0;
3263 }
3264 
3265 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3266 			     struct hwi_context_memory *phwi_context)
3267 {
3268 	unsigned int i, num_eq_pages;
3269 	int ret = 0, eq_for_mcc;
3270 	struct be_queue_info *eq;
3271 	struct be_dma_mem *mem;
3272 	void *eq_vaddress;
3273 	dma_addr_t paddr;
3274 
3275 	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3276 				      sizeof(struct be_eq_entry));
3277 
3278 	if (phba->msix_enabled)
3279 		eq_for_mcc = 1;
3280 	else
3281 		eq_for_mcc = 0;
3282 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3283 		eq = &phwi_context->be_eq[i].q;
3284 		mem = &eq->dma_mem;
3285 		phwi_context->be_eq[i].phba = phba;
3286 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
3287 						     num_eq_pages * PAGE_SIZE,
3288 						     &paddr);
3289 		if (!eq_vaddress)
3290 			goto create_eq_error;
3291 
3292 		mem->va = eq_vaddress;
3293 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
3294 				    sizeof(struct be_eq_entry), eq_vaddress);
3295 		if (ret) {
3296 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3297 				    "BM_%d : be_fill_queue Failed for EQ\n");
3298 			goto create_eq_error;
3299 		}
3300 
3301 		mem->dma = paddr;
3302 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3303 					    phwi_context->cur_eqd);
3304 		if (ret) {
3305 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3306 				    "BM_%d : beiscsi_cmd_eq_create"
3307 				    "Failed for EQ\n");
3308 			goto create_eq_error;
3309 		}
3310 
3311 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3312 			    "BM_%d : eqid = %d\n",
3313 			    phwi_context->be_eq[i].q.id);
3314 	}
3315 	return 0;
3316 create_eq_error:
3317 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3318 		eq = &phwi_context->be_eq[i].q;
3319 		mem = &eq->dma_mem;
3320 		if (mem->va)
3321 			pci_free_consistent(phba->pcidev, num_eq_pages
3322 					    * PAGE_SIZE,
3323 					    mem->va, mem->dma);
3324 	}
3325 	return ret;
3326 }
3327 
3328 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3329 			     struct hwi_context_memory *phwi_context)
3330 {
3331 	unsigned int i, num_cq_pages;
3332 	int ret = 0;
3333 	struct be_queue_info *cq, *eq;
3334 	struct be_dma_mem *mem;
3335 	struct be_eq_obj *pbe_eq;
3336 	void *cq_vaddress;
3337 	dma_addr_t paddr;
3338 
3339 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3340 				      sizeof(struct sol_cqe));
3341 
3342 	for (i = 0; i < phba->num_cpus; i++) {
3343 		cq = &phwi_context->be_cq[i];
3344 		eq = &phwi_context->be_eq[i].q;
3345 		pbe_eq = &phwi_context->be_eq[i];
3346 		pbe_eq->cq = cq;
3347 		pbe_eq->phba = phba;
3348 		mem = &cq->dma_mem;
3349 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
3350 						     num_cq_pages * PAGE_SIZE,
3351 						     &paddr);
3352 		if (!cq_vaddress)
3353 			goto create_cq_error;
3354 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
3355 				    sizeof(struct sol_cqe), cq_vaddress);
3356 		if (ret) {
3357 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3358 				    "BM_%d : be_fill_queue Failed "
3359 				    "for ISCSI CQ\n");
3360 			goto create_cq_error;
3361 		}
3362 
3363 		mem->dma = paddr;
3364 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3365 					    false, 0);
3366 		if (ret) {
3367 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3368 				    "BM_%d : beiscsi_cmd_eq_create"
3369 				    "Failed for ISCSI CQ\n");
3370 			goto create_cq_error;
3371 		}
3372 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3373 			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3374 			    "iSCSI CQ CREATED\n", cq->id, eq->id);
3375 	}
3376 	return 0;
3377 
3378 create_cq_error:
3379 	for (i = 0; i < phba->num_cpus; i++) {
3380 		cq = &phwi_context->be_cq[i];
3381 		mem = &cq->dma_mem;
3382 		if (mem->va)
3383 			pci_free_consistent(phba->pcidev, num_cq_pages
3384 					    * PAGE_SIZE,
3385 					    mem->va, mem->dma);
3386 	}
3387 	return ret;
3388 
3389 }
3390 
3391 static int
3392 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3393 		       struct hwi_context_memory *phwi_context,
3394 		       struct hwi_controller *phwi_ctrlr,
3395 		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3396 {
3397 	unsigned int idx;
3398 	int ret;
3399 	struct be_queue_info *dq, *cq;
3400 	struct be_dma_mem *mem;
3401 	struct be_mem_descriptor *mem_descr;
3402 	void *dq_vaddress;
3403 
3404 	idx = 0;
3405 	dq = &phwi_context->be_def_hdrq[ulp_num];
3406 	cq = &phwi_context->be_cq[0];
3407 	mem = &dq->dma_mem;
3408 	mem_descr = phba->init_mem;
3409 	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3410 		    (ulp_num * MEM_DESCR_OFFSET);
3411 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3412 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3413 			    sizeof(struct phys_addr),
3414 			    sizeof(struct phys_addr), dq_vaddress);
3415 	if (ret) {
3416 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3417 			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3418 			    ulp_num);
3419 
3420 		return ret;
3421 	}
3422 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3423 				  bus_address.u.a64.address;
3424 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3425 					      def_pdu_ring_sz,
3426 					      phba->params.defpdu_hdr_sz,
3427 					      BEISCSI_DEFQ_HDR, ulp_num);
3428 	if (ret) {
3429 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3430 			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3431 			    ulp_num);
3432 
3433 		return ret;
3434 	}
3435 
3436 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3437 		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3438 		    ulp_num,
3439 		    phwi_context->be_def_hdrq[ulp_num].id);
3440 	hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
3441 	return 0;
3442 }
3443 
3444 static int
3445 beiscsi_create_def_data(struct beiscsi_hba *phba,
3446 			struct hwi_context_memory *phwi_context,
3447 			struct hwi_controller *phwi_ctrlr,
3448 			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3449 {
3450 	unsigned int idx;
3451 	int ret;
3452 	struct be_queue_info *dataq, *cq;
3453 	struct be_dma_mem *mem;
3454 	struct be_mem_descriptor *mem_descr;
3455 	void *dq_vaddress;
3456 
3457 	idx = 0;
3458 	dataq = &phwi_context->be_def_dataq[ulp_num];
3459 	cq = &phwi_context->be_cq[0];
3460 	mem = &dataq->dma_mem;
3461 	mem_descr = phba->init_mem;
3462 	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3463 		    (ulp_num * MEM_DESCR_OFFSET);
3464 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3465 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3466 			    sizeof(struct phys_addr),
3467 			    sizeof(struct phys_addr), dq_vaddress);
3468 	if (ret) {
3469 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3470 			    "BM_%d : be_fill_queue Failed for DEF PDU "
3471 			    "DATA on ULP : %d\n",
3472 			    ulp_num);
3473 
3474 		return ret;
3475 	}
3476 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3477 				  bus_address.u.a64.address;
3478 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3479 					      def_pdu_ring_sz,
3480 					      phba->params.defpdu_data_sz,
3481 					      BEISCSI_DEFQ_DATA, ulp_num);
3482 	if (ret) {
3483 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3484 			    "BM_%d be_cmd_create_default_pdu_queue"
3485 			    " Failed for DEF PDU DATA on ULP : %d\n",
3486 			    ulp_num);
3487 		return ret;
3488 	}
3489 
3490 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3491 		    "BM_%d : iscsi def data id on ULP : %d is  %d\n",
3492 		    ulp_num,
3493 		    phwi_context->be_def_dataq[ulp_num].id);
3494 
3495 	hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
3496 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3497 		    "BM_%d : DEFAULT PDU DATA RING CREATED"
3498 		    "on ULP : %d\n", ulp_num);
3499 
3500 	return 0;
3501 }
3502 
3503 
3504 static int
3505 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3506 {
3507 	struct be_mem_descriptor *mem_descr;
3508 	struct mem_array *pm_arr;
3509 	struct be_dma_mem sgl;
3510 	int status, ulp_num;
3511 
3512 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3513 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3514 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3515 			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3516 				    (ulp_num * MEM_DESCR_OFFSET);
3517 			pm_arr = mem_descr->mem_array;
3518 
3519 			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3520 			status = be_cmd_iscsi_post_template_hdr(
3521 				 &phba->ctrl, &sgl);
3522 
3523 			if (status != 0) {
3524 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3525 					    "BM_%d : Post Template HDR Failed for"
3526 					    "ULP_%d\n", ulp_num);
3527 				return status;
3528 			}
3529 
3530 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3531 				    "BM_%d : Template HDR Pages Posted for"
3532 				    "ULP_%d\n", ulp_num);
3533 		}
3534 	}
3535 	return 0;
3536 }
3537 
3538 static int
3539 beiscsi_post_pages(struct beiscsi_hba *phba)
3540 {
3541 	struct be_mem_descriptor *mem_descr;
3542 	struct mem_array *pm_arr;
3543 	unsigned int page_offset, i;
3544 	struct be_dma_mem sgl;
3545 	int status, ulp_num = 0;
3546 
3547 	mem_descr = phba->init_mem;
3548 	mem_descr += HWI_MEM_SGE;
3549 	pm_arr = mem_descr->mem_array;
3550 
3551 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3552 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3553 			break;
3554 
3555 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3556 			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3557 	for (i = 0; i < mem_descr->num_elements; i++) {
3558 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3559 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3560 						page_offset,
3561 						(pm_arr->size / PAGE_SIZE));
3562 		page_offset += pm_arr->size / PAGE_SIZE;
3563 		if (status != 0) {
3564 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3565 				    "BM_%d : post sgl failed.\n");
3566 			return status;
3567 		}
3568 		pm_arr++;
3569 	}
3570 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3571 		    "BM_%d : POSTED PAGES\n");
3572 	return 0;
3573 }
3574 
3575 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3576 {
3577 	struct be_dma_mem *mem = &q->dma_mem;
3578 	if (mem->va) {
3579 		pci_free_consistent(phba->pcidev, mem->size,
3580 			mem->va, mem->dma);
3581 		mem->va = NULL;
3582 	}
3583 }
3584 
3585 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3586 		u16 len, u16 entry_size)
3587 {
3588 	struct be_dma_mem *mem = &q->dma_mem;
3589 
3590 	memset(q, 0, sizeof(*q));
3591 	q->len = len;
3592 	q->entry_size = entry_size;
3593 	mem->size = len * entry_size;
3594 	mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
3595 	if (!mem->va)
3596 		return -ENOMEM;
3597 	return 0;
3598 }
3599 
3600 static int
3601 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3602 			 struct hwi_context_memory *phwi_context,
3603 			 struct hwi_controller *phwi_ctrlr)
3604 {
3605 	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3606 	u64 pa_addr_lo;
3607 	unsigned int idx, num, i, ulp_num;
3608 	struct mem_array *pwrb_arr;
3609 	void *wrb_vaddr;
3610 	struct be_dma_mem sgl;
3611 	struct be_mem_descriptor *mem_descr;
3612 	struct hwi_wrb_context *pwrb_context;
3613 	int status;
3614 	uint8_t ulp_count = 0, ulp_base_num = 0;
3615 	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3616 
3617 	idx = 0;
3618 	mem_descr = phba->init_mem;
3619 	mem_descr += HWI_MEM_WRB;
3620 	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3621 			   GFP_KERNEL);
3622 	if (!pwrb_arr) {
3623 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3624 			    "BM_%d : Memory alloc failed in create wrb ring.\n");
3625 		return -ENOMEM;
3626 	}
3627 	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3628 	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3629 	num_wrb_rings = mem_descr->mem_array[idx].size /
3630 		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3631 
3632 	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3633 		if (num_wrb_rings) {
3634 			pwrb_arr[num].virtual_address = wrb_vaddr;
3635 			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
3636 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3637 					    sizeof(struct iscsi_wrb);
3638 			wrb_vaddr += pwrb_arr[num].size;
3639 			pa_addr_lo += pwrb_arr[num].size;
3640 			num_wrb_rings--;
3641 		} else {
3642 			idx++;
3643 			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3644 			pa_addr_lo = mem_descr->mem_array[idx].\
3645 					bus_address.u.a64.address;
3646 			num_wrb_rings = mem_descr->mem_array[idx].size /
3647 					(phba->params.wrbs_per_cxn *
3648 					sizeof(struct iscsi_wrb));
3649 			pwrb_arr[num].virtual_address = wrb_vaddr;
3650 			pwrb_arr[num].bus_address.u.a64.address\
3651 						= pa_addr_lo;
3652 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3653 						 sizeof(struct iscsi_wrb);
3654 			wrb_vaddr += pwrb_arr[num].size;
3655 			pa_addr_lo   += pwrb_arr[num].size;
3656 			num_wrb_rings--;
3657 		}
3658 	}
3659 
3660 	/* Get the ULP Count */
3661 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3662 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3663 			ulp_count++;
3664 			ulp_base_num = ulp_num;
3665 			cid_count_ulp[ulp_num] =
3666 				BEISCSI_GET_CID_COUNT(phba, ulp_num);
3667 		}
3668 
3669 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3670 		wrb_mem_index = 0;
3671 		offset = 0;
3672 		size = 0;
3673 
3674 		if (ulp_count > 1) {
3675 			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3676 
3677 			if (!cid_count_ulp[ulp_base_num])
3678 				ulp_base_num = (ulp_base_num + 1) %
3679 						BEISCSI_ULP_COUNT;
3680 
3681 			cid_count_ulp[ulp_base_num]--;
3682 		}
3683 
3684 
3685 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3686 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3687 					    &phwi_context->be_wrbq[i],
3688 					    &phwi_ctrlr->wrb_context[i],
3689 					    ulp_base_num);
3690 		if (status != 0) {
3691 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3692 				    "BM_%d : wrbq create failed.");
3693 			kfree(pwrb_arr);
3694 			return status;
3695 		}
3696 		pwrb_context = &phwi_ctrlr->wrb_context[i];
3697 		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3698 	}
3699 	kfree(pwrb_arr);
3700 	return 0;
3701 }
3702 
3703 static void free_wrb_handles(struct beiscsi_hba *phba)
3704 {
3705 	unsigned int index;
3706 	struct hwi_controller *phwi_ctrlr;
3707 	struct hwi_wrb_context *pwrb_context;
3708 
3709 	phwi_ctrlr = phba->phwi_ctrlr;
3710 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3711 		pwrb_context = &phwi_ctrlr->wrb_context[index];
3712 		kfree(pwrb_context->pwrb_handle_base);
3713 		kfree(pwrb_context->pwrb_handle_basestd);
3714 	}
3715 }
3716 
3717 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3718 {
3719 	struct be_queue_info *q;
3720 	struct be_ctrl_info *ctrl = &phba->ctrl;
3721 
3722 	q = &phba->ctrl.mcc_obj.q;
3723 	if (q->created) {
3724 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3725 		be_queue_free(phba, q);
3726 	}
3727 
3728 	q = &phba->ctrl.mcc_obj.cq;
3729 	if (q->created) {
3730 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3731 		be_queue_free(phba, q);
3732 	}
3733 }
3734 
3735 static void hwi_cleanup(struct beiscsi_hba *phba)
3736 {
3737 	struct be_queue_info *q;
3738 	struct be_ctrl_info *ctrl = &phba->ctrl;
3739 	struct hwi_controller *phwi_ctrlr;
3740 	struct hwi_context_memory *phwi_context;
3741 	struct hwi_async_pdu_context *pasync_ctx;
3742 	int i, eq_for_mcc, ulp_num;
3743 
3744 	phwi_ctrlr = phba->phwi_ctrlr;
3745 	phwi_context = phwi_ctrlr->phwi_ctxt;
3746 
3747 	be_cmd_iscsi_remove_template_hdr(ctrl);
3748 
3749 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3750 		q = &phwi_context->be_wrbq[i];
3751 		if (q->created)
3752 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3753 	}
3754 	kfree(phwi_context->be_wrbq);
3755 	free_wrb_handles(phba);
3756 
3757 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3758 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3759 
3760 			q = &phwi_context->be_def_hdrq[ulp_num];
3761 			if (q->created)
3762 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3763 
3764 			q = &phwi_context->be_def_dataq[ulp_num];
3765 			if (q->created)
3766 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3767 
3768 			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3769 		}
3770 	}
3771 
3772 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3773 
3774 	for (i = 0; i < (phba->num_cpus); i++) {
3775 		q = &phwi_context->be_cq[i];
3776 		if (q->created) {
3777 			be_queue_free(phba, q);
3778 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3779 		}
3780 	}
3781 
3782 	be_mcc_queues_destroy(phba);
3783 	if (phba->msix_enabled)
3784 		eq_for_mcc = 1;
3785 	else
3786 		eq_for_mcc = 0;
3787 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3788 		q = &phwi_context->be_eq[i].q;
3789 		if (q->created) {
3790 			be_queue_free(phba, q);
3791 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3792 		}
3793 	}
3794 	be_cmd_fw_uninit(ctrl);
3795 }
3796 
3797 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3798 				struct hwi_context_memory *phwi_context)
3799 {
3800 	struct be_queue_info *q, *cq;
3801 	struct be_ctrl_info *ctrl = &phba->ctrl;
3802 
3803 	/* Alloc MCC compl queue */
3804 	cq = &phba->ctrl.mcc_obj.cq;
3805 	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3806 			sizeof(struct be_mcc_compl)))
3807 		goto err;
3808 	/* Ask BE to create MCC compl queue; */
3809 	if (phba->msix_enabled) {
3810 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3811 					 [phba->num_cpus].q, false, true, 0))
3812 		goto mcc_cq_free;
3813 	} else {
3814 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3815 					  false, true, 0))
3816 		goto mcc_cq_free;
3817 	}
3818 
3819 	/* Alloc MCC queue */
3820 	q = &phba->ctrl.mcc_obj.q;
3821 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3822 		goto mcc_cq_destroy;
3823 
3824 	/* Ask BE to create MCC queue */
3825 	if (beiscsi_cmd_mccq_create(phba, q, cq))
3826 		goto mcc_q_free;
3827 
3828 	return 0;
3829 
3830 mcc_q_free:
3831 	be_queue_free(phba, q);
3832 mcc_cq_destroy:
3833 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3834 mcc_cq_free:
3835 	be_queue_free(phba, cq);
3836 err:
3837 	return -ENOMEM;
3838 }
3839 
3840 /**
3841  * find_num_cpus()- Get the CPU online count
3842  * @phba: ptr to priv structure
3843  *
3844  * CPU count is used for creating EQ.
3845  **/
3846 static void find_num_cpus(struct beiscsi_hba *phba)
3847 {
3848 	int  num_cpus = 0;
3849 
3850 	num_cpus = num_online_cpus();
3851 
3852 	switch (phba->generation) {
3853 	case BE_GEN2:
3854 	case BE_GEN3:
3855 		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3856 				  BEISCSI_MAX_NUM_CPUS : num_cpus;
3857 		break;
3858 	case BE_GEN4:
3859 		/*
3860 		 * If eqid_count == 1 fall back to
3861 		 * INTX mechanism
3862 		 **/
3863 		if (phba->fw_config.eqid_count == 1) {
3864 			enable_msix = 0;
3865 			phba->num_cpus = 1;
3866 			return;
3867 		}
3868 
3869 		phba->num_cpus =
3870 			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
3871 			(phba->fw_config.eqid_count - 1) : num_cpus;
3872 		break;
3873 	default:
3874 		phba->num_cpus = 1;
3875 	}
3876 }
3877 
3878 static int hwi_init_port(struct beiscsi_hba *phba)
3879 {
3880 	struct hwi_controller *phwi_ctrlr;
3881 	struct hwi_context_memory *phwi_context;
3882 	unsigned int def_pdu_ring_sz;
3883 	struct be_ctrl_info *ctrl = &phba->ctrl;
3884 	int status, ulp_num;
3885 
3886 	phwi_ctrlr = phba->phwi_ctrlr;
3887 	phwi_context = phwi_ctrlr->phwi_ctxt;
3888 	phwi_context->max_eqd = 128;
3889 	phwi_context->min_eqd = 0;
3890 	phwi_context->cur_eqd = 0;
3891 	be_cmd_fw_initialize(&phba->ctrl);
3892 	/* set optic state to unknown */
3893 	phba->optic_state = 0xff;
3894 
3895 	status = beiscsi_create_eqs(phba, phwi_context);
3896 	if (status != 0) {
3897 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3898 			    "BM_%d : EQ not created\n");
3899 		goto error;
3900 	}
3901 
3902 	status = be_mcc_queues_create(phba, phwi_context);
3903 	if (status != 0)
3904 		goto error;
3905 
3906 	status = mgmt_check_supported_fw(ctrl, phba);
3907 	if (status != 0) {
3908 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3909 			    "BM_%d : Unsupported fw version\n");
3910 		goto error;
3911 	}
3912 
3913 	status = beiscsi_create_cqs(phba, phwi_context);
3914 	if (status != 0) {
3915 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3916 			    "BM_%d : CQ not created\n");
3917 		goto error;
3918 	}
3919 
3920 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3921 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3922 
3923 			def_pdu_ring_sz =
3924 				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3925 				sizeof(struct phys_addr);
3926 
3927 			status = beiscsi_create_def_hdr(phba, phwi_context,
3928 							phwi_ctrlr,
3929 							def_pdu_ring_sz,
3930 							ulp_num);
3931 			if (status != 0) {
3932 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3933 					    "BM_%d : Default Header not created for ULP : %d\n",
3934 					    ulp_num);
3935 				goto error;
3936 			}
3937 
3938 			status = beiscsi_create_def_data(phba, phwi_context,
3939 							 phwi_ctrlr,
3940 							 def_pdu_ring_sz,
3941 							 ulp_num);
3942 			if (status != 0) {
3943 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3944 					    "BM_%d : Default Data not created for ULP : %d\n",
3945 					    ulp_num);
3946 				goto error;
3947 			}
3948 		}
3949 	}
3950 
3951 	status = beiscsi_post_pages(phba);
3952 	if (status != 0) {
3953 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3954 			    "BM_%d : Post SGL Pages Failed\n");
3955 		goto error;
3956 	}
3957 
3958 	status = beiscsi_post_template_hdr(phba);
3959 	if (status != 0) {
3960 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3961 			    "BM_%d : Template HDR Posting for CXN Failed\n");
3962 	}
3963 
3964 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
3965 	if (status != 0) {
3966 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3967 			    "BM_%d : WRB Rings not created\n");
3968 		goto error;
3969 	}
3970 
3971 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3972 		uint16_t async_arr_idx = 0;
3973 
3974 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3975 			uint16_t cri = 0;
3976 			struct hwi_async_pdu_context *pasync_ctx;
3977 
3978 			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3979 				     phwi_ctrlr, ulp_num);
3980 			for (cri = 0; cri <
3981 			     phba->params.cxns_per_ctrl; cri++) {
3982 				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3983 					       (phwi_ctrlr, cri))
3984 					pasync_ctx->cid_to_async_cri_map[
3985 					phwi_ctrlr->wrb_context[cri].cid] =
3986 					async_arr_idx++;
3987 			}
3988 		}
3989 	}
3990 
3991 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3992 		    "BM_%d : hwi_init_port success\n");
3993 	return 0;
3994 
3995 error:
3996 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3997 		    "BM_%d : hwi_init_port failed");
3998 	hwi_cleanup(phba);
3999 	return status;
4000 }
4001 
4002 static int hwi_init_controller(struct beiscsi_hba *phba)
4003 {
4004 	struct hwi_controller *phwi_ctrlr;
4005 
4006 	phwi_ctrlr = phba->phwi_ctrlr;
4007 	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
4008 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
4009 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
4010 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4011 			    "BM_%d :  phwi_ctrlr->phwi_ctxt=%p\n",
4012 			    phwi_ctrlr->phwi_ctxt);
4013 	} else {
4014 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4015 			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
4016 			    "than one element.Failing to load\n");
4017 		return -ENOMEM;
4018 	}
4019 
4020 	iscsi_init_global_templates(phba);
4021 	if (beiscsi_init_wrb_handle(phba))
4022 		return -ENOMEM;
4023 
4024 	if (hwi_init_async_pdu_ctx(phba)) {
4025 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4026 			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
4027 		return -ENOMEM;
4028 	}
4029 
4030 	if (hwi_init_port(phba) != 0) {
4031 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4032 			    "BM_%d : hwi_init_controller failed\n");
4033 
4034 		return -ENOMEM;
4035 	}
4036 	return 0;
4037 }
4038 
4039 static void beiscsi_free_mem(struct beiscsi_hba *phba)
4040 {
4041 	struct be_mem_descriptor *mem_descr;
4042 	int i, j;
4043 
4044 	mem_descr = phba->init_mem;
4045 	i = 0;
4046 	j = 0;
4047 	for (i = 0; i < SE_MEM_MAX; i++) {
4048 		for (j = mem_descr->num_elements; j > 0; j--) {
4049 			pci_free_consistent(phba->pcidev,
4050 			  mem_descr->mem_array[j - 1].size,
4051 			  mem_descr->mem_array[j - 1].virtual_address,
4052 			  (unsigned long)mem_descr->mem_array[j - 1].
4053 			  bus_address.u.a64.address);
4054 		}
4055 
4056 		kfree(mem_descr->mem_array);
4057 		mem_descr++;
4058 	}
4059 	kfree(phba->init_mem);
4060 	kfree(phba->phwi_ctrlr->wrb_context);
4061 	kfree(phba->phwi_ctrlr);
4062 }
4063 
4064 static int beiscsi_init_controller(struct beiscsi_hba *phba)
4065 {
4066 	int ret = -ENOMEM;
4067 
4068 	ret = beiscsi_get_memory(phba);
4069 	if (ret < 0) {
4070 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4071 			    "BM_%d : beiscsi_dev_probe -"
4072 			    "Failed in beiscsi_alloc_memory\n");
4073 		return ret;
4074 	}
4075 
4076 	ret = hwi_init_controller(phba);
4077 	if (ret)
4078 		goto free_init;
4079 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4080 		    "BM_%d : Return success from beiscsi_init_controller");
4081 
4082 	return 0;
4083 
4084 free_init:
4085 	beiscsi_free_mem(phba);
4086 	return ret;
4087 }
4088 
4089 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
4090 {
4091 	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
4092 	struct sgl_handle *psgl_handle;
4093 	struct iscsi_sge *pfrag;
4094 	unsigned int arr_index, i, idx;
4095 	unsigned int ulp_icd_start, ulp_num = 0;
4096 
4097 	phba->io_sgl_hndl_avbl = 0;
4098 	phba->eh_sgl_hndl_avbl = 0;
4099 
4100 	mem_descr_sglh = phba->init_mem;
4101 	mem_descr_sglh += HWI_MEM_SGLH;
4102 	if (1 == mem_descr_sglh->num_elements) {
4103 		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4104 						 phba->params.ios_per_ctrl,
4105 						 GFP_KERNEL);
4106 		if (!phba->io_sgl_hndl_base) {
4107 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4108 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
4109 			return -ENOMEM;
4110 		}
4111 		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4112 						 (phba->params.icds_per_ctrl -
4113 						 phba->params.ios_per_ctrl),
4114 						 GFP_KERNEL);
4115 		if (!phba->eh_sgl_hndl_base) {
4116 			kfree(phba->io_sgl_hndl_base);
4117 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4118 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
4119 			return -ENOMEM;
4120 		}
4121 	} else {
4122 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4123 			    "BM_%d : HWI_MEM_SGLH is more than one element."
4124 			    "Failing to load\n");
4125 		return -ENOMEM;
4126 	}
4127 
4128 	arr_index = 0;
4129 	idx = 0;
4130 	while (idx < mem_descr_sglh->num_elements) {
4131 		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
4132 
4133 		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
4134 		      sizeof(struct sgl_handle)); i++) {
4135 			if (arr_index < phba->params.ios_per_ctrl) {
4136 				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
4137 				phba->io_sgl_hndl_avbl++;
4138 				arr_index++;
4139 			} else {
4140 				phba->eh_sgl_hndl_base[arr_index -
4141 					phba->params.ios_per_ctrl] =
4142 								psgl_handle;
4143 				arr_index++;
4144 				phba->eh_sgl_hndl_avbl++;
4145 			}
4146 			psgl_handle++;
4147 		}
4148 		idx++;
4149 	}
4150 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4151 		    "BM_%d : phba->io_sgl_hndl_avbl=%d"
4152 		    "phba->eh_sgl_hndl_avbl=%d\n",
4153 		    phba->io_sgl_hndl_avbl,
4154 		    phba->eh_sgl_hndl_avbl);
4155 
4156 	mem_descr_sg = phba->init_mem;
4157 	mem_descr_sg += HWI_MEM_SGE;
4158 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4159 		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
4160 		    mem_descr_sg->num_elements);
4161 
4162 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
4163 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
4164 			break;
4165 
4166 	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4167 
4168 	arr_index = 0;
4169 	idx = 0;
4170 	while (idx < mem_descr_sg->num_elements) {
4171 		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4172 
4173 		for (i = 0;
4174 		     i < (mem_descr_sg->mem_array[idx].size) /
4175 		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4176 		     i++) {
4177 			if (arr_index < phba->params.ios_per_ctrl)
4178 				psgl_handle = phba->io_sgl_hndl_base[arr_index];
4179 			else
4180 				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4181 						phba->params.ios_per_ctrl];
4182 			psgl_handle->pfrag = pfrag;
4183 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4184 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4185 			pfrag += phba->params.num_sge_per_io;
4186 			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
4187 		}
4188 		idx++;
4189 	}
4190 	phba->io_sgl_free_index = 0;
4191 	phba->io_sgl_alloc_index = 0;
4192 	phba->eh_sgl_free_index = 0;
4193 	phba->eh_sgl_alloc_index = 0;
4194 	return 0;
4195 }
4196 
4197 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4198 {
4199 	int ret;
4200 	uint16_t i, ulp_num;
4201 	struct ulp_cid_info *ptr_cid_info = NULL;
4202 
4203 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4204 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4205 			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4206 					       GFP_KERNEL);
4207 
4208 			if (!ptr_cid_info) {
4209 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4210 					    "BM_%d : Failed to allocate memory"
4211 					    "for ULP_CID_INFO for ULP : %d\n",
4212 					    ulp_num);
4213 				ret = -ENOMEM;
4214 				goto free_memory;
4215 
4216 			}
4217 
4218 			/* Allocate memory for CID array */
4219 			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
4220 						  BEISCSI_GET_CID_COUNT(phba,
4221 						  ulp_num), GFP_KERNEL);
4222 			if (!ptr_cid_info->cid_array) {
4223 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4224 					    "BM_%d : Failed to allocate memory"
4225 					    "for CID_ARRAY for ULP : %d\n",
4226 					    ulp_num);
4227 				kfree(ptr_cid_info);
4228 				ptr_cid_info = NULL;
4229 				ret = -ENOMEM;
4230 
4231 				goto free_memory;
4232 			}
4233 			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4234 						   phba, ulp_num);
4235 
4236 			/* Save the cid_info_array ptr */
4237 			phba->cid_array_info[ulp_num] = ptr_cid_info;
4238 		}
4239 	}
4240 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
4241 				 phba->params.cxns_per_ctrl, GFP_KERNEL);
4242 	if (!phba->ep_array) {
4243 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4244 			    "BM_%d : Failed to allocate memory in "
4245 			    "hba_setup_cid_tbls\n");
4246 		ret = -ENOMEM;
4247 
4248 		goto free_memory;
4249 	}
4250 
4251 	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
4252 				   phba->params.cxns_per_ctrl, GFP_KERNEL);
4253 	if (!phba->conn_table) {
4254 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4255 			    "BM_%d : Failed to allocate memory in"
4256 			    "hba_setup_cid_tbls\n");
4257 
4258 		kfree(phba->ep_array);
4259 		phba->ep_array = NULL;
4260 		ret = -ENOMEM;
4261 
4262 		goto free_memory;
4263 	}
4264 
4265 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4266 		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4267 
4268 		ptr_cid_info = phba->cid_array_info[ulp_num];
4269 		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4270 			phba->phwi_ctrlr->wrb_context[i].cid;
4271 
4272 	}
4273 
4274 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4275 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4276 			ptr_cid_info = phba->cid_array_info[ulp_num];
4277 
4278 			ptr_cid_info->cid_alloc = 0;
4279 			ptr_cid_info->cid_free = 0;
4280 		}
4281 	}
4282 	return 0;
4283 
4284 free_memory:
4285 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4286 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4287 			ptr_cid_info = phba->cid_array_info[ulp_num];
4288 
4289 			if (ptr_cid_info) {
4290 				kfree(ptr_cid_info->cid_array);
4291 				kfree(ptr_cid_info);
4292 				phba->cid_array_info[ulp_num] = NULL;
4293 			}
4294 		}
4295 	}
4296 
4297 	return ret;
4298 }
4299 
/*
 * hwi_enable_intr()- Enable host interrupts and arm every event queue.
 *
 * Sets the host-interrupt bit in the PCI config membar (if not already
 * set), then rings the EQ doorbell for each event queue so the adapter
 * starts delivering events.
 */
static void hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);

	/* set the host-interrupt enable bit only if currently clear */
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : reg =x%08x addr=%p\n", reg, addr);
		iowrite32(reg, addr);
	}

	if (!phba->msix_enabled) {
		/* INTx mode: only EQ 0 exists */
		eq = &phwi_context->be_eq[0].q;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eq->id=%d\n", eq->id);

		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		/* '<=' arms num_cpus EQs plus the extra EQ used for MCC */
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}
4340 
4341 static void hwi_disable_intr(struct beiscsi_hba *phba)
4342 {
4343 	struct be_ctrl_info *ctrl = &phba->ctrl;
4344 
4345 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4346 	u32 reg = ioread32(addr);
4347 
4348 	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4349 	if (enabled) {
4350 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4351 		iowrite32(reg, addr);
4352 	} else
4353 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4354 			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
4355 }
4356 
4357 /**
4358  * beiscsi_get_boot_info()- Get the boot session info
4359  * @phba: The device priv structure instance
4360  *
4361  * Get the boot target info and store in driver priv structure
4362  *
4363  * return values
4364  *	Success: 0
4365  *	Failure: Non-Zero Value
4366  **/
4367 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4368 {
4369 	struct be_cmd_get_session_resp *session_resp;
4370 	struct be_dma_mem nonemb_cmd;
4371 	unsigned int tag;
4372 	unsigned int s_handle;
4373 	int ret = -ENOMEM;
4374 
4375 	/* Get the session handle of the boot target */
4376 	ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4377 	if (ret) {
4378 		beiscsi_log(phba, KERN_ERR,
4379 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4380 			    "BM_%d : No boot session\n");
4381 
4382 		if (ret == -ENXIO)
4383 			phba->get_boot = 0;
4384 
4385 
4386 		return ret;
4387 	}
4388 	phba->get_boot = 0;
4389 	nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
4390 					      sizeof(*session_resp),
4391 					      &nonemb_cmd.dma);
4392 	if (nonemb_cmd.va == NULL) {
4393 		beiscsi_log(phba, KERN_ERR,
4394 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4395 			    "BM_%d : Failed to allocate memory for"
4396 			    "beiscsi_get_session_info\n");
4397 
4398 		return -ENOMEM;
4399 	}
4400 
4401 	tag = mgmt_get_session_info(phba, s_handle,
4402 				    &nonemb_cmd);
4403 	if (!tag) {
4404 		beiscsi_log(phba, KERN_ERR,
4405 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4406 			    "BM_%d : beiscsi_get_session_info"
4407 			    " Failed\n");
4408 
4409 		goto boot_freemem;
4410 	}
4411 
4412 	ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
4413 	if (ret) {
4414 		beiscsi_log(phba, KERN_ERR,
4415 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4416 			    "BM_%d : beiscsi_get_session_info Failed");
4417 
4418 		if (ret != -EBUSY)
4419 			goto boot_freemem;
4420 		else
4421 			return ret;
4422 	}
4423 
4424 	session_resp = nonemb_cmd.va ;
4425 
4426 	memcpy(&phba->boot_sess, &session_resp->session_info,
4427 	       sizeof(struct mgmt_session_info));
4428 
4429 	 beiscsi_logout_fw_sess(phba,
4430 				phba->boot_sess.session_handle);
4431 	ret = 0;
4432 
4433 boot_freemem:
4434 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4435 		    nonemb_cmd.va, nonemb_cmd.dma);
4436 	return ret;
4437 }
4438 
4439 static void beiscsi_boot_release(void *data)
4440 {
4441 	struct beiscsi_hba *phba = data;
4442 
4443 	scsi_host_put(phba->shost);
4444 }
4445 
/*
 * beiscsi_setup_boot_info()- Expose boot target/initiator/ethernet info
 * through the iscsi_boot sysfs kset.
 *
 * Each boot kobj pins a host reference which beiscsi_boot_release()
 * drops when the kobj is destroyed; on error, iscsi_boot_destroy_kset()
 * tears down any kobjs already created (releasing their refs), and
 * put_shost undoes the one ref not yet owned by a kobj.
 *
 * Returns 0 on success (also when no boot session exists), -ENOMEM on
 * allocation failure.
 */
static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
	struct iscsi_boot_kobj *boot_kobj;

	/* it has been created previously */
	if (phba->boot_kset)
		return 0;

	/* get boot info using mgmt cmd */
	if (beiscsi_get_boot_info(phba))
		/* Try to see if we can carry on without this */
		return 0;

	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!phba->boot_kset)
		return -ENOMEM;

	/* get a ref because the show function will ref the phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	/* one more ref for the initiator kobj */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	/* and one for the ethernet kobj */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;
	return 0;

put_shost:
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(phba->boot_kset);
	phba->boot_kset = NULL;
	return -ENOMEM;
}
4499 
4500 static int beiscsi_init_port(struct beiscsi_hba *phba)
4501 {
4502 	int ret;
4503 
4504 	ret = beiscsi_init_controller(phba);
4505 	if (ret < 0) {
4506 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4507 			    "BM_%d : beiscsi_dev_probe - Failed in"
4508 			    "beiscsi_init_controller\n");
4509 		return ret;
4510 	}
4511 	ret = beiscsi_init_sgl_handle(phba);
4512 	if (ret < 0) {
4513 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4514 			    "BM_%d : beiscsi_dev_probe - Failed in"
4515 			    "beiscsi_init_sgl_handle\n");
4516 		goto do_cleanup_ctrlr;
4517 	}
4518 
4519 	if (hba_setup_cid_tbls(phba)) {
4520 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4521 			    "BM_%d : Failed in hba_setup_cid_tbls\n");
4522 		kfree(phba->io_sgl_hndl_base);
4523 		kfree(phba->eh_sgl_hndl_base);
4524 		goto do_cleanup_ctrlr;
4525 	}
4526 
4527 	return ret;
4528 
4529 do_cleanup_ctrlr:
4530 	hwi_cleanup(phba);
4531 	return ret;
4532 }
4533 
4534 static void hwi_purge_eq(struct beiscsi_hba *phba)
4535 {
4536 	struct hwi_controller *phwi_ctrlr;
4537 	struct hwi_context_memory *phwi_context;
4538 	struct be_queue_info *eq;
4539 	struct be_eq_entry *eqe = NULL;
4540 	int i, eq_msix;
4541 	unsigned int num_processed;
4542 
4543 	phwi_ctrlr = phba->phwi_ctrlr;
4544 	phwi_context = phwi_ctrlr->phwi_ctxt;
4545 	if (phba->msix_enabled)
4546 		eq_msix = 1;
4547 	else
4548 		eq_msix = 0;
4549 
4550 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4551 		eq = &phwi_context->be_eq[i].q;
4552 		eqe = queue_tail_node(eq);
4553 		num_processed = 0;
4554 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4555 					& EQE_VALID_MASK) {
4556 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4557 			queue_tail_inc(eq);
4558 			eqe = queue_tail_node(eq);
4559 			num_processed++;
4560 		}
4561 
4562 		if (num_processed)
4563 			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
4564 	}
4565 }
4566 
4567 static void beiscsi_clean_port(struct beiscsi_hba *phba)
4568 {
4569 	int mgmt_status, ulp_num;
4570 	struct ulp_cid_info *ptr_cid_info = NULL;
4571 
4572 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4573 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4574 			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4575 			if (mgmt_status)
4576 				beiscsi_log(phba, KERN_WARNING,
4577 					    BEISCSI_LOG_INIT,
4578 					    "BM_%d : mgmt_epfw_cleanup FAILED"
4579 					    " for ULP_%d\n", ulp_num);
4580 		}
4581 	}
4582 
4583 	hwi_purge_eq(phba);
4584 	hwi_cleanup(phba);
4585 	kfree(phba->io_sgl_hndl_base);
4586 	kfree(phba->eh_sgl_hndl_base);
4587 	kfree(phba->ep_array);
4588 	kfree(phba->conn_table);
4589 
4590 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4591 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4592 			ptr_cid_info = phba->cid_array_info[ulp_num];
4593 
4594 			if (ptr_cid_info) {
4595 				kfree(ptr_cid_info->cid_array);
4596 				kfree(ptr_cid_info);
4597 				phba->cid_array_info[ulp_num] = NULL;
4598 			}
4599 		}
4600 	}
4601 
4602 }
4603 
4604 /**
4605  * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4606  * @beiscsi_conn: ptr to the conn to be cleaned up
4607  * @task: ptr to iscsi_task resource to be freed.
4608  *
4609  * Free driver mgmt resources binded to CXN.
4610  **/
4611 void
4612 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4613 				struct iscsi_task *task)
4614 {
4615 	struct beiscsi_io_task *io_task;
4616 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4617 	struct hwi_wrb_context *pwrb_context;
4618 	struct hwi_controller *phwi_ctrlr;
4619 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4620 				beiscsi_conn->beiscsi_conn_cid);
4621 
4622 	phwi_ctrlr = phba->phwi_ctrlr;
4623 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4624 
4625 	io_task = task->dd_data;
4626 
4627 	if (io_task->pwrb_handle) {
4628 		memset(io_task->pwrb_handle->pwrb, 0,
4629 		       sizeof(struct iscsi_wrb));
4630 		free_wrb_handle(phba, pwrb_context,
4631 				io_task->pwrb_handle);
4632 		io_task->pwrb_handle = NULL;
4633 	}
4634 
4635 	if (io_task->psgl_handle) {
4636 		free_mgmt_sgl_handle(phba,
4637 				     io_task->psgl_handle);
4638 		io_task->psgl_handle = NULL;
4639 	}
4640 
4641 	if (io_task->mtask_addr) {
4642 		pci_unmap_single(phba->pcidev,
4643 				 io_task->mtask_addr,
4644 				 io_task->mtask_data_count,
4645 				 PCI_DMA_TODEVICE);
4646 		io_task->mtask_addr = 0;
4647 	}
4648 }
4649 
4650 /**
4651  * beiscsi_cleanup_task()- Free driver resources of the task
4652  * @task: ptr to the iscsi task
4653  *
4654  **/
4655 static void beiscsi_cleanup_task(struct iscsi_task *task)
4656 {
4657 	struct beiscsi_io_task *io_task = task->dd_data;
4658 	struct iscsi_conn *conn = task->conn;
4659 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4660 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4661 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4662 	struct hwi_wrb_context *pwrb_context;
4663 	struct hwi_controller *phwi_ctrlr;
4664 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4665 			     beiscsi_conn->beiscsi_conn_cid);
4666 
4667 	phwi_ctrlr = phba->phwi_ctrlr;
4668 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4669 
4670 	if (io_task->cmd_bhs) {
4671 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4672 			      io_task->bhs_pa.u.a64.address);
4673 		io_task->cmd_bhs = NULL;
4674 	}
4675 
4676 	if (task->sc) {
4677 		if (io_task->pwrb_handle) {
4678 			free_wrb_handle(phba, pwrb_context,
4679 					io_task->pwrb_handle);
4680 			io_task->pwrb_handle = NULL;
4681 		}
4682 
4683 		if (io_task->psgl_handle) {
4684 			free_io_sgl_handle(phba, io_task->psgl_handle);
4685 			io_task->psgl_handle = NULL;
4686 		}
4687 
4688 		if (io_task->scsi_cmnd) {
4689 			scsi_dma_unmap(io_task->scsi_cmnd);
4690 			io_task->scsi_cmnd = NULL;
4691 		}
4692 	} else {
4693 		if (!beiscsi_conn->login_in_progress)
4694 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4695 	}
4696 }
4697 
/**
 * beiscsi_offload_connection()- Hand a logged-in connection to the adapter.
 * @beiscsi_conn: the connection being offloaded
 * @params: offload parameters for the connection
 *
 * Frees the login task's resources, builds a CONTEXT_UPDATE WRB with the
 * offload parameters (chip-family specific layout) and rings the WRB
 * doorbell to post it.
 **/
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct hwi_wrb_context *pwrb_context = NULL;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	beiscsi_conn->login_in_progress = 0;
	/* back_lock protects the task teardown against completion path */
	spin_lock_bh(&session->back_lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->back_lock);

	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
				       &pwrb_context);

	/* Check for the adapter family */
	if (is_chip_be2_be3r(phba))
		beiscsi_offload_cxn_v0(params, pwrb_handle,
				       phba->init_mem,
				       pwrb_context);
	else
		beiscsi_offload_cxn_v2(params, pwrb_handle,
				       pwrb_context);

	be_dws_le_to_cpu(pwrb_handle->pwrb,
			 sizeof(struct iscsi_target_context_update_wrb));

	/* compose the doorbell: CID, WRB index, number of WRBs posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);

	/*
	 * There is no completion for CONTEXT_UPDATE. The completion of next
	 * WRB posted guarantees FW's processing and DMA'ing of it.
	 * Use beiscsi_put_wrb_handle to put it back in the pool which makes
	 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
	 */
	beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}
4754 
4755 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4756 			      int *index, int *age)
4757 {
4758 	*index = (int)itt;
4759 	if (age)
4760 		*age = conn->session->age;
4761 }
4762 
4763 /**
4764  * beiscsi_alloc_pdu - allocates pdu and related resources
4765  * @task: libiscsi task
4766  * @opcode: opcode of pdu for task
4767  *
4768  * This is called with the session lock held. It will allocate
4769  * the wrb and sgl if needed for the command. And it will prep
4770  * the pdu's itt. beiscsi_parse_pdu will later translate
4771  * the pdu itt to the libiscsi task itt.
4772  */
4773 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4774 {
4775 	struct beiscsi_io_task *io_task = task->dd_data;
4776 	struct iscsi_conn *conn = task->conn;
4777 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4778 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4779 	struct hwi_wrb_context *pwrb_context;
4780 	struct hwi_controller *phwi_ctrlr;
4781 	itt_t itt;
4782 	uint16_t cri_index = 0;
4783 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4784 	dma_addr_t paddr;
4785 
4786 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4787 					  GFP_ATOMIC, &paddr);
4788 	if (!io_task->cmd_bhs)
4789 		return -ENOMEM;
4790 	io_task->bhs_pa.u.a64.address = paddr;
4791 	io_task->libiscsi_itt = (itt_t)task->itt;
4792 	io_task->conn = beiscsi_conn;
4793 
4794 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4795 	task->hdr_max = sizeof(struct be_cmd_bhs);
4796 	io_task->psgl_handle = NULL;
4797 	io_task->pwrb_handle = NULL;
4798 
4799 	if (task->sc) {
4800 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
4801 		if (!io_task->psgl_handle) {
4802 			beiscsi_log(phba, KERN_ERR,
4803 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4804 				    "BM_%d : Alloc of IO_SGL_ICD Failed"
4805 				    "for the CID : %d\n",
4806 				    beiscsi_conn->beiscsi_conn_cid);
4807 			goto free_hndls;
4808 		}
4809 		io_task->pwrb_handle = alloc_wrb_handle(phba,
4810 					beiscsi_conn->beiscsi_conn_cid,
4811 					&io_task->pwrb_context);
4812 		if (!io_task->pwrb_handle) {
4813 			beiscsi_log(phba, KERN_ERR,
4814 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4815 				    "BM_%d : Alloc of WRB_HANDLE Failed"
4816 				    "for the CID : %d\n",
4817 				    beiscsi_conn->beiscsi_conn_cid);
4818 			goto free_io_hndls;
4819 		}
4820 	} else {
4821 		io_task->scsi_cmnd = NULL;
4822 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4823 			beiscsi_conn->task = task;
4824 			if (!beiscsi_conn->login_in_progress) {
4825 				io_task->psgl_handle = (struct sgl_handle *)
4826 						alloc_mgmt_sgl_handle(phba);
4827 				if (!io_task->psgl_handle) {
4828 					beiscsi_log(phba, KERN_ERR,
4829 						    BEISCSI_LOG_IO |
4830 						    BEISCSI_LOG_CONFIG,
4831 						    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4832 						    "for the CID : %d\n",
4833 						    beiscsi_conn->
4834 						    beiscsi_conn_cid);
4835 					goto free_hndls;
4836 				}
4837 
4838 				beiscsi_conn->login_in_progress = 1;
4839 				beiscsi_conn->plogin_sgl_handle =
4840 							io_task->psgl_handle;
4841 				io_task->pwrb_handle =
4842 					alloc_wrb_handle(phba,
4843 					beiscsi_conn->beiscsi_conn_cid,
4844 					&io_task->pwrb_context);
4845 				if (!io_task->pwrb_handle) {
4846 					beiscsi_log(phba, KERN_ERR,
4847 						    BEISCSI_LOG_IO |
4848 						    BEISCSI_LOG_CONFIG,
4849 						    "BM_%d : Alloc of WRB_HANDLE Failed"
4850 						    "for the CID : %d\n",
4851 						    beiscsi_conn->
4852 						    beiscsi_conn_cid);
4853 					goto free_mgmt_hndls;
4854 				}
4855 				beiscsi_conn->plogin_wrb_handle =
4856 							io_task->pwrb_handle;
4857 
4858 			} else {
4859 				io_task->psgl_handle =
4860 						beiscsi_conn->plogin_sgl_handle;
4861 				io_task->pwrb_handle =
4862 						beiscsi_conn->plogin_wrb_handle;
4863 			}
4864 		} else {
4865 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4866 			if (!io_task->psgl_handle) {
4867 				beiscsi_log(phba, KERN_ERR,
4868 					    BEISCSI_LOG_IO |
4869 					    BEISCSI_LOG_CONFIG,
4870 					    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4871 					    "for the CID : %d\n",
4872 					    beiscsi_conn->
4873 					    beiscsi_conn_cid);
4874 				goto free_hndls;
4875 			}
4876 			io_task->pwrb_handle =
4877 					alloc_wrb_handle(phba,
4878 					beiscsi_conn->beiscsi_conn_cid,
4879 					&io_task->pwrb_context);
4880 			if (!io_task->pwrb_handle) {
4881 				beiscsi_log(phba, KERN_ERR,
4882 					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4883 					    "BM_%d : Alloc of WRB_HANDLE Failed"
4884 					    "for the CID : %d\n",
4885 					    beiscsi_conn->beiscsi_conn_cid);
4886 				goto free_mgmt_hndls;
4887 			}
4888 
4889 		}
4890 	}
4891 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4892 				 wrb_index << 16) | (unsigned int)
4893 				(io_task->psgl_handle->sgl_index));
4894 	io_task->pwrb_handle->pio_handle = task;
4895 
4896 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
4897 	return 0;
4898 
4899 free_io_hndls:
4900 	free_io_sgl_handle(phba, io_task->psgl_handle);
4901 	goto free_hndls;
4902 free_mgmt_hndls:
4903 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4904 	io_task->psgl_handle = NULL;
4905 free_hndls:
4906 	phwi_ctrlr = phba->phwi_ctrlr;
4907 	cri_index = BE_GET_CRI_FROM_CID(
4908 	beiscsi_conn->beiscsi_conn_cid);
4909 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4910 	if (io_task->pwrb_handle)
4911 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4912 	io_task->pwrb_handle = NULL;
4913 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4914 		      io_task->bhs_pa.u.a64.address);
4915 	io_task->cmd_bhs = NULL;
4916 	return -ENOMEM;
4917 }
/**
 * beiscsi_iotask_v2 - build and post a SCSI I/O WRB (v2/SKH-R WRB layout)
 * @task: iSCSI task carrying the SCSI command
 * @sg: scatter-gather list already DMA-mapped by the caller
 * @num_sg: number of mapped entries in @sg
 * @xferlen: total data transfer length in bytes
 * @writedir: non-zero for data-out (write), zero for data-in (read)
 *
 * Programs the pre-allocated work request block (WRB) for this task using
 * the amap_iscsi_wrb_v2 field layout, links it into the per-connection WRB
 * chain and rings the WRB-post doorbell.
 *
 * Return: always 0.
 */
int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
		       unsigned int num_sg, unsigned int xferlen,
		       unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;

	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	/* Command type and DSP (data segment present) depend on direction. */
	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
	}

	/* Remember the type just written; used by the completion path. */
	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
		      &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
	/* Link this WRB into the ring: point it at itself, then make the
	 * previously posted WRB (if any) point at this one.
	 */
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	/* Convert the whole WRB to the adapter's expected byte order. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Notify the adapter: CID + WRB index + number of WRBs posted. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) <<
		     DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
4978 
/**
 * beiscsi_iotask - build and post a SCSI I/O WRB (BE2/BE3 WRB layout)
 * @task: iSCSI task carrying the SCSI command
 * @sg: scatter-gather list already DMA-mapped by the caller
 * @num_sg: number of mapped entries in @sg
 * @xferlen: total data transfer length in bytes
 * @writedir: non-zero for data-out (write), zero for data-in (read)
 *
 * Same flow as beiscsi_iotask_v2() but uses the original amap_iscsi_wrb
 * field layout for BE2/BE3-R generation adapters.
 *
 * Return: always 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	/* Command type and DSP (data segment present) depend on direction. */
	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}

	/* Remember the type just written; used by the completion path. */
	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	/* Link this WRB into the ring: point it at itself, then make the
	 * previously posted WRB (if any) point at this one.
	 */
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	/* Convert the whole WRB to the adapter's expected byte order. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Notify the adapter: CID + WRB index + number of WRBs posted. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
5039 
/**
 * beiscsi_mtask - build and post a WRB for a management (non-SCSI) PDU
 * @task: iSCSI task for the management opcode (login, nop-out, text,
 *	  TMF, logout)
 *
 * Zeroes the task's WRB, fills the common fields using the field layout
 * matching the adapter generation (amap_iscsi_wrb for BE2/BE3-R,
 * amap_iscsi_wrb_v2 for SKH+), sets the WRB type according to the iSCSI
 * opcode, copies the PDU into the adapter buffer via hwi_write_buffer()
 * and rings the WRB-post doorbell.
 *
 * Return: 0 on success, -EINVAL for an unsupported opcode, or the error
 * from hwi_write_buffer().
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;
	unsigned int pwrb_typeoffset = 0;
	int ret = 0;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));

	/* Common WRB fields; layout (and the type-field offset used by
	 * ADAPTER_SET_WRB_TYPE below) differ per chip generation.
	 */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
			      task->data_count);
		/* Chain this WRB after the previously posted one. */
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
			      task->data_count);
		/* Chain this WRB after the previously posted one. */
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
	}


	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		/* NOTE(review): this uses the BE2/BE3 amap layout even on
		 * v2 chips - confirm the cmdsn_itt offsets coincide.
		 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			/* Reply to a target NOP-In: no completion needed. */
			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 1);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 1);
		} else {
			/* Initiator-originated NOP-Out expecting NOP-In. */
			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 0);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 0);
		}
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;

	default:
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
			    "BM_%d : opcode =%d Not supported\n",
			    task->hdr->opcode & ISCSI_OPCODE_MASK);

		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Set the task type */
	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);

	/* Notify the adapter: CID + WRB index + number of WRBs posted. */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
5158 
5159 static int beiscsi_task_xmit(struct iscsi_task *task)
5160 {
5161 	struct beiscsi_io_task *io_task = task->dd_data;
5162 	struct scsi_cmnd *sc = task->sc;
5163 	struct beiscsi_hba *phba;
5164 	struct scatterlist *sg;
5165 	int num_sg;
5166 	unsigned int  writedir = 0, xferlen = 0;
5167 
5168 	if (!io_task->conn->login_in_progress)
5169 		task->hdr->exp_statsn = 0;
5170 
5171 	if (!sc)
5172 		return beiscsi_mtask(task);
5173 
5174 	io_task->scsi_cmnd = sc;
5175 	num_sg = scsi_dma_map(sc);
5176 	phba = io_task->conn->phba;
5177 	if (num_sg < 0) {
5178 		beiscsi_log(phba, KERN_ERR,
5179 			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
5180 			    "BM_%d : scsi_dma_map Failed "
5181 			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
5182 			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
5183 			    io_task->libiscsi_itt, scsi_bufflen(sc));
5184 
5185 		return num_sg;
5186 	}
5187 	xferlen = scsi_bufflen(sc);
5188 	sg = scsi_sglist(sc);
5189 	if (sc->sc_data_direction == DMA_TO_DEVICE)
5190 		writedir = 1;
5191 	 else
5192 		writedir = 0;
5193 
5194 	 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
5195 }
5196 
5197 /**
5198  * beiscsi_bsg_request - handle bsg request from ISCSI transport
5199  * @job: job to handle
5200  */
5201 static int beiscsi_bsg_request(struct bsg_job *job)
5202 {
5203 	struct Scsi_Host *shost;
5204 	struct beiscsi_hba *phba;
5205 	struct iscsi_bsg_request *bsg_req = job->request;
5206 	int rc = -EINVAL;
5207 	unsigned int tag;
5208 	struct be_dma_mem nonemb_cmd;
5209 	struct be_cmd_resp_hdr *resp;
5210 	struct iscsi_bsg_reply *bsg_reply = job->reply;
5211 	unsigned short status, extd_status;
5212 
5213 	shost = iscsi_job_to_shost(job);
5214 	phba = iscsi_host_priv(shost);
5215 
5216 	switch (bsg_req->msgcode) {
5217 	case ISCSI_BSG_HST_VENDOR:
5218 		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
5219 					job->request_payload.payload_len,
5220 					&nonemb_cmd.dma);
5221 		if (nonemb_cmd.va == NULL) {
5222 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5223 				    "BM_%d : Failed to allocate memory for "
5224 				    "beiscsi_bsg_request\n");
5225 			return -ENOMEM;
5226 		}
5227 		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
5228 						  &nonemb_cmd);
5229 		if (!tag) {
5230 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5231 				    "BM_%d : MBX Tag Allocation Failed\n");
5232 
5233 			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5234 					    nonemb_cmd.va, nonemb_cmd.dma);
5235 			return -EAGAIN;
5236 		}
5237 
5238 		rc = wait_event_interruptible_timeout(
5239 					phba->ctrl.mcc_wait[tag],
5240 					phba->ctrl.mcc_tag_status[tag],
5241 					msecs_to_jiffies(
5242 					BEISCSI_HOST_MBX_TIMEOUT));
5243 		extd_status = (phba->ctrl.mcc_tag_status[tag] &
5244 			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
5245 		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
5246 		free_mcc_wrb(&phba->ctrl, tag);
5247 		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
5248 		sg_copy_from_buffer(job->reply_payload.sg_list,
5249 				    job->reply_payload.sg_cnt,
5250 				    nonemb_cmd.va, (resp->response_length
5251 				    + sizeof(*resp)));
5252 		bsg_reply->reply_payload_rcv_len = resp->response_length;
5253 		bsg_reply->result = status;
5254 		bsg_job_done(job, bsg_reply->result,
5255 			     bsg_reply->reply_payload_rcv_len);
5256 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5257 				    nonemb_cmd.va, nonemb_cmd.dma);
5258 		if (status || extd_status) {
5259 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5260 				    "BM_%d : MBX Cmd Failed"
5261 				    " status = %d extd_status = %d\n",
5262 				    status, extd_status);
5263 
5264 			return -EIO;
5265 		} else {
5266 			rc = 0;
5267 		}
5268 		break;
5269 
5270 	default:
5271 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5272 				"BM_%d : Unsupported bsg command: 0x%x\n",
5273 				bsg_req->msgcode);
5274 		break;
5275 	}
5276 
5277 	return rc;
5278 }
5279 
/**
 * beiscsi_hba_attrs_init - initialize driver-configurable attributes
 * @phba: driver private structure
 *
 * Currently only seeds the log_enable attribute from the module
 * parameter.
 */
void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
	/* Set the logging parameter */
	beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
5285 
5286 /*
5287  * beiscsi_quiesce()- Cleanup Driver resources
5288  * @phba: Instance Priv structure
 * @unload_state: Clean (BEISCSI_CLEAN_UNLOAD) or EEH unload state
5290  *
5291  * Free the OS and HW resources held by the driver
5292  **/
static void beiscsi_quiesce(struct beiscsi_hba *phba,
		uint32_t unload_state)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* Stop HW interrupts first, then release the IRQ lines. */
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* <= num_cpus: one vector per CPU EQ plus one for MCC. */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);

	/* Only the per-CPU EQs have irq_poll contexts. */
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}

	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
		/* Normal unload: tear down everything including memory. */
		destroy_workqueue(phba->wq);
		beiscsi_clean_port(phba);
		beiscsi_free_mem(phba);

		beiscsi_unmap_pci_function(phba);
		pci_free_consistent(phba->pcidev,
				    phba->ctrl.mbox_mem_alloced.size,
				    phba->ctrl.mbox_mem_alloced.va,
				    phba->ctrl.mbox_mem_alloced.dma);
	} else {
		/* EEH unload: only drain and clean HW queues; resources
		 * are kept for the recovery/resume path.
		 */
		hwi_purge_eq(phba);
		hwi_cleanup(phba);
	}

}
5337 
/**
 * beiscsi_remove - PCI remove callback
 * @pcidev: PCI device being removed
 *
 * Tears down the iSCSI host, interfaces, boot kset and all driver
 * resources, then releases the PCI device. Ordering matters: the SCSI
 * host must be removed before beiscsi_quiesce() frees HW resources.
 */
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	beiscsi_destroy_def_ifaces(phba);
	iscsi_boot_destroy_kset(phba->boot_kset);
	iscsi_host_remove(phba->shost);
	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}
5359 
5360 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5361 {
5362 	int i, status;
5363 
5364 	for (i = 0; i <= phba->num_cpus; i++)
5365 		phba->msix_entries[i].entry = i;
5366 
5367 	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5368 				       phba->num_cpus + 1, phba->num_cpus + 1);
5369 	if (status > 0)
5370 		phba->msix_enabled = true;
5371 
5372 	return;
5373 }
5374 
/**
 * be_eqd_update - adaptive interrupt coalescing update
 * @phba: driver private structure
 *
 * For each EQ, estimate completions per second since the last sample and
 * derive a new EQ delay multiplier. Changed values are pushed to the FW
 * in a single MODIFY_EQ_DELAY MCC command. Called periodically from the
 * HW health-check work item.
 */
static void be_eqd_update(struct beiscsi_hba *phba)
{
	struct be_set_eqd set_eqd[MAX_CPUS];
	struct be_aic_obj *aic;
	struct be_eq_obj *pbe_eq;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int eqd, i, num = 0;
	ulong now;
	u32 pps, delta;
	unsigned int tag;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i <= phba->num_cpus; i++) {
		aic = &phba->aic_obj[i];
		pbe_eq = &phwi_context->be_eq[i];
		now = jiffies;
		/* Re-seed the sample on first pass, jiffies wrap, or a
		 * cq_count reset - no rate can be computed then.
		 */
		if (!aic->jiffs || time_before(now, aic->jiffs) ||
		    pbe_eq->cq_count < aic->eq_prev) {
			aic->jiffs = now;
			aic->eq_prev = pbe_eq->cq_count;
			continue;
		}
		delta = jiffies_to_msecs(now - aic->jiffs);
		/* Completions per second over the sampling interval. */
		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
		eqd = (pps / 1500) << 2;

		if (eqd < 8)
			eqd = 0;
		/* Clamp to the FW-reported allowed delay range. */
		eqd = min_t(u32, eqd, phwi_context->max_eqd);
		eqd = max_t(u32, eqd, phwi_context->min_eqd);

		aic->jiffs = now;
		aic->eq_prev = pbe_eq->cq_count;

		/* Queue an update only when the value actually changed. */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = pbe_eq->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}
	if (num) {
		tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
		if (tag)
			beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
	}
}
5425 
5426 static void be_check_boot_session(struct beiscsi_hba *phba)
5427 {
5428 	if (beiscsi_setup_boot_info(phba))
5429 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5430 			    "BM_%d : Could not set up "
5431 			    "iSCSI boot info on async event.\n");
5432 }
5433 
5434 /*
5435  * beiscsi_hw_health_check()- Check adapter health
5436  * @work: work item to check HW health
5437  *
5438  * Check if adapter in an unrecoverable state or not.
5439  **/
static void
beiscsi_hw_health_check(struct work_struct *work)
{
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba,
			     beiscsi_hw_check_task.work);

	/* Refresh adaptive interrupt coalescing settings. */
	be_eqd_update(phba);

	/* If an async event asked for boot info and it is not yet built,
	 * retry every BE_GET_BOOT_TO ticks until the retry budget
	 * (get_boot) runs out or the kset exists.
	 */
	if (phba->state & BE_ADAPTER_CHECK_BOOT) {
		if ((phba->get_boot > 0) && (!phba->boot_kset)) {
			phba->get_boot--;
			if (!(phba->get_boot % BE_GET_BOOT_TO))
				be_check_boot_session(phba);
		} else {
			phba->state &= ~BE_ADAPTER_CHECK_BOOT;
			phba->get_boot = 0;
		}
	}

	/* Poll for unrecoverable-error (UE) indications. */
	beiscsi_ue_detect(phba);

	/* Re-arm: this check runs once a second. */
	schedule_delayed_work(&phba->beiscsi_hw_check_task,
			      msecs_to_jiffies(1000));
}
5465 
5466 
5467 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5468 		pci_channel_state_t state)
5469 {
5470 	struct beiscsi_hba *phba = NULL;
5471 
5472 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5473 	phba->state |= BE_ADAPTER_PCI_ERR;
5474 
5475 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5476 		    "BM_%d : EEH error detected\n");
5477 
5478 	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
5479 
5480 	if (state == pci_channel_io_perm_failure) {
5481 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5482 			    "BM_%d : EEH : State PERM Failure");
5483 		return PCI_ERS_RESULT_DISCONNECT;
5484 	}
5485 
5486 	pci_disable_device(pdev);
5487 
5488 	/* The error could cause the FW to trigger a flash debug dump.
5489 	 * Resetting the card while flash dump is in progress
5490 	 * can cause it not to recover; wait for it to finish.
5491 	 * Wait only for first function as it is needed only once per
5492 	 * adapter.
5493 	 **/
5494 	if (pdev->devfn == 0)
5495 		ssleep(30);
5496 
5497 	return PCI_ERS_RESULT_NEED_RESET;
5498 }
5499 
/**
 * beiscsi_eeh_reset - PCI error-recovery "slot_reset" callback
 * @pdev: PCI device that was reset
 *
 * Re-enables the device, restores PCI state and waits for the chip
 * reset to complete.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba = NULL;
	int status = 0;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH Reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Wait for the CHIP Reset to complete */
	status = be_chk_reset_complete(phba);
	if (!status) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completed\n");
	} else {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completion Failure\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Clear any stale AER status left over from the error. */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
5532 
/**
 * beiscsi_eeh_resume - PCI error-recovery "resume" callback
 * @pdev: PCI device that completed slot reset
 *
 * Re-initializes the adapter after EEH recovery: MSI-X setup, function
 * reset, controller/queue re-init, MCC tag pool, irq_poll contexts and
 * IRQ registration, then clears the PCI-error state flag. Failures are
 * only logged - the adapter simply stays unusable.
 */
static void beiscsi_eeh_resume(struct pci_dev *pdev)
{
	int ret = 0, i;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	pci_save_state(pdev);

	if (enable_msix)
		find_num_cpus(phba);
	else
		phba->num_cpus = 1;

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		/* Fall back to a single EQ if MSI-X could not be enabled. */
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}

	ret = beiscsi_cmd_reset_function(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Reset Failed\n");
		goto ret_err;
	}

	ret = be_chk_reset_complete(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to get out of reset.\n");
		goto ret_err;
	}

	beiscsi_get_params(phba);
	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = hwi_init_controller(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_eeh_resume -"
			     "Failed to initialize beiscsi_hba.\n");
		goto ret_err;
	}

	/* Rebuild the MCC tag pool (tags 1..MAX_MCC_CMD). */
	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
				be_iopoll);
	}

	/* MCC work runs on the extra EQ when MSI-X is on, else on EQ 0. */
	i = (phba->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_eeh_resume - "
			    "Failed to beiscsi_init_irqs\n");
		goto ret_err;
	}

	hwi_enable_intr(phba);
	phba->state &= ~BE_ADAPTER_PCI_ERR;

	return;
ret_err:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : AER EEH Resume Failed\n");
}
5617 
/**
 * beiscsi_dev_probe - PCI probe callback
 * @pcidev: PCI device being probed
 * @id: matching entry from the device ID table
 *
 * Brings up one adapter: enables PCI, allocates the hba, selects the
 * generation-specific I/O path, resets and initializes the controller,
 * sets up MSI-X/EQs/IRQs, registers the SCSI host and starts the
 * periodic HW health-check work. On failure, unwinds through the goto
 * ladder in strict reverse order of setup.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int beiscsi_dev_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	int ret = 0, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
		goto disable_pci;
	}

	/* Enable EEH reporting */
	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : PCIe Error Reporting "
			    "Enabling Failed\n");

	pci_save_state(pcidev);

	/* Initialize Driver configuration Parameters */
	beiscsi_hba_attrs_init(phba);

	phba->fw_timeout = false;
	phba->mac_addr_set = false;


	/* Pick the WRB layout / generation based on the PCI device ID. */
	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case OC_SKH_ID1:
		phba->generation = BE_GEN4;
		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed in be_ctrl_init\n");
		goto hba_free;
	}

	/*
	 * FUNCTION_RESET should clean up any stale info in FW for this fn
	 */
	ret = beiscsi_cmd_reset_function(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Reset Failed\n");
		goto hba_free;
	}
	ret = be_chk_reset_complete(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to get out of reset.\n");
		goto hba_free;
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = mgmt_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
	mgmt_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);

	if (enable_msix)
		find_num_cpus(phba);
	else
		phba->num_cpus = 1;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		/* Fall back to a single EQ when MSI-X is unavailable. */
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	/* Build the MCC tag pool (tags 1..MAX_MCC_CMD). */
	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to allocate work queue\n");
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
			  beiscsi_hw_health_check);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
				be_iopoll);
	}

	/* MCC work runs on the extra EQ when MSI-X is on, else on EQ 0. */
	i = (phba->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
		goto free_blkenbld;

	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Could not set up "
			    "iSCSI boot info.\n");

	beiscsi_create_def_ifaces(phba);
	schedule_delayed_work(&phba->beiscsi_hw_check_task,
			      msecs_to_jiffies(1000));

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

	/* Error unwind: strictly reverse order of the setup above. */
free_blkenbld:
	destroy_workqueue(phba->wq);
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			   phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}
5832 
5833 static struct pci_error_handlers beiscsi_eeh_handlers = {
5834 	.error_detected = beiscsi_eeh_err_detected,
5835 	.slot_reset = beiscsi_eeh_reset,
5836 	.resume = beiscsi_eeh_resume,
5837 };
5838 
/*
 * iSCSI transport template registered with the transport class in
 * beiscsi_module_init().  Handlers are a mix of driver-specific entry
 * points (beiscsi_* / be2iscsi_*) and libiscsi defaults (iscsi_*).
 * Not static: referenced from other files of this driver.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	/* adapter offloads the data path and header/data digests */
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	/* session lifetime */
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	/* connection lifetime: teardown/stop fall back to libiscsi */
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	/* parameter get/set for iface, session, conn and host */
	.set_iface_param = be2iscsi_iface_set_param,
	.get_iface_param = be2iscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	/* task (command/PDU) submission and cleanup */
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	/* offloaded TCP endpoint management */
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	/* vendor-specific requests via the bsg interface */
	.bsg_request = beiscsi_bsg_request,
};
5871 
5872 static struct pci_driver beiscsi_pci_driver = {
5873 	.name = DRV_NAME,
5874 	.probe = beiscsi_dev_probe,
5875 	.remove = beiscsi_remove,
5876 	.id_table = beiscsi_pci_id_table,
5877 	.err_handler = &beiscsi_eeh_handlers
5878 };
5879 
5880 
5881 static int __init beiscsi_module_init(void)
5882 {
5883 	int ret;
5884 
5885 	beiscsi_scsi_transport =
5886 			iscsi_register_transport(&beiscsi_iscsi_transport);
5887 	if (!beiscsi_scsi_transport) {
5888 		printk(KERN_ERR
5889 		       "beiscsi_module_init - Unable to  register beiscsi transport.\n");
5890 		return -ENOMEM;
5891 	}
5892 	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5893 	       &beiscsi_iscsi_transport);
5894 
5895 	ret = pci_register_driver(&beiscsi_pci_driver);
5896 	if (ret) {
5897 		printk(KERN_ERR
5898 		       "beiscsi_module_init - Unable to  register beiscsi pci driver.\n");
5899 		goto unregister_iscsi_transport;
5900 	}
5901 	return 0;
5902 
5903 unregister_iscsi_transport:
5904 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
5905 	return ret;
5906 }
5907 
/**
 * beiscsi_module_exit - module unload entry point
 *
 * Tears down in reverse order of beiscsi_module_init(): unregister the
 * PCI driver first (which detaches any bound devices), then drop the
 * iSCSI transport class registration.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
5913 
/* Standard kernel module load/unload hooks. */
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
5916