xref: /openbmc/linux/drivers/scsi/be2iscsi/be_main.c (revision ee8ec048)
1 /*
2  * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
3  * Host Bus Adapters. Refer to the README file included with this package
4  * for driver version and adapter compatibility.
5  *
6  * Copyright (c) 2018 Broadcom. All Rights Reserved.
7  * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of version 2 of the GNU General Public License as published
11  * by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful. ALL EXPRESS
14  * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
15  * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
16  * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
17  * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
18  * See the GNU General Public License for more details, a copy of which
19  * can be found in the file COPYING included with this package.
20  *
21  * Contact Information:
22  * linux-drivers@broadcom.com
23  *
24  */
25 
26 #include <linux/reboot.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/blkdev.h>
31 #include <linux/pci.h>
32 #include <linux/string.h>
33 #include <linux/kernel.h>
34 #include <linux/semaphore.h>
35 #include <linux/iscsi_boot_sysfs.h>
36 #include <linux/module.h>
37 #include <linux/bsg-lib.h>
38 #include <linux/irq_poll.h>
39 
40 #include <scsi/libiscsi.h>
41 #include <scsi/scsi_bsg_iscsi.h>
42 #include <scsi/scsi_netlink.h>
43 #include <scsi/scsi_transport_iscsi.h>
44 #include <scsi/scsi_transport.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi.h>
49 #include "be_main.h"
50 #include "be_iscsi.h"
51 #include "be_mgmt.h"
52 #include "be_cmds.h"
53 
54 static unsigned int be_iopoll_budget = 10;
55 static unsigned int be_max_phys_size = 64;
56 static unsigned int enable_msix = 1;
57 
58 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
59 MODULE_VERSION(BUILD_STR);
60 MODULE_AUTHOR("Emulex Corporation");
61 MODULE_LICENSE("GPL");
62 module_param(be_iopoll_budget, int, 0);
63 module_param(enable_msix, int, 0);
64 module_param(be_max_phys_size, uint, S_IRUGO);
65 MODULE_PARM_DESC(be_max_phys_size,
66 		"Maximum Size (In Kilobytes) of physically contiguous "
67 		"memory that can be allocated. Range is 16 - 128");
68 
69 #define beiscsi_disp_param(_name)\
70 static ssize_t	\
71 beiscsi_##_name##_disp(struct device *dev,\
72 			struct device_attribute *attrib, char *buf)	\
73 {	\
74 	struct Scsi_Host *shost = class_to_shost(dev);\
75 	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
76 	return snprintf(buf, PAGE_SIZE, "%d\n",\
77 			phba->attr_##_name);\
78 }
79 
80 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
81 static int \
82 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
83 {\
84 	if (val >= _minval && val <= _maxval) {\
85 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
86 			    "BA_%d : beiscsi_"#_name" updated "\
87 			    "from 0x%x ==> 0x%x\n",\
88 			    phba->attr_##_name, val); \
89 		phba->attr_##_name = val;\
90 		return 0;\
91 	} \
92 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
93 		    "BA_%d beiscsi_"#_name" attribute "\
94 		    "cannot be updated to 0x%x, "\
95 		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
96 		return -EINVAL;\
97 }
98 
99 #define beiscsi_store_param(_name)  \
100 static ssize_t \
101 beiscsi_##_name##_store(struct device *dev,\
102 			 struct device_attribute *attr, const char *buf,\
103 			 size_t count) \
104 { \
105 	struct Scsi_Host  *shost = class_to_shost(dev);\
106 	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
107 	uint32_t param_val = 0;\
108 	if (!isdigit(buf[0]))\
109 		return -EINVAL;\
110 	if (sscanf(buf, "%i", &param_val) != 1)\
111 		return -EINVAL;\
112 	if (beiscsi_##_name##_change(phba, param_val) == 0) \
113 		return strlen(buf);\
114 	else \
115 		return -EINVAL;\
116 }
117 
118 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \
119 static int \
120 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
121 { \
122 	if (val >= _minval && val <= _maxval) {\
123 		phba->attr_##_name = val;\
124 		return 0;\
125 	} \
126 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
127 		    "BA_%d beiscsi_"#_name" attribute " \
128 		    "cannot be updated to 0x%x, "\
129 		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
130 	phba->attr_##_name = _defval;\
131 	return -EINVAL;\
132 }
133 
134 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
135 static uint beiscsi_##_name = _defval;\
136 module_param(beiscsi_##_name, uint, S_IRUGO);\
137 MODULE_PARM_DESC(beiscsi_##_name, _descp);\
138 beiscsi_disp_param(_name)\
139 beiscsi_change_param(_name, _minval, _maxval, _defval)\
140 beiscsi_store_param(_name)\
141 beiscsi_init_param(_name, _minval, _maxval, _defval)\
142 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
143 	      beiscsi_##_name##_disp, beiscsi_##_name##_store)
144 
145 /*
146  * When new log level added update MAX allowed value for log_enable
147  */
148 BEISCSI_RW_ATTR(log_enable, 0x00,
149 		0xFF, 0x00, "Enable logging Bit Mask\n"
150 		"\t\t\t\tInitialization Events	: 0x01\n"
151 		"\t\t\t\tMailbox Events		: 0x02\n"
152 		"\t\t\t\tMiscellaneous Events	: 0x04\n"
153 		"\t\t\t\tError Handling		: 0x08\n"
154 		"\t\t\t\tIO Path Events		: 0x10\n"
155 		"\t\t\t\tConfiguration Path	: 0x20\n"
156 		"\t\t\t\tiSCSI Protocol		: 0x40\n");
157 
158 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
159 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
160 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
161 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
162 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
163 	     beiscsi_active_session_disp, NULL);
164 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
165 	     beiscsi_free_session_disp, NULL);
166 
167 static struct attribute *beiscsi_attrs[] = {
168 	&dev_attr_beiscsi_log_enable.attr,
169 	&dev_attr_beiscsi_drvr_ver.attr,
170 	&dev_attr_beiscsi_adapter_family.attr,
171 	&dev_attr_beiscsi_fw_ver.attr,
172 	&dev_attr_beiscsi_active_session_count.attr,
173 	&dev_attr_beiscsi_free_session_count.attr,
174 	&dev_attr_beiscsi_phys_port.attr,
175 	NULL,
176 };
177 
178 ATTRIBUTE_GROUPS(beiscsi);
179 
180 static char const *cqe_desc[] = {
181 	"RESERVED_DESC",
182 	"SOL_CMD_COMPLETE",
183 	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
184 	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
185 	"CXN_KILLED_BURST_LEN_MISMATCH",
186 	"CXN_KILLED_AHS_RCVD",
187 	"CXN_KILLED_HDR_DIGEST_ERR",
188 	"CXN_KILLED_UNKNOWN_HDR",
189 	"CXN_KILLED_STALE_ITT_TTT_RCVD",
190 	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
191 	"CXN_KILLED_RST_RCVD",
192 	"CXN_KILLED_TIMED_OUT",
193 	"CXN_KILLED_RST_SENT",
194 	"CXN_KILLED_FIN_RCVD",
195 	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
196 	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
197 	"CXN_KILLED_OVER_RUN_RESIDUAL",
198 	"CXN_KILLED_UNDER_RUN_RESIDUAL",
199 	"CMD_KILLED_INVALID_STATSN_RCVD",
200 	"CMD_KILLED_INVALID_R2T_RCVD",
201 	"CMD_CXN_KILLED_LUN_INVALID",
202 	"CMD_CXN_KILLED_ICD_INVALID",
203 	"CMD_CXN_KILLED_ITT_INVALID",
204 	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
205 	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
206 	"CXN_INVALIDATE_NOTIFY",
207 	"CXN_INVALIDATE_INDEX_NOTIFY",
208 	"CMD_INVALIDATED_NOTIFY",
209 	"UNSOL_HDR_NOTIFY",
210 	"UNSOL_DATA_NOTIFY",
211 	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
212 	"DRIVERMSG_NOTIFY",
213 	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
214 	"SOL_CMD_KILLED_DIF_ERR",
215 	"CXN_KILLED_SYN_RCVD",
216 	"CXN_KILLED_IMM_DATA_RCVD"
217 };
218 
219 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
220 {
221 	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
222 	struct iscsi_cls_session *cls_session;
223 	struct beiscsi_io_task *abrt_io_task;
224 	struct beiscsi_conn *beiscsi_conn;
225 	struct iscsi_session *session;
226 	struct invldt_cmd_tbl inv_tbl;
227 	struct beiscsi_hba *phba;
228 	struct iscsi_conn *conn;
229 	int rc;
230 
231 	cls_session = starget_to_session(scsi_target(sc->device));
232 	session = cls_session->dd_data;
233 
234 	/* check if we raced, task just got cleaned up under us */
235 	spin_lock_bh(&session->back_lock);
236 	if (!abrt_task || !abrt_task->sc) {
237 		spin_unlock_bh(&session->back_lock);
238 		return SUCCESS;
239 	}
240 	/* get a task ref till FW processes the req for the ICD used */
241 	__iscsi_get_task(abrt_task);
242 	abrt_io_task = abrt_task->dd_data;
243 	conn = abrt_task->conn;
244 	beiscsi_conn = conn->dd_data;
245 	phba = beiscsi_conn->phba;
246 	/* mark WRB invalid which have been not processed by FW yet */
247 	if (is_chip_be2_be3r(phba)) {
248 		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
249 			      abrt_io_task->pwrb_handle->pwrb, 1);
250 	} else {
251 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
252 			      abrt_io_task->pwrb_handle->pwrb, 1);
253 	}
254 	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
255 	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
256 	spin_unlock_bh(&session->back_lock);
257 
258 	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
259 	iscsi_put_task(abrt_task);
260 	if (rc) {
261 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
262 			    "BM_%d : sc %p invalidation failed %d\n",
263 			    sc, rc);
264 		return FAILED;
265 	}
266 
267 	return iscsi_eh_abort(sc);
268 }
269 
270 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
271 {
272 	struct beiscsi_invldt_cmd_tbl {
273 		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
274 		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
275 	} *inv_tbl;
276 	struct iscsi_cls_session *cls_session;
277 	struct beiscsi_conn *beiscsi_conn;
278 	struct beiscsi_io_task *io_task;
279 	struct iscsi_session *session;
280 	struct beiscsi_hba *phba;
281 	struct iscsi_conn *conn;
282 	struct iscsi_task *task;
283 	unsigned int i, nents;
284 	int rc, more = 0;
285 
286 	cls_session = starget_to_session(scsi_target(sc->device));
287 	session = cls_session->dd_data;
288 
289 	spin_lock_bh(&session->frwd_lock);
290 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
291 		spin_unlock_bh(&session->frwd_lock);
292 		return FAILED;
293 	}
294 
295 	conn = session->leadconn;
296 	beiscsi_conn = conn->dd_data;
297 	phba = beiscsi_conn->phba;
298 
299 	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
300 	if (!inv_tbl) {
301 		spin_unlock_bh(&session->frwd_lock);
302 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
303 			    "BM_%d : invldt_cmd_tbl alloc failed\n");
304 		return FAILED;
305 	}
306 	nents = 0;
307 	/* take back_lock to prevent task from getting cleaned up under us */
308 	spin_lock(&session->back_lock);
309 	for (i = 0; i < conn->session->cmds_max; i++) {
310 		task = conn->session->cmds[i];
311 		if (!task->sc)
312 			continue;
313 
314 		if (sc->device->lun != task->sc->device->lun)
315 			continue;
316 		/**
317 		 * Can't fit in more cmds? Normally this won't happen b'coz
318 		 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
319 		 */
320 		if (nents == BE_INVLDT_CMD_TBL_SZ) {
321 			more = 1;
322 			break;
323 		}
324 
325 		/* get a task ref till FW processes the req for the ICD used */
326 		__iscsi_get_task(task);
327 		io_task = task->dd_data;
328 		/* mark WRB invalid which have been not processed by FW yet */
329 		if (is_chip_be2_be3r(phba)) {
330 			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
331 				      io_task->pwrb_handle->pwrb, 1);
332 		} else {
333 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
334 				      io_task->pwrb_handle->pwrb, 1);
335 		}
336 
337 		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
338 		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
339 		inv_tbl->task[nents] = task;
340 		nents++;
341 	}
342 	spin_unlock(&session->back_lock);
343 	spin_unlock_bh(&session->frwd_lock);
344 
345 	rc = SUCCESS;
346 	if (!nents)
347 		goto end_reset;
348 
349 	if (more) {
350 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
351 			    "BM_%d : number of cmds exceeds size of invalidation table\n");
352 		rc = FAILED;
353 		goto end_reset;
354 	}
355 
356 	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
357 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
358 			    "BM_%d : cid %u scmds invalidation failed\n",
359 			    beiscsi_conn->beiscsi_conn_cid);
360 		rc = FAILED;
361 	}
362 
363 end_reset:
364 	for (i = 0; i < nents; i++)
365 		iscsi_put_task(inv_tbl->task[i]);
366 	kfree(inv_tbl);
367 
368 	if (rc == SUCCESS)
369 		rc = iscsi_eh_device_reset(sc);
370 	return rc;
371 }
372 
373 /*------------------- PCI Driver operations and data ----------------- */
374 static const struct pci_device_id beiscsi_pci_id_table[] = {
375 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
376 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
377 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
378 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
379 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
380 	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
381 	{ 0 }
382 };
383 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
384 
385 
386 static struct scsi_host_template beiscsi_sht = {
387 	.module = THIS_MODULE,
388 	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
389 	.proc_name = DRV_NAME,
390 	.queuecommand = iscsi_queuecommand,
391 	.change_queue_depth = scsi_change_queue_depth,
392 	.target_alloc = iscsi_target_alloc,
393 	.eh_timed_out = iscsi_eh_cmd_timed_out,
394 	.eh_abort_handler = beiscsi_eh_abort,
395 	.eh_device_reset_handler = beiscsi_eh_device_reset,
396 	.eh_target_reset_handler = iscsi_eh_session_reset,
397 	.shost_groups = beiscsi_groups,
398 	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
399 	.can_queue = BE2_IO_DEPTH,
400 	.this_id = -1,
401 	.max_sectors = BEISCSI_MAX_SECTORS,
402 	.max_segment_size = 65536,
403 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
404 	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
405 	.track_queue_depth = 1,
406 };
407 
408 static struct scsi_transport_template *beiscsi_scsi_transport;
409 
410 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
411 {
412 	struct beiscsi_hba *phba;
413 	struct Scsi_Host *shost;
414 
415 	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
416 	if (!shost) {
417 		dev_err(&pcidev->dev,
418 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
419 		return NULL;
420 	}
421 	shost->max_id = BE2_MAX_SESSIONS - 1;
422 	shost->max_channel = 0;
423 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
424 	shost->max_lun = BEISCSI_NUM_MAX_LUN;
425 	shost->transportt = beiscsi_scsi_transport;
426 	phba = iscsi_host_priv(shost);
427 	memset(phba, 0, sizeof(*phba));
428 	phba->shost = shost;
429 	phba->pcidev = pci_dev_get(pcidev);
430 	pci_set_drvdata(pcidev, phba);
431 	phba->interface_handle = 0xFFFFFFFF;
432 
433 	return phba;
434 }
435 
436 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
437 {
438 	if (phba->csr_va) {
439 		iounmap(phba->csr_va);
440 		phba->csr_va = NULL;
441 	}
442 	if (phba->db_va) {
443 		iounmap(phba->db_va);
444 		phba->db_va = NULL;
445 	}
446 	if (phba->pci_va) {
447 		iounmap(phba->pci_va);
448 		phba->pci_va = NULL;
449 	}
450 }
451 
452 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
453 				struct pci_dev *pcidev)
454 {
455 	u8 __iomem *addr;
456 	int pcicfg_reg;
457 
458 	addr = ioremap(pci_resource_start(pcidev, 2),
459 			       pci_resource_len(pcidev, 2));
460 	if (addr == NULL)
461 		return -ENOMEM;
462 	phba->ctrl.csr = addr;
463 	phba->csr_va = addr;
464 
465 	addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
466 	if (addr == NULL)
467 		goto pci_map_err;
468 	phba->ctrl.db = addr;
469 	phba->db_va = addr;
470 
471 	if (phba->generation == BE_GEN2)
472 		pcicfg_reg = 1;
473 	else
474 		pcicfg_reg = 0;
475 
476 	addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
477 			       pci_resource_len(pcidev, pcicfg_reg));
478 
479 	if (addr == NULL)
480 		goto pci_map_err;
481 	phba->ctrl.pcicfg = addr;
482 	phba->pci_va = addr;
483 	return 0;
484 
485 pci_map_err:
486 	beiscsi_unmap_pci_function(phba);
487 	return -ENOMEM;
488 }
489 
490 static int beiscsi_enable_pci(struct pci_dev *pcidev)
491 {
492 	int ret;
493 
494 	ret = pci_enable_device(pcidev);
495 	if (ret) {
496 		dev_err(&pcidev->dev,
497 			"beiscsi_enable_pci - enable device failed\n");
498 		return ret;
499 	}
500 
501 	ret = pci_request_regions(pcidev, DRV_NAME);
502 	if (ret) {
503 		dev_err(&pcidev->dev,
504 				"beiscsi_enable_pci - request region failed\n");
505 		goto pci_dev_disable;
506 	}
507 
508 	pci_set_master(pcidev);
509 	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
510 	if (ret) {
511 		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
512 		if (ret) {
513 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
514 			goto pci_region_release;
515 		}
516 	}
517 	return 0;
518 
519 pci_region_release:
520 	pci_release_regions(pcidev);
521 pci_dev_disable:
522 	pci_disable_device(pcidev);
523 
524 	return ret;
525 }
526 
527 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
528 {
529 	struct be_ctrl_info *ctrl = &phba->ctrl;
530 	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
531 	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
532 	int status = 0;
533 
534 	ctrl->pdev = pdev;
535 	status = beiscsi_map_pci_bars(phba, pdev);
536 	if (status)
537 		return status;
538 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
539 	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
540 			mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
541 	if (!mbox_mem_alloc->va) {
542 		beiscsi_unmap_pci_function(phba);
543 		return -ENOMEM;
544 	}
545 
546 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
547 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
548 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
549 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
550 	mutex_init(&ctrl->mbox_lock);
551 	spin_lock_init(&phba->ctrl.mcc_lock);
552 
553 	return status;
554 }
555 
556 /**
557  * beiscsi_get_params()- Set the config paramters
558  * @phba: ptr  device priv structure
559  **/
560 static void beiscsi_get_params(struct beiscsi_hba *phba)
561 {
562 	uint32_t total_cid_count = 0;
563 	uint32_t total_icd_count = 0;
564 	uint8_t ulp_num = 0;
565 
566 	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
567 			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
568 
569 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
570 		uint32_t align_mask = 0;
571 		uint32_t icd_post_per_page = 0;
572 		uint32_t icd_count_unavailable = 0;
573 		uint32_t icd_start = 0, icd_count = 0;
574 		uint32_t icd_start_align = 0, icd_count_align = 0;
575 
576 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
577 			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
578 			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
579 
580 			/* Get ICD count that can be posted on each page */
581 			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
582 					     sizeof(struct iscsi_sge)));
583 			align_mask = (icd_post_per_page - 1);
584 
585 			/* Check if icd_start is aligned ICD per page posting */
586 			if (icd_start % icd_post_per_page) {
587 				icd_start_align = ((icd_start +
588 						    icd_post_per_page) &
589 						    ~(align_mask));
590 				phba->fw_config.
591 					iscsi_icd_start[ulp_num] =
592 					icd_start_align;
593 			}
594 
595 			icd_count_align = (icd_count & ~align_mask);
596 
597 			/* ICD discarded in the process of alignment */
598 			if (icd_start_align)
599 				icd_count_unavailable = ((icd_start_align -
600 							  icd_start) +
601 							 (icd_count -
602 							  icd_count_align));
603 
604 			/* Updated ICD count available */
605 			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
606 					icd_count_unavailable);
607 
608 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
609 					"BM_%d : Aligned ICD values\n"
610 					"\t ICD Start : %d\n"
611 					"\t ICD Count : %d\n"
612 					"\t ICD Discarded : %d\n",
613 					phba->fw_config.
614 					iscsi_icd_start[ulp_num],
615 					phba->fw_config.
616 					iscsi_icd_count[ulp_num],
617 					icd_count_unavailable);
618 			break;
619 		}
620 	}
621 
622 	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
623 	phba->params.ios_per_ctrl = (total_icd_count -
624 				    (total_cid_count +
625 				     BE2_TMFS + BE2_NOPOUT_REQ));
626 	phba->params.cxns_per_ctrl = total_cid_count;
627 	phba->params.icds_per_ctrl = total_icd_count;
628 	phba->params.num_sge_per_io = BE2_SGE;
629 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
630 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
631 	phba->params.num_eq_entries = 1024;
632 	phba->params.num_cq_entries = 1024;
633 	phba->params.wrbs_per_cxn = 256;
634 }
635 
636 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
637 			   unsigned int id, unsigned int clr_interrupt,
638 			   unsigned int num_processed,
639 			   unsigned char rearm, unsigned char event)
640 {
641 	u32 val = 0;
642 
643 	if (rearm)
644 		val |= 1 << DB_EQ_REARM_SHIFT;
645 	if (clr_interrupt)
646 		val |= 1 << DB_EQ_CLR_SHIFT;
647 	if (event)
648 		val |= 1 << DB_EQ_EVNT_SHIFT;
649 
650 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
651 	/* Setting lower order EQ_ID Bits */
652 	val |= (id & DB_EQ_RING_ID_LOW_MASK);
653 
654 	/* Setting Higher order EQ_ID Bits */
655 	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
656 		  DB_EQ_RING_ID_HIGH_MASK)
657 		  << DB_EQ_HIGH_SET_SHIFT);
658 
659 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
660 }
661 
662 /**
663  * be_isr_mcc - The isr routine of the driver.
664  * @irq: Not used
665  * @dev_id: Pointer to host adapter structure
666  */
667 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
668 {
669 	struct beiscsi_hba *phba;
670 	struct be_eq_entry *eqe;
671 	struct be_queue_info *eq;
672 	struct be_queue_info *mcc;
673 	unsigned int mcc_events;
674 	struct be_eq_obj *pbe_eq;
675 
676 	pbe_eq = dev_id;
677 	eq = &pbe_eq->q;
678 	phba =  pbe_eq->phba;
679 	mcc = &phba->ctrl.mcc_obj.cq;
680 	eqe = queue_tail_node(eq);
681 
682 	mcc_events = 0;
683 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
684 				& EQE_VALID_MASK) {
685 		if (((eqe->dw[offsetof(struct amap_eq_entry,
686 		     resource_id) / 32] &
687 		     EQE_RESID_MASK) >> 16) == mcc->id) {
688 			mcc_events++;
689 		}
690 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
691 		queue_tail_inc(eq);
692 		eqe = queue_tail_node(eq);
693 	}
694 
695 	if (mcc_events) {
696 		queue_work(phba->wq, &pbe_eq->mcc_work);
697 		hwi_ring_eq_db(phba, eq->id, 1,	mcc_events, 1, 1);
698 	}
699 	return IRQ_HANDLED;
700 }
701 
702 /**
703  * be_isr_msix - The isr routine of the driver.
704  * @irq: Not used
705  * @dev_id: Pointer to host adapter structure
706  */
707 static irqreturn_t be_isr_msix(int irq, void *dev_id)
708 {
709 	struct beiscsi_hba *phba;
710 	struct be_queue_info *eq;
711 	struct be_eq_obj *pbe_eq;
712 
713 	pbe_eq = dev_id;
714 	eq = &pbe_eq->q;
715 
716 	phba = pbe_eq->phba;
717 	/* disable interrupt till iopoll completes */
718 	hwi_ring_eq_db(phba, eq->id, 1,	0, 0, 1);
719 	irq_poll_sched(&pbe_eq->iopoll);
720 
721 	return IRQ_HANDLED;
722 }
723 
724 /**
725  * be_isr - The isr routine of the driver.
726  * @irq: Not used
727  * @dev_id: Pointer to host adapter structure
728  */
729 static irqreturn_t be_isr(int irq, void *dev_id)
730 {
731 	struct beiscsi_hba *phba;
732 	struct hwi_controller *phwi_ctrlr;
733 	struct hwi_context_memory *phwi_context;
734 	struct be_eq_entry *eqe;
735 	struct be_queue_info *eq;
736 	struct be_queue_info *mcc;
737 	unsigned int mcc_events, io_events;
738 	struct be_ctrl_info *ctrl;
739 	struct be_eq_obj *pbe_eq;
740 	int isr, rearm;
741 
742 	phba = dev_id;
743 	ctrl = &phba->ctrl;
744 	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
745 		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
746 	if (!isr)
747 		return IRQ_NONE;
748 
749 	phwi_ctrlr = phba->phwi_ctrlr;
750 	phwi_context = phwi_ctrlr->phwi_ctxt;
751 	pbe_eq = &phwi_context->be_eq[0];
752 
753 	eq = &phwi_context->be_eq[0].q;
754 	mcc = &phba->ctrl.mcc_obj.cq;
755 	eqe = queue_tail_node(eq);
756 
757 	io_events = 0;
758 	mcc_events = 0;
759 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
760 				& EQE_VALID_MASK) {
761 		if (((eqe->dw[offsetof(struct amap_eq_entry,
762 		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
763 			mcc_events++;
764 		else
765 			io_events++;
766 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
767 		queue_tail_inc(eq);
768 		eqe = queue_tail_node(eq);
769 	}
770 	if (!io_events && !mcc_events)
771 		return IRQ_NONE;
772 
773 	/* no need to rearm if interrupt is only for IOs */
774 	rearm = 0;
775 	if (mcc_events) {
776 		queue_work(phba->wq, &pbe_eq->mcc_work);
777 		/* rearm for MCCQ */
778 		rearm = 1;
779 	}
780 	if (io_events)
781 		irq_poll_sched(&pbe_eq->iopoll);
782 	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
783 	return IRQ_HANDLED;
784 }
785 
786 static void beiscsi_free_irqs(struct beiscsi_hba *phba)
787 {
788 	struct hwi_context_memory *phwi_context;
789 	int i;
790 
791 	if (!phba->pcidev->msix_enabled) {
792 		if (phba->pcidev->irq)
793 			free_irq(phba->pcidev->irq, phba);
794 		return;
795 	}
796 
797 	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
798 	for (i = 0; i <= phba->num_cpus; i++) {
799 		free_irq(pci_irq_vector(phba->pcidev, i),
800 			 &phwi_context->be_eq[i]);
801 		kfree(phba->msi_name[i]);
802 	}
803 }
804 
805 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
806 {
807 	struct pci_dev *pcidev = phba->pcidev;
808 	struct hwi_controller *phwi_ctrlr;
809 	struct hwi_context_memory *phwi_context;
810 	int ret, i, j;
811 
812 	phwi_ctrlr = phba->phwi_ctrlr;
813 	phwi_context = phwi_ctrlr->phwi_ctxt;
814 
815 	if (pcidev->msix_enabled) {
816 		for (i = 0; i < phba->num_cpus; i++) {
817 			phba->msi_name[i] = kasprintf(GFP_KERNEL,
818 						      "beiscsi_%02x_%02x",
819 						      phba->shost->host_no, i);
820 			if (!phba->msi_name[i]) {
821 				ret = -ENOMEM;
822 				goto free_msix_irqs;
823 			}
824 
825 			ret = request_irq(pci_irq_vector(pcidev, i),
826 					  be_isr_msix, 0, phba->msi_name[i],
827 					  &phwi_context->be_eq[i]);
828 			if (ret) {
829 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
830 					    "BM_%d : %s-Failed to register msix for i = %d\n",
831 					    __func__, i);
832 				kfree(phba->msi_name[i]);
833 				goto free_msix_irqs;
834 			}
835 		}
836 		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
837 					      phba->shost->host_no);
838 		if (!phba->msi_name[i]) {
839 			ret = -ENOMEM;
840 			goto free_msix_irqs;
841 		}
842 		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
843 				  phba->msi_name[i], &phwi_context->be_eq[i]);
844 		if (ret) {
845 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
846 				    "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
847 				    __func__);
848 			kfree(phba->msi_name[i]);
849 			goto free_msix_irqs;
850 		}
851 
852 	} else {
853 		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
854 				  "beiscsi", phba);
855 		if (ret) {
856 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
857 				    "BM_%d : %s-Failed to register irq\n",
858 				    __func__);
859 			return ret;
860 		}
861 	}
862 	return 0;
863 free_msix_irqs:
864 	for (j = i - 1; j >= 0; j--) {
865 		free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]);
866 		kfree(phba->msi_name[j]);
867 	}
868 	return ret;
869 }
870 
871 void hwi_ring_cq_db(struct beiscsi_hba *phba,
872 			   unsigned int id, unsigned int num_processed,
873 			   unsigned char rearm)
874 {
875 	u32 val = 0;
876 
877 	if (rearm)
878 		val |= 1 << DB_CQ_REARM_SHIFT;
879 
880 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
881 
882 	/* Setting lower order CQ_ID Bits */
883 	val |= (id & DB_CQ_RING_ID_LOW_MASK);
884 
885 	/* Setting Higher order CQ_ID Bits */
886 	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
887 		  DB_CQ_RING_ID_HIGH_MASK)
888 		  << DB_CQ_HIGH_SET_SHIFT);
889 
890 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
891 }
892 
893 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
894 {
895 	struct sgl_handle *psgl_handle;
896 	unsigned long flags;
897 
898 	spin_lock_irqsave(&phba->io_sgl_lock, flags);
899 	if (phba->io_sgl_hndl_avbl) {
900 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
901 			    "BM_%d : In alloc_io_sgl_handle,"
902 			    " io_sgl_alloc_index=%d\n",
903 			    phba->io_sgl_alloc_index);
904 
905 		psgl_handle = phba->io_sgl_hndl_base[phba->
906 						io_sgl_alloc_index];
907 		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
908 		phba->io_sgl_hndl_avbl--;
909 		if (phba->io_sgl_alloc_index == (phba->params.
910 						 ios_per_ctrl - 1))
911 			phba->io_sgl_alloc_index = 0;
912 		else
913 			phba->io_sgl_alloc_index++;
914 	} else
915 		psgl_handle = NULL;
916 	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
917 	return psgl_handle;
918 }
919 
920 static void
921 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
922 {
923 	unsigned long flags;
924 
925 	spin_lock_irqsave(&phba->io_sgl_lock, flags);
926 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
927 		    "BM_%d : In free_,io_sgl_free_index=%d\n",
928 		    phba->io_sgl_free_index);
929 
930 	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
931 		/*
932 		 * this can happen if clean_task is called on a task that
933 		 * failed in xmit_task or alloc_pdu.
934 		 */
935 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
936 			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
937 			    phba->io_sgl_free_index,
938 			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
939 		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
940 		return;
941 	}
942 	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
943 	phba->io_sgl_hndl_avbl++;
944 	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
945 		phba->io_sgl_free_index = 0;
946 	else
947 		phba->io_sgl_free_index++;
948 	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
949 }
950 
951 static inline struct wrb_handle *
952 beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
953 		       unsigned int wrbs_per_cxn)
954 {
955 	struct wrb_handle *pwrb_handle;
956 	unsigned long flags;
957 
958 	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
959 	if (!pwrb_context->wrb_handles_available) {
960 		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
961 		return NULL;
962 	}
963 	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
964 	pwrb_context->wrb_handles_available--;
965 	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
966 		pwrb_context->alloc_index = 0;
967 	else
968 		pwrb_context->alloc_index++;
969 	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
970 
971 	if (pwrb_handle)
972 		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
973 
974 	return pwrb_handle;
975 }
976 
977 /**
978  * alloc_wrb_handle - To allocate a wrb handle
979  * @phba: The hba pointer
980  * @cid: The cid to use for allocation
981  * @pcontext: ptr to ptr to wrb context
982  *
983  * This happens under session_lock until submission to chip
984  */
985 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
986 				    struct hwi_wrb_context **pcontext)
987 {
988 	struct hwi_wrb_context *pwrb_context;
989 	struct hwi_controller *phwi_ctrlr;
990 	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
991 
992 	phwi_ctrlr = phba->phwi_ctrlr;
993 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
994 	/* return the context address */
995 	*pcontext = pwrb_context;
996 	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
997 }
998 
999 static inline void
1000 beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
1001 		       struct wrb_handle *pwrb_handle,
1002 		       unsigned int wrbs_per_cxn)
1003 {
1004 	unsigned long flags;
1005 
1006 	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
1007 	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1008 	pwrb_context->wrb_handles_available++;
1009 	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
1010 		pwrb_context->free_index = 0;
1011 	else
1012 		pwrb_context->free_index++;
1013 	pwrb_handle->pio_handle = NULL;
1014 	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
1015 }
1016 
1017 /**
1018  * free_wrb_handle - To free the wrb handle back to pool
1019  * @phba: The hba pointer
1020  * @pwrb_context: The context to free from
1021  * @pwrb_handle: The wrb_handle to free
1022  *
1023  * This happens under session_lock until submission to chip
1024  */
1025 static void
1026 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1027 		struct wrb_handle *pwrb_handle)
1028 {
1029 	beiscsi_put_wrb_handle(pwrb_context,
1030 			       pwrb_handle,
1031 			       phba->params.wrbs_per_cxn);
1032 	beiscsi_log(phba, KERN_INFO,
1033 		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1034 		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
1035 		    "wrb_handles_available=%d\n",
1036 		    pwrb_handle, pwrb_context->free_index,
1037 		    pwrb_context->wrb_handles_available);
1038 }
1039 
1040 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1041 {
1042 	struct sgl_handle *psgl_handle;
1043 	unsigned long flags;
1044 
1045 	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1046 	if (phba->eh_sgl_hndl_avbl) {
1047 		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1048 		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1049 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1050 			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1051 			    phba->eh_sgl_alloc_index,
1052 			    phba->eh_sgl_alloc_index);
1053 
1054 		phba->eh_sgl_hndl_avbl--;
1055 		if (phba->eh_sgl_alloc_index ==
1056 		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1057 		     1))
1058 			phba->eh_sgl_alloc_index = 0;
1059 		else
1060 			phba->eh_sgl_alloc_index++;
1061 	} else
1062 		psgl_handle = NULL;
1063 	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1064 	return psgl_handle;
1065 }
1066 
1067 void
1068 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1069 {
1070 	unsigned long flags;
1071 
1072 	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1073 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1074 		    "BM_%d : In  free_mgmt_sgl_handle,"
1075 		    "eh_sgl_free_index=%d\n",
1076 		    phba->eh_sgl_free_index);
1077 
1078 	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1079 		/*
1080 		 * this can happen if clean_task is called on a task that
1081 		 * failed in xmit_task or alloc_pdu.
1082 		 */
1083 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1084 			    "BM_%d : Double Free in eh SGL ,"
1085 			    "eh_sgl_free_index=%d\n",
1086 			    phba->eh_sgl_free_index);
1087 		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1088 		return;
1089 	}
1090 	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1091 	phba->eh_sgl_hndl_avbl++;
1092 	if (phba->eh_sgl_free_index ==
1093 	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1094 		phba->eh_sgl_free_index = 0;
1095 	else
1096 		phba->eh_sgl_free_index++;
1097 	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1098 }
1099 
1100 static void
1101 be_complete_io(struct beiscsi_conn *beiscsi_conn,
1102 		struct iscsi_task *task,
1103 		struct common_sol_cqe *csol_cqe)
1104 {
1105 	struct beiscsi_io_task *io_task = task->dd_data;
1106 	struct be_status_bhs *sts_bhs =
1107 				(struct be_status_bhs *)io_task->cmd_bhs;
1108 	struct iscsi_conn *conn = beiscsi_conn->conn;
1109 	unsigned char *sense;
1110 	u32 resid = 0, exp_cmdsn, max_cmdsn;
1111 	u8 rsp, status, flags;
1112 
1113 	exp_cmdsn = csol_cqe->exp_cmdsn;
1114 	max_cmdsn = (csol_cqe->exp_cmdsn +
1115 		     csol_cqe->cmd_wnd - 1);
1116 	rsp = csol_cqe->i_resp;
1117 	status = csol_cqe->i_sts;
1118 	flags = csol_cqe->i_flags;
1119 	resid = csol_cqe->res_cnt;
1120 
1121 	if (!task->sc) {
1122 		if (io_task->scsi_cmnd) {
1123 			scsi_dma_unmap(io_task->scsi_cmnd);
1124 			io_task->scsi_cmnd = NULL;
1125 		}
1126 
1127 		return;
1128 	}
1129 	task->sc->result = (DID_OK << 16) | status;
1130 	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1131 		task->sc->result = DID_ERROR << 16;
1132 		goto unmap;
1133 	}
1134 
1135 	/* bidi not initially supported */
1136 	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1137 		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1138 			task->sc->result = DID_ERROR << 16;
1139 
1140 		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1141 			scsi_set_resid(task->sc, resid);
1142 			if (!status && (scsi_bufflen(task->sc) - resid <
1143 			    task->sc->underflow))
1144 				task->sc->result = DID_ERROR << 16;
1145 		}
1146 	}
1147 
1148 	if (status == SAM_STAT_CHECK_CONDITION) {
1149 		u16 sense_len;
1150 		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
1151 
1152 		sense = sts_bhs->sense_info + sizeof(unsigned short);
1153 		sense_len = be16_to_cpu(*slen);
1154 		memcpy(task->sc->sense_buffer, sense,
1155 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1156 	}
1157 
1158 	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
1159 		conn->rxdata_octets += resid;
1160 unmap:
1161 	if (io_task->scsi_cmnd) {
1162 		scsi_dma_unmap(io_task->scsi_cmnd);
1163 		io_task->scsi_cmnd = NULL;
1164 	}
1165 	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1166 }
1167 
1168 static void
1169 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1170 		    struct iscsi_task *task,
1171 		    struct common_sol_cqe *csol_cqe)
1172 {
1173 	struct iscsi_logout_rsp *hdr;
1174 	struct beiscsi_io_task *io_task = task->dd_data;
1175 	struct iscsi_conn *conn = beiscsi_conn->conn;
1176 
1177 	hdr = (struct iscsi_logout_rsp *)task->hdr;
1178 	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1179 	hdr->t2wait = 5;
1180 	hdr->t2retain = 0;
1181 	hdr->flags = csol_cqe->i_flags;
1182 	hdr->response = csol_cqe->i_resp;
1183 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1184 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1185 				     csol_cqe->cmd_wnd - 1);
1186 
1187 	hdr->dlength[0] = 0;
1188 	hdr->dlength[1] = 0;
1189 	hdr->dlength[2] = 0;
1190 	hdr->hlength = 0;
1191 	hdr->itt = io_task->libiscsi_itt;
1192 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1193 }
1194 
1195 static void
1196 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1197 		 struct iscsi_task *task,
1198 		 struct common_sol_cqe *csol_cqe)
1199 {
1200 	struct iscsi_tm_rsp *hdr;
1201 	struct iscsi_conn *conn = beiscsi_conn->conn;
1202 	struct beiscsi_io_task *io_task = task->dd_data;
1203 
1204 	hdr = (struct iscsi_tm_rsp *)task->hdr;
1205 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1206 	hdr->flags = csol_cqe->i_flags;
1207 	hdr->response = csol_cqe->i_resp;
1208 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1209 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1210 				     csol_cqe->cmd_wnd - 1);
1211 
1212 	hdr->itt = io_task->libiscsi_itt;
1213 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1214 }
1215 
1216 static void
1217 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1218 		       struct beiscsi_hba *phba, struct sol_cqe *psol)
1219 {
1220 	struct hwi_wrb_context *pwrb_context;
1221 	uint16_t wrb_index, cid, cri_index;
1222 	struct hwi_controller *phwi_ctrlr;
1223 	struct wrb_handle *pwrb_handle;
1224 	struct iscsi_session *session;
1225 	struct iscsi_task *task;
1226 
1227 	phwi_ctrlr = phba->phwi_ctrlr;
1228 	if (is_chip_be2_be3r(phba)) {
1229 		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1230 					  wrb_idx, psol);
1231 		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1232 				    cid, psol);
1233 	} else {
1234 		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1235 					  wrb_idx, psol);
1236 		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1237 				    cid, psol);
1238 	}
1239 
1240 	cri_index = BE_GET_CRI_FROM_CID(cid);
1241 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1242 	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1243 	session = beiscsi_conn->conn->session;
1244 	spin_lock_bh(&session->back_lock);
1245 	task = pwrb_handle->pio_handle;
1246 	if (task)
1247 		__iscsi_put_task(task);
1248 	spin_unlock_bh(&session->back_lock);
1249 }
1250 
1251 static void
1252 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1253 			struct iscsi_task *task,
1254 			struct common_sol_cqe *csol_cqe)
1255 {
1256 	struct iscsi_nopin *hdr;
1257 	struct iscsi_conn *conn = beiscsi_conn->conn;
1258 	struct beiscsi_io_task *io_task = task->dd_data;
1259 
1260 	hdr = (struct iscsi_nopin *)task->hdr;
1261 	hdr->flags = csol_cqe->i_flags;
1262 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1263 	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1264 				     csol_cqe->cmd_wnd - 1);
1265 
1266 	hdr->opcode = ISCSI_OP_NOOP_IN;
1267 	hdr->itt = io_task->libiscsi_itt;
1268 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1269 }
1270 
1271 static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1272 		struct sol_cqe *psol,
1273 		struct common_sol_cqe *csol_cqe)
1274 {
1275 	if (is_chip_be2_be3r(phba)) {
1276 		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1277 						    i_exp_cmd_sn, psol);
1278 		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1279 						  i_res_cnt, psol);
1280 		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1281 						  i_cmd_wnd, psol);
1282 		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1283 						    wrb_index, psol);
1284 		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1285 					      cid, psol);
1286 		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1287 						 hw_sts, psol);
1288 		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1289 						 i_resp, psol);
1290 		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1291 						i_sts, psol);
1292 		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1293 						  i_flags, psol);
1294 	} else {
1295 		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1296 						    i_exp_cmd_sn, psol);
1297 		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1298 						  i_res_cnt, psol);
1299 		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1300 						    wrb_index, psol);
1301 		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1302 					      cid, psol);
1303 		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1304 						 hw_sts, psol);
1305 		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1306 						  i_cmd_wnd, psol);
1307 		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1308 				  cmd_cmpl, psol))
1309 			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1310 							i_sts, psol);
1311 		else
1312 			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1313 							 i_sts, psol);
1314 		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1315 				  u, psol))
1316 			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
1317 
1318 		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1319 				  o, psol))
1320 			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
1321 	}
1322 }
1323 
1324 
1325 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1326 			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1327 {
1328 	struct iscsi_conn *conn = beiscsi_conn->conn;
1329 	struct iscsi_session *session = conn->session;
1330 	struct common_sol_cqe csol_cqe = {0};
1331 	struct hwi_wrb_context *pwrb_context;
1332 	struct hwi_controller *phwi_ctrlr;
1333 	struct wrb_handle *pwrb_handle;
1334 	struct iscsi_task *task;
1335 	uint16_t cri_index = 0;
1336 	uint8_t type;
1337 
1338 	phwi_ctrlr = phba->phwi_ctrlr;
1339 
1340 	/* Copy the elements to a common structure */
1341 	adapter_get_sol_cqe(phba, psol, &csol_cqe);
1342 
1343 	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1344 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1345 
1346 	pwrb_handle = pwrb_context->pwrb_handle_basestd[
1347 		      csol_cqe.wrb_index];
1348 
1349 	spin_lock_bh(&session->back_lock);
1350 	task = pwrb_handle->pio_handle;
1351 	if (!task) {
1352 		spin_unlock_bh(&session->back_lock);
1353 		return;
1354 	}
1355 	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
1356 
1357 	switch (type) {
1358 	case HWH_TYPE_IO:
1359 	case HWH_TYPE_IO_RD:
1360 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1361 		     ISCSI_OP_NOOP_OUT)
1362 			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1363 		else
1364 			be_complete_io(beiscsi_conn, task, &csol_cqe);
1365 		break;
1366 
1367 	case HWH_TYPE_LOGOUT:
1368 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1369 			be_complete_logout(beiscsi_conn, task, &csol_cqe);
1370 		else
1371 			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
1372 		break;
1373 
1374 	case HWH_TYPE_LOGIN:
1375 		beiscsi_log(phba, KERN_ERR,
1376 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1377 			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1378 			    " %s- Solicited path\n", __func__);
1379 		break;
1380 
1381 	case HWH_TYPE_NOP:
1382 		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1383 		break;
1384 
1385 	default:
1386 		beiscsi_log(phba, KERN_WARNING,
1387 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1388 			    "BM_%d : In %s, unknown type = %d "
1389 			    "wrb_index 0x%x CID 0x%x\n", __func__, type,
1390 			    csol_cqe.wrb_index,
1391 			    csol_cqe.cid);
1392 		break;
1393 	}
1394 
1395 	spin_unlock_bh(&session->back_lock);
1396 }
1397 
1398 /*
1399  * ASYNC PDUs include
1400  * a. Unsolicited NOP-In (target initiated NOP-In)
1401  * b. ASYNC Messages
1402  * c. Reject PDU
1403  * d. Login response
1404  * These headers arrive unprocessed by the EP firmware.
1405  * iSCSI layer processes them.
1406  */
1407 static unsigned int
1408 beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
1409 		struct pdu_base *phdr, void *pdata, unsigned int dlen)
1410 {
1411 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1412 	struct iscsi_conn *conn = beiscsi_conn->conn;
1413 	struct beiscsi_io_task *io_task;
1414 	struct iscsi_hdr *login_hdr;
1415 	struct iscsi_task *task;
1416 	u8 code;
1417 
1418 	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
1419 	switch (code) {
1420 	case ISCSI_OP_NOOP_IN:
1421 		pdata = NULL;
1422 		dlen = 0;
1423 		break;
1424 	case ISCSI_OP_ASYNC_EVENT:
1425 		break;
1426 	case ISCSI_OP_REJECT:
1427 		WARN_ON(!pdata);
1428 		WARN_ON(!(dlen == 48));
1429 		beiscsi_log(phba, KERN_ERR,
1430 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1431 			    "BM_%d : In ISCSI_OP_REJECT\n");
1432 		break;
1433 	case ISCSI_OP_LOGIN_RSP:
1434 	case ISCSI_OP_TEXT_RSP:
1435 		task = conn->login_task;
1436 		io_task = task->dd_data;
1437 		login_hdr = (struct iscsi_hdr *)phdr;
1438 		login_hdr->itt = io_task->libiscsi_itt;
1439 		break;
1440 	default:
1441 		beiscsi_log(phba, KERN_WARNING,
1442 			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1443 			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
1444 			    code);
1445 		return 1;
1446 	}
1447 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
1448 	return 0;
1449 }
1450 
1451 static inline void
1452 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
1453 			 struct hd_async_handle *pasync_handle)
1454 {
1455 	pasync_handle->is_final = 0;
1456 	pasync_handle->buffer_len = 0;
1457 	pasync_handle->in_use = 0;
1458 	list_del_init(&pasync_handle->link);
1459 }
1460 
1461 static void
1462 beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
1463 			  struct hd_async_context *pasync_ctx,
1464 			  u16 cri)
1465 {
1466 	struct hd_async_handle *pasync_handle, *tmp_handle;
1467 	struct list_head *plist;
1468 
1469 	plist  = &pasync_ctx->async_entry[cri].wq.list;
1470 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
1471 		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1472 
1473 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
1474 	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
1475 	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
1476 	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
1477 }
1478 
1479 static struct hd_async_handle *
1480 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
1481 		       struct hd_async_context *pasync_ctx,
1482 		       struct i_t_dpdu_cqe *pdpdu_cqe,
1483 		       u8 *header)
1484 {
1485 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1486 	struct hd_async_handle *pasync_handle;
1487 	struct be_bus_address phys_addr;
1488 	u16 cid, code, ci, cri;
1489 	u8 final, error = 0;
1490 	u32 dpl;
1491 
1492 	cid = beiscsi_conn->beiscsi_conn_cid;
1493 	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
1494 	/**
1495 	 * This function is invoked to get the right async_handle structure
1496 	 * from a given DEF PDU CQ entry.
1497 	 *
1498 	 * - index in CQ entry gives the vertical index
1499 	 * - address in CQ entry is the offset where the DMA last ended
1500 	 * - final - no more notifications for this PDU
1501 	 */
1502 	if (is_chip_be2_be3r(phba)) {
1503 		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1504 				    dpl, pdpdu_cqe);
1505 		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1506 				      index, pdpdu_cqe);
1507 		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1508 				      final, pdpdu_cqe);
1509 	} else {
1510 		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1511 				    dpl, pdpdu_cqe);
1512 		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1513 				      index, pdpdu_cqe);
1514 		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1515 				      final, pdpdu_cqe);
1516 	}
1517 
1518 	/**
1519 	 * DB addr Hi/Lo is same for BE and SKH.
1520 	 * Subtract the dataplacementlength to get to the base.
1521 	 */
1522 	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1523 						   db_addr_lo, pdpdu_cqe);
1524 	phys_addr.u.a32.address_lo -= dpl;
1525 	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1526 						   db_addr_hi, pdpdu_cqe);
1527 
1528 	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
1529 	switch (code) {
1530 	case UNSOL_HDR_NOTIFY:
1531 		pasync_handle = pasync_ctx->async_entry[ci].header;
1532 		*header = 1;
1533 		break;
1534 	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1535 		error = 1;
1536 		fallthrough;
1537 	case UNSOL_DATA_NOTIFY:
1538 		pasync_handle = pasync_ctx->async_entry[ci].data;
1539 		break;
1540 	/* called only for above codes */
1541 	default:
1542 		return NULL;
1543 	}
1544 
1545 	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
1546 	    pasync_handle->index != ci) {
1547 		/* driver bug - if ci does not match async handle index */
1548 		error = 1;
1549 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1550 			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
1551 			    cid, pasync_handle->is_header ? 'H' : 'D',
1552 			    pasync_handle->pa.u.a64.address,
1553 			    pasync_handle->index,
1554 			    phys_addr.u.a64.address, ci);
1555 		/* FW has stale address - attempt continuing by dropping */
1556 	}
1557 
1558 	/**
1559 	 * DEF PDU header and data buffers with errors should be simply
1560 	 * dropped as there are no consumers for it.
1561 	 */
1562 	if (error) {
1563 		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1564 		return NULL;
1565 	}
1566 
1567 	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
1568 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1569 			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
1570 			    cid, code, ci, phys_addr.u.a64.address);
1571 		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1572 	}
1573 
1574 	list_del_init(&pasync_handle->link);
1575 	/**
1576 	 * Each CID is associated with unique CRI.
1577 	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
1578 	 **/
1579 	pasync_handle->cri = cri;
1580 	pasync_handle->is_final = final;
1581 	pasync_handle->buffer_len = dpl;
1582 	pasync_handle->in_use = 1;
1583 
1584 	return pasync_handle;
1585 }
1586 
1587 static unsigned int
1588 beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
1589 		    struct hd_async_context *pasync_ctx,
1590 		    u16 cri)
1591 {
1592 	struct iscsi_session *session = beiscsi_conn->conn->session;
1593 	struct hd_async_handle *pasync_handle, *plast_handle;
1594 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1595 	void *phdr = NULL, *pdata = NULL;
1596 	u32 dlen = 0, status = 0;
1597 	struct list_head *plist;
1598 
1599 	plist = &pasync_ctx->async_entry[cri].wq.list;
1600 	plast_handle = NULL;
1601 	list_for_each_entry(pasync_handle, plist, link) {
1602 		plast_handle = pasync_handle;
1603 		/* get the header, the first entry */
1604 		if (!phdr) {
1605 			phdr = pasync_handle->pbuffer;
1606 			continue;
1607 		}
1608 		/* use first buffer to collect all the data */
1609 		if (!pdata) {
1610 			pdata = pasync_handle->pbuffer;
1611 			dlen = pasync_handle->buffer_len;
1612 			continue;
1613 		}
1614 		if (!pasync_handle->buffer_len ||
1615 		    (dlen + pasync_handle->buffer_len) >
1616 		    pasync_ctx->async_data.buffer_size)
1617 			break;
1618 		memcpy(pdata + dlen, pasync_handle->pbuffer,
1619 		       pasync_handle->buffer_len);
1620 		dlen += pasync_handle->buffer_len;
1621 	}
1622 
1623 	if (!plast_handle->is_final) {
1624 		/* last handle should have final PDU notification from FW */
1625 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1626 			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
1627 			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
1628 			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
1629 			    pasync_ctx->async_entry[cri].wq.hdr_len,
1630 			    pasync_ctx->async_entry[cri].wq.bytes_needed,
1631 			    pasync_ctx->async_entry[cri].wq.bytes_received);
1632 	}
1633 	spin_lock_bh(&session->back_lock);
1634 	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
1635 	spin_unlock_bh(&session->back_lock);
1636 	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1637 	return status;
1638 }
1639 
1640 static unsigned int
1641 beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
1642 		       struct hd_async_context *pasync_ctx,
1643 		       struct hd_async_handle *pasync_handle)
1644 {
1645 	unsigned int bytes_needed = 0, status = 0;
1646 	u16 cri = pasync_handle->cri;
1647 	struct cri_wait_queue *wq;
1648 	struct beiscsi_hba *phba;
1649 	struct pdu_base *ppdu;
1650 	char *err = "";
1651 
1652 	phba = beiscsi_conn->phba;
1653 	wq = &pasync_ctx->async_entry[cri].wq;
1654 	if (pasync_handle->is_header) {
1655 		/* check if PDU hdr is rcv'd when old hdr not completed */
1656 		if (wq->hdr_len) {
1657 			err = "incomplete";
1658 			goto drop_pdu;
1659 		}
1660 		ppdu = pasync_handle->pbuffer;
1661 		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
1662 					     data_len_hi, ppdu);
1663 		bytes_needed <<= 16;
1664 		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
1665 							  data_len_lo, ppdu));
1666 		wq->hdr_len = pasync_handle->buffer_len;
1667 		wq->bytes_received = 0;
1668 		wq->bytes_needed = bytes_needed;
1669 		list_add_tail(&pasync_handle->link, &wq->list);
1670 		if (!bytes_needed)
1671 			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1672 						     pasync_ctx, cri);
1673 	} else {
1674 		/* check if data received has header and is needed */
1675 		if (!wq->hdr_len || !wq->bytes_needed) {
1676 			err = "header less";
1677 			goto drop_pdu;
1678 		}
1679 		wq->bytes_received += pasync_handle->buffer_len;
1680 		/* Something got overwritten? Better catch it here. */
1681 		if (wq->bytes_received > wq->bytes_needed) {
1682 			err = "overflow";
1683 			goto drop_pdu;
1684 		}
1685 		list_add_tail(&pasync_handle->link, &wq->list);
1686 		if (wq->bytes_received == wq->bytes_needed)
1687 			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1688 						     pasync_ctx, cri);
1689 	}
1690 	return status;
1691 
1692 drop_pdu:
1693 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1694 		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
1695 		    beiscsi_conn->beiscsi_conn_cid, err,
1696 		    pasync_handle->is_header ? 'H' : 'D',
1697 		    wq->hdr_len, wq->bytes_needed,
1698 		    pasync_handle->buffer_len);
1699 	/* discard this handle */
1700 	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1701 	/* free all the other handles in cri_wait_queue */
1702 	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1703 	/* try continuing */
1704 	return status;
1705 }
1706 
1707 static void
1708 beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
1709 			 u8 header, u8 ulp_num, u16 nbuf)
1710 {
1711 	struct hd_async_handle *pasync_handle;
1712 	struct hd_async_context *pasync_ctx;
1713 	struct hwi_controller *phwi_ctrlr;
1714 	struct phys_addr *pasync_sge;
1715 	u32 ring_id, doorbell = 0;
1716 	u32 doorbell_offset;
1717 	u16 prod, pi;
1718 
1719 	phwi_ctrlr = phba->phwi_ctrlr;
1720 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1721 	if (header) {
1722 		pasync_sge = pasync_ctx->async_header.ring_base;
1723 		pi = pasync_ctx->async_header.pi;
1724 		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1725 		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1726 					doorbell_offset;
1727 	} else {
1728 		pasync_sge = pasync_ctx->async_data.ring_base;
1729 		pi = pasync_ctx->async_data.pi;
1730 		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1731 		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1732 					doorbell_offset;
1733 	}
1734 
1735 	for (prod = 0; prod < nbuf; prod++) {
1736 		if (header)
1737 			pasync_handle = pasync_ctx->async_entry[pi].header;
1738 		else
1739 			pasync_handle = pasync_ctx->async_entry[pi].data;
1740 		WARN_ON(pasync_handle->is_header != header);
1741 		WARN_ON(pasync_handle->index != pi);
1742 		/* setup the ring only once */
1743 		if (nbuf == pasync_ctx->num_entries) {
1744 			/* note hi is lo */
1745 			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
1746 			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
1747 		}
1748 		if (++pi == pasync_ctx->num_entries)
1749 			pi = 0;
1750 	}
1751 
1752 	if (header)
1753 		pasync_ctx->async_header.pi = pi;
1754 	else
1755 		pasync_ctx->async_data.pi = pi;
1756 
1757 	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1758 	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1759 	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1760 	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
1761 	iowrite32(doorbell, phba->db_va + doorbell_offset);
1762 }
1763 
1764 static void
1765 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
1766 			  struct i_t_dpdu_cqe *pdpdu_cqe)
1767 {
1768 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1769 	struct hd_async_handle *pasync_handle = NULL;
1770 	struct hd_async_context *pasync_ctx;
1771 	struct hwi_controller *phwi_ctrlr;
1772 	u8 ulp_num, consumed, header = 0;
1773 	u16 cid_cri;
1774 
1775 	phwi_ctrlr = phba->phwi_ctrlr;
1776 	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
1777 	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
1778 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1779 	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
1780 					       pdpdu_cqe, &header);
1781 	if (is_chip_be2_be3r(phba))
1782 		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1783 					 num_cons, pdpdu_cqe);
1784 	else
1785 		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1786 					 num_cons, pdpdu_cqe);
1787 	if (pasync_handle)
1788 		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
1789 	/* num_cons indicates number of 8 RQEs consumed */
1790 	if (consumed)
1791 		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
1792 }
1793 
1794 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
1795 {
1796 	struct be_queue_info *mcc_cq;
1797 	struct  be_mcc_compl *mcc_compl;
1798 	unsigned int num_processed = 0;
1799 
1800 	mcc_cq = &phba->ctrl.mcc_obj.cq;
1801 	mcc_compl = queue_tail_node(mcc_cq);
1802 	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1803 	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1804 		if (beiscsi_hba_in_error(phba))
1805 			return;
1806 
1807 		if (num_processed >= 32) {
1808 			hwi_ring_cq_db(phba, mcc_cq->id,
1809 					num_processed, 0);
1810 			num_processed = 0;
1811 		}
1812 		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1813 			beiscsi_process_async_event(phba, mcc_compl);
1814 		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1815 			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
1816 		}
1817 
1818 		mcc_compl->flags = 0;
1819 		queue_tail_inc(mcc_cq);
1820 		mcc_compl = queue_tail_node(mcc_cq);
1821 		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1822 		num_processed++;
1823 	}
1824 
1825 	if (num_processed > 0)
1826 		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
1827 }
1828 
1829 static void beiscsi_mcc_work(struct work_struct *work)
1830 {
1831 	struct be_eq_obj *pbe_eq;
1832 	struct beiscsi_hba *phba;
1833 
1834 	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
1835 	phba = pbe_eq->phba;
1836 	beiscsi_process_mcc_cq(phba);
1837 	/* rearm EQ for further interrupts */
1838 	if (!beiscsi_hba_in_error(phba))
1839 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1840 }
1841 
1842 /**
1843  * beiscsi_process_cq()- Process the Completion Queue
1844  * @pbe_eq: Event Q on which the Completion has come
1845  * @budget: Max number of events to processed
1846  *
1847  * return
1848  *     Number of Completion Entries processed.
1849  **/
1850 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1851 {
1852 	struct be_queue_info *cq;
1853 	struct sol_cqe *sol;
1854 	unsigned int total = 0;
1855 	unsigned int num_processed = 0;
1856 	unsigned short code = 0, cid = 0;
1857 	uint16_t cri_index = 0;
1858 	struct beiscsi_conn *beiscsi_conn;
1859 	struct beiscsi_endpoint *beiscsi_ep;
1860 	struct iscsi_endpoint *ep;
1861 	struct beiscsi_hba *phba;
1862 
1863 	cq = pbe_eq->cq;
1864 	sol = queue_tail_node(cq);
1865 	phba = pbe_eq->phba;
1866 
1867 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1868 	       CQE_VALID_MASK) {
1869 		if (beiscsi_hba_in_error(phba))
1870 			return 0;
1871 
1872 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1873 
1874 		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
1875 				CQE_CODE_MASK);
1876 
1877 		 /* Get the CID */
1878 		if (is_chip_be2_be3r(phba)) {
1879 			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
1880 		} else {
1881 			if ((code == DRIVERMSG_NOTIFY) ||
1882 			    (code == UNSOL_HDR_NOTIFY) ||
1883 			    (code == UNSOL_DATA_NOTIFY))
1884 				cid = AMAP_GET_BITS(
1885 						    struct amap_i_t_dpdu_cqe_v2,
1886 						    cid, sol);
1887 			else
1888 				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1889 						    cid, sol);
1890 		}
1891 
1892 		cri_index = BE_GET_CRI_FROM_CID(cid);
1893 		ep = phba->ep_array[cri_index];
1894 
1895 		if (ep == NULL) {
1896 			/* connection has already been freed
1897 			 * just move on to next one
1898 			 */
1899 			beiscsi_log(phba, KERN_WARNING,
1900 				    BEISCSI_LOG_INIT,
1901 				    "BM_%d : proc cqe of disconn ep: cid %d\n",
1902 				    cid);
1903 			goto proc_next_cqe;
1904 		}
1905 
1906 		beiscsi_ep = ep->dd_data;
1907 		beiscsi_conn = beiscsi_ep->conn;
1908 
1909 		/* replenish cq */
1910 		if (num_processed == 32) {
1911 			hwi_ring_cq_db(phba, cq->id, 32, 0);
1912 			num_processed = 0;
1913 		}
1914 		total++;
1915 
1916 		switch (code) {
1917 		case SOL_CMD_COMPLETE:
1918 			hwi_complete_cmd(beiscsi_conn, phba, sol);
1919 			break;
1920 		case DRIVERMSG_NOTIFY:
1921 			beiscsi_log(phba, KERN_INFO,
1922 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1923 				    "BM_%d : Received %s[%d] on CID : %d\n",
1924 				    cqe_desc[code], code, cid);
1925 
1926 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1927 			break;
1928 		case UNSOL_HDR_NOTIFY:
1929 			beiscsi_log(phba, KERN_INFO,
1930 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1931 				    "BM_%d : Received %s[%d] on CID : %d\n",
1932 				    cqe_desc[code], code, cid);
1933 
1934 			spin_lock_bh(&phba->async_pdu_lock);
1935 			beiscsi_hdq_process_compl(beiscsi_conn,
1936 						  (struct i_t_dpdu_cqe *)sol);
1937 			spin_unlock_bh(&phba->async_pdu_lock);
1938 			break;
1939 		case UNSOL_DATA_NOTIFY:
1940 			beiscsi_log(phba, KERN_INFO,
1941 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1942 				    "BM_%d : Received %s[%d] on CID : %d\n",
1943 				    cqe_desc[code], code, cid);
1944 
1945 			spin_lock_bh(&phba->async_pdu_lock);
1946 			beiscsi_hdq_process_compl(beiscsi_conn,
1947 						  (struct i_t_dpdu_cqe *)sol);
1948 			spin_unlock_bh(&phba->async_pdu_lock);
1949 			break;
1950 		case CXN_INVALIDATE_INDEX_NOTIFY:
1951 		case CMD_INVALIDATED_NOTIFY:
1952 		case CXN_INVALIDATE_NOTIFY:
1953 			beiscsi_log(phba, KERN_ERR,
1954 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1955 				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
1956 				    cqe_desc[code], code, cid);
1957 			break;
1958 		case CXN_KILLED_HDR_DIGEST_ERR:
1959 		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1960 			beiscsi_log(phba, KERN_ERR,
1961 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1962 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
1963 				    cqe_desc[code], code,  cid);
1964 			break;
1965 		case CMD_KILLED_INVALID_STATSN_RCVD:
1966 		case CMD_KILLED_INVALID_R2T_RCVD:
1967 		case CMD_CXN_KILLED_LUN_INVALID:
1968 		case CMD_CXN_KILLED_ICD_INVALID:
1969 		case CMD_CXN_KILLED_ITT_INVALID:
1970 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1971 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1972 			beiscsi_log(phba, KERN_ERR,
1973 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1974 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
1975 				    cqe_desc[code], code,  cid);
1976 			break;
1977 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1978 			beiscsi_log(phba, KERN_ERR,
1979 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1980 				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
1981 				    cqe_desc[code], code, cid);
1982 			spin_lock_bh(&phba->async_pdu_lock);
1983 			/* driver consumes the entry and drops the contents */
1984 			beiscsi_hdq_process_compl(beiscsi_conn,
1985 						  (struct i_t_dpdu_cqe *)sol);
1986 			spin_unlock_bh(&phba->async_pdu_lock);
1987 			break;
1988 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1989 		case CXN_KILLED_BURST_LEN_MISMATCH:
1990 		case CXN_KILLED_AHS_RCVD:
1991 		case CXN_KILLED_UNKNOWN_HDR:
1992 		case CXN_KILLED_STALE_ITT_TTT_RCVD:
1993 		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1994 		case CXN_KILLED_TIMED_OUT:
1995 		case CXN_KILLED_FIN_RCVD:
1996 		case CXN_KILLED_RST_SENT:
1997 		case CXN_KILLED_RST_RCVD:
1998 		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1999 		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2000 		case CXN_KILLED_OVER_RUN_RESIDUAL:
2001 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
2002 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2003 			beiscsi_log(phba, KERN_ERR,
2004 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2005 				    "BM_%d : Event %s[%d] received on CID : %d\n",
2006 				    cqe_desc[code], code, cid);
2007 			if (beiscsi_conn)
2008 				iscsi_conn_failure(beiscsi_conn->conn,
2009 						   ISCSI_ERR_CONN_FAILED);
2010 			break;
2011 		default:
2012 			beiscsi_log(phba, KERN_ERR,
2013 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2014 				    "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
2015 				    code, cid);
2016 			break;
2017 		}
2018 
2019 proc_next_cqe:
2020 		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2021 		queue_tail_inc(cq);
2022 		sol = queue_tail_node(cq);
2023 		num_processed++;
2024 		if (total == budget)
2025 			break;
2026 	}
2027 
2028 	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
2029 	return total;
2030 }
2031 
2032 static int be_iopoll(struct irq_poll *iop, int budget)
2033 {
2034 	unsigned int ret, io_events;
2035 	struct beiscsi_hba *phba;
2036 	struct be_eq_obj *pbe_eq;
2037 	struct be_eq_entry *eqe = NULL;
2038 	struct be_queue_info *eq;
2039 
2040 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2041 	phba = pbe_eq->phba;
2042 	if (beiscsi_hba_in_error(phba)) {
2043 		irq_poll_complete(iop);
2044 		return 0;
2045 	}
2046 
2047 	io_events = 0;
2048 	eq = &pbe_eq->q;
2049 	eqe = queue_tail_node(eq);
2050 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
2051 			EQE_VALID_MASK) {
2052 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2053 		queue_tail_inc(eq);
2054 		eqe = queue_tail_node(eq);
2055 		io_events++;
2056 	}
2057 	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
2058 
2059 	ret = beiscsi_process_cq(pbe_eq, budget);
2060 	pbe_eq->cq_count += ret;
2061 	if (ret < budget) {
2062 		irq_poll_complete(iop);
2063 		beiscsi_log(phba, KERN_INFO,
2064 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2065 			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
2066 			    pbe_eq->q.id, ret);
2067 		if (!beiscsi_hba_in_error(phba))
2068 			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2069 	}
2070 	return ret;
2071 }
2072 
2073 static void
2074 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2075 		  unsigned int num_sg, struct beiscsi_io_task *io_task)
2076 {
2077 	struct iscsi_sge *psgl;
2078 	unsigned int sg_len, index;
2079 	unsigned int sge_len = 0;
2080 	unsigned long long addr;
2081 	struct scatterlist *l_sg;
2082 	unsigned int offset;
2083 
2084 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2085 		      io_task->bhs_pa.u.a32.address_lo);
2086 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2087 		      io_task->bhs_pa.u.a32.address_hi);
2088 
2089 	l_sg = sg;
2090 	for (index = 0; (index < num_sg) && (index < 2); index++,
2091 			sg = sg_next(sg)) {
2092 		if (index == 0) {
2093 			sg_len = sg_dma_len(sg);
2094 			addr = (u64) sg_dma_address(sg);
2095 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2096 				      sge0_addr_lo, pwrb,
2097 				      lower_32_bits(addr));
2098 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2099 				      sge0_addr_hi, pwrb,
2100 				      upper_32_bits(addr));
2101 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2102 				      sge0_len, pwrb,
2103 				      sg_len);
2104 			sge_len = sg_len;
2105 		} else {
2106 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2107 				      pwrb, sge_len);
2108 			sg_len = sg_dma_len(sg);
2109 			addr = (u64) sg_dma_address(sg);
2110 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2111 				      sge1_addr_lo, pwrb,
2112 				      lower_32_bits(addr));
2113 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2114 				      sge1_addr_hi, pwrb,
2115 				      upper_32_bits(addr));
2116 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2117 				      sge1_len, pwrb,
2118 				      sg_len);
2119 		}
2120 	}
2121 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2122 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2123 
2124 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2125 
2126 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2127 		      io_task->bhs_pa.u.a32.address_hi);
2128 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2129 		      io_task->bhs_pa.u.a32.address_lo);
2130 
2131 	if (num_sg == 1) {
2132 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2133 			      1);
2134 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2135 			      0);
2136 	} else if (num_sg == 2) {
2137 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2138 			      0);
2139 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2140 			      1);
2141 	} else {
2142 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2143 			      0);
2144 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2145 			      0);
2146 	}
2147 
2148 	sg = l_sg;
2149 	psgl++;
2150 	psgl++;
2151 	offset = 0;
2152 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2153 		sg_len = sg_dma_len(sg);
2154 		addr = (u64) sg_dma_address(sg);
2155 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2156 			      lower_32_bits(addr));
2157 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2158 			      upper_32_bits(addr));
2159 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2160 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2161 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2162 		offset += sg_len;
2163 	}
2164 	psgl--;
2165 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2166 }
2167 
2168 static void
2169 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2170 	      unsigned int num_sg, struct beiscsi_io_task *io_task)
2171 {
2172 	struct iscsi_sge *psgl;
2173 	unsigned int sg_len, index;
2174 	unsigned int sge_len = 0;
2175 	unsigned long long addr;
2176 	struct scatterlist *l_sg;
2177 	unsigned int offset;
2178 
2179 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2180 				      io_task->bhs_pa.u.a32.address_lo);
2181 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2182 				      io_task->bhs_pa.u.a32.address_hi);
2183 
2184 	l_sg = sg;
2185 	for (index = 0; (index < num_sg) && (index < 2); index++,
2186 							 sg = sg_next(sg)) {
2187 		if (index == 0) {
2188 			sg_len = sg_dma_len(sg);
2189 			addr = (u64) sg_dma_address(sg);
2190 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2191 						((u32)(addr & 0xFFFFFFFF)));
2192 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2193 							((u32)(addr >> 32)));
2194 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2195 							sg_len);
2196 			sge_len = sg_len;
2197 		} else {
2198 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2199 							pwrb, sge_len);
2200 			sg_len = sg_dma_len(sg);
2201 			addr = (u64) sg_dma_address(sg);
2202 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2203 						((u32)(addr & 0xFFFFFFFF)));
2204 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2205 							((u32)(addr >> 32)));
2206 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2207 							sg_len);
2208 		}
2209 	}
2210 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2211 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2212 
2213 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2214 
2215 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2216 			io_task->bhs_pa.u.a32.address_hi);
2217 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2218 			io_task->bhs_pa.u.a32.address_lo);
2219 
2220 	if (num_sg == 1) {
2221 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2222 								1);
2223 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2224 								0);
2225 	} else if (num_sg == 2) {
2226 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2227 								0);
2228 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2229 								1);
2230 	} else {
2231 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2232 								0);
2233 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2234 								0);
2235 	}
2236 	sg = l_sg;
2237 	psgl++;
2238 	psgl++;
2239 	offset = 0;
2240 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2241 		sg_len = sg_dma_len(sg);
2242 		addr = (u64) sg_dma_address(sg);
2243 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2244 						(addr & 0xFFFFFFFF));
2245 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2246 						(addr >> 32));
2247 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2248 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2249 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2250 		offset += sg_len;
2251 	}
2252 	psgl--;
2253 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2254 }
2255 
2256 /**
2257  * hwi_write_buffer()- Populate the WRB with task info
2258  * @pwrb: ptr to the WRB entry
2259  * @task: iscsi task which is to be executed
2260  **/
2261 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2262 {
2263 	struct iscsi_sge *psgl;
2264 	struct beiscsi_io_task *io_task = task->dd_data;
2265 	struct beiscsi_conn *beiscsi_conn = io_task->conn;
2266 	struct beiscsi_hba *phba = beiscsi_conn->phba;
2267 	uint8_t dsp_value = 0;
2268 
2269 	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2270 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2271 				io_task->bhs_pa.u.a32.address_lo);
2272 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2273 				io_task->bhs_pa.u.a32.address_hi);
2274 
2275 	if (task->data) {
2276 
2277 		/* Check for the data_count */
2278 		dsp_value = (task->data_count) ? 1 : 0;
2279 
2280 		if (is_chip_be2_be3r(phba))
2281 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2282 				      pwrb, dsp_value);
2283 		else
2284 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2285 				      pwrb, dsp_value);
2286 
2287 		/* Map addr only if there is data_count */
2288 		if (dsp_value) {
2289 			io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
2290 							     task->data,
2291 							     task->data_count,
2292 							     DMA_TO_DEVICE);
2293 			if (dma_mapping_error(&phba->pcidev->dev,
2294 						  io_task->mtask_addr))
2295 				return -ENOMEM;
2296 			io_task->mtask_data_count = task->data_count;
2297 		} else
2298 			io_task->mtask_addr = 0;
2299 
2300 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2301 			      lower_32_bits(io_task->mtask_addr));
2302 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2303 			      upper_32_bits(io_task->mtask_addr));
2304 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2305 						task->data_count);
2306 
2307 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2308 	} else {
2309 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2310 		io_task->mtask_addr = 0;
2311 	}
2312 
2313 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2314 
2315 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2316 
2317 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2318 		      io_task->bhs_pa.u.a32.address_hi);
2319 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2320 		      io_task->bhs_pa.u.a32.address_lo);
2321 	if (task->data) {
2322 		psgl++;
2323 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2324 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2325 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2326 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2327 		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2328 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2329 
2330 		psgl++;
2331 		if (task->data) {
2332 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2333 				      lower_32_bits(io_task->mtask_addr));
2334 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2335 				      upper_32_bits(io_task->mtask_addr));
2336 		}
2337 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2338 	}
2339 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2340 	return 0;
2341 }
2342 
2343 /**
2344  * beiscsi_find_mem_req()- Find mem needed
2345  * @phba: ptr to HBA struct
2346  **/
2347 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2348 {
2349 	uint8_t mem_descr_index, ulp_num;
2350 	unsigned int num_async_pdu_buf_pages;
2351 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2352 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2353 
2354 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2355 
2356 	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2357 						 BE_ISCSI_PDU_HEADER_SIZE;
2358 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2359 					    sizeof(struct hwi_context_memory);
2360 
2361 
2362 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2363 	    * (phba->params.wrbs_per_cxn)
2364 	    * phba->params.cxns_per_ctrl;
2365 	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2366 				 (phba->params.wrbs_per_cxn);
2367 	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2368 				phba->params.cxns_per_ctrl);
2369 
2370 	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2371 		phba->params.icds_per_ctrl;
2372 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2373 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2374 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2375 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2376 
2377 			num_async_pdu_buf_sgl_pages =
2378 				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2379 					       phba, ulp_num) *
2380 					       sizeof(struct phys_addr));
2381 
2382 			num_async_pdu_buf_pages =
2383 				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2384 					       phba, ulp_num) *
2385 					       phba->params.defpdu_hdr_sz);
2386 
2387 			num_async_pdu_data_pages =
2388 				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2389 					       phba, ulp_num) *
2390 					       phba->params.defpdu_data_sz);
2391 
2392 			num_async_pdu_data_sgl_pages =
2393 				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
2394 					       phba, ulp_num) *
2395 					       sizeof(struct phys_addr));
2396 
2397 			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2398 					  (ulp_num * MEM_DESCR_OFFSET));
2399 			phba->mem_req[mem_descr_index] =
2400 					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2401 					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2402 
2403 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2404 					  (ulp_num * MEM_DESCR_OFFSET));
2405 			phba->mem_req[mem_descr_index] =
2406 					  num_async_pdu_buf_pages *
2407 					  PAGE_SIZE;
2408 
2409 			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2410 					  (ulp_num * MEM_DESCR_OFFSET));
2411 			phba->mem_req[mem_descr_index] =
2412 					  num_async_pdu_data_pages *
2413 					  PAGE_SIZE;
2414 
2415 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2416 					  (ulp_num * MEM_DESCR_OFFSET));
2417 			phba->mem_req[mem_descr_index] =
2418 					  num_async_pdu_buf_sgl_pages *
2419 					  PAGE_SIZE;
2420 
2421 			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2422 					  (ulp_num * MEM_DESCR_OFFSET));
2423 			phba->mem_req[mem_descr_index] =
2424 					  num_async_pdu_data_sgl_pages *
2425 					  PAGE_SIZE;
2426 
2427 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2428 					  (ulp_num * MEM_DESCR_OFFSET));
2429 			phba->mem_req[mem_descr_index] =
2430 				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2431 				sizeof(struct hd_async_handle);
2432 
2433 			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2434 					  (ulp_num * MEM_DESCR_OFFSET));
2435 			phba->mem_req[mem_descr_index] =
2436 				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2437 				sizeof(struct hd_async_handle);
2438 
2439 			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2440 					  (ulp_num * MEM_DESCR_OFFSET));
2441 			phba->mem_req[mem_descr_index] =
2442 				sizeof(struct hd_async_context) +
2443 				(BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
2444 				 sizeof(struct hd_async_entry));
2445 		}
2446 	}
2447 }
2448 
2449 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2450 {
2451 	dma_addr_t bus_add;
2452 	struct hwi_controller *phwi_ctrlr;
2453 	struct be_mem_descriptor *mem_descr;
2454 	struct mem_array *mem_arr, *mem_arr_orig;
2455 	unsigned int i, j, alloc_size, curr_alloc_size;
2456 
2457 	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2458 	if (!phba->phwi_ctrlr)
2459 		return -ENOMEM;
2460 
2461 	/* Allocate memory for wrb_context */
2462 	phwi_ctrlr = phba->phwi_ctrlr;
2463 	phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
2464 					  sizeof(struct hwi_wrb_context),
2465 					  GFP_KERNEL);
2466 	if (!phwi_ctrlr->wrb_context) {
2467 		kfree(phba->phwi_ctrlr);
2468 		return -ENOMEM;
2469 	}
2470 
2471 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2472 				 GFP_KERNEL);
2473 	if (!phba->init_mem) {
2474 		kfree(phwi_ctrlr->wrb_context);
2475 		kfree(phba->phwi_ctrlr);
2476 		return -ENOMEM;
2477 	}
2478 
2479 	mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT,
2480 				     sizeof(*mem_arr_orig),
2481 				     GFP_KERNEL);
2482 	if (!mem_arr_orig) {
2483 		kfree(phba->init_mem);
2484 		kfree(phwi_ctrlr->wrb_context);
2485 		kfree(phba->phwi_ctrlr);
2486 		return -ENOMEM;
2487 	}
2488 
2489 	mem_descr = phba->init_mem;
2490 	for (i = 0; i < SE_MEM_MAX; i++) {
2491 		if (!phba->mem_req[i]) {
2492 			mem_descr->mem_array = NULL;
2493 			mem_descr++;
2494 			continue;
2495 		}
2496 
2497 		j = 0;
2498 		mem_arr = mem_arr_orig;
2499 		alloc_size = phba->mem_req[i];
2500 		memset(mem_arr, 0, sizeof(struct mem_array) *
2501 		       BEISCSI_MAX_FRAGS_INIT);
2502 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2503 		do {
2504 			mem_arr->virtual_address =
2505 				dma_alloc_coherent(&phba->pcidev->dev,
2506 					curr_alloc_size, &bus_add, GFP_KERNEL);
2507 			if (!mem_arr->virtual_address) {
2508 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2509 					goto free_mem;
2510 				if (curr_alloc_size -
2511 					rounddown_pow_of_two(curr_alloc_size))
2512 					curr_alloc_size = rounddown_pow_of_two
2513 							     (curr_alloc_size);
2514 				else
2515 					curr_alloc_size = curr_alloc_size / 2;
2516 			} else {
2517 				mem_arr->bus_address.u.
2518 				    a64.address = (__u64) bus_add;
2519 				mem_arr->size = curr_alloc_size;
2520 				alloc_size -= curr_alloc_size;
2521 				curr_alloc_size = min(be_max_phys_size *
2522 						      1024, alloc_size);
2523 				j++;
2524 				mem_arr++;
2525 			}
2526 		} while (alloc_size);
2527 		mem_descr->num_elements = j;
2528 		mem_descr->size_in_bytes = phba->mem_req[i];
2529 		mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr),
2530 						     GFP_KERNEL);
2531 		if (!mem_descr->mem_array)
2532 			goto free_mem;
2533 
2534 		memcpy(mem_descr->mem_array, mem_arr_orig,
2535 		       sizeof(struct mem_array) * j);
2536 		mem_descr++;
2537 	}
2538 	kfree(mem_arr_orig);
2539 	return 0;
2540 free_mem:
2541 	mem_descr->num_elements = j;
2542 	while ((i) || (j)) {
2543 		for (j = mem_descr->num_elements; j > 0; j--) {
2544 			dma_free_coherent(&phba->pcidev->dev,
2545 					    mem_descr->mem_array[j - 1].size,
2546 					    mem_descr->mem_array[j - 1].
2547 					    virtual_address,
2548 					    (unsigned long)mem_descr->
2549 					    mem_array[j - 1].
2550 					    bus_address.u.a64.address);
2551 		}
2552 		if (i) {
2553 			i--;
2554 			kfree(mem_descr->mem_array);
2555 			mem_descr--;
2556 		}
2557 	}
2558 	kfree(mem_arr_orig);
2559 	kfree(phba->init_mem);
2560 	kfree(phba->phwi_ctrlr->wrb_context);
2561 	kfree(phba->phwi_ctrlr);
2562 	return -ENOMEM;
2563 }
2564 
2565 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2566 {
2567 	beiscsi_find_mem_req(phba);
2568 	return beiscsi_alloc_mem(phba);
2569 }
2570 
2571 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2572 {
2573 	struct pdu_data_out *pdata_out;
2574 	struct pdu_nop_out *pnop_out;
2575 	struct be_mem_descriptor *mem_descr;
2576 
2577 	mem_descr = phba->init_mem;
2578 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2579 	pdata_out =
2580 	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2581 	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2582 
2583 	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2584 		      IIOC_SCSI_DATA);
2585 
2586 	pnop_out =
2587 	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2588 				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2589 
2590 	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2591 	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2592 	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2593 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2594 }
2595 
2596 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2597 {
2598 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2599 	struct hwi_context_memory *phwi_ctxt;
2600 	struct wrb_handle *pwrb_handle = NULL;
2601 	struct hwi_controller *phwi_ctrlr;
2602 	struct hwi_wrb_context *pwrb_context;
2603 	struct iscsi_wrb *pwrb = NULL;
2604 	unsigned int num_cxn_wrbh = 0;
2605 	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2606 
2607 	mem_descr_wrbh = phba->init_mem;
2608 	mem_descr_wrbh += HWI_MEM_WRBH;
2609 
2610 	mem_descr_wrb = phba->init_mem;
2611 	mem_descr_wrb += HWI_MEM_WRB;
2612 	phwi_ctrlr = phba->phwi_ctrlr;
2613 
2614 	/* Allocate memory for WRBQ */
2615 	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2616 	phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl,
2617 				     sizeof(struct be_queue_info),
2618 				     GFP_KERNEL);
2619 	if (!phwi_ctxt->be_wrbq) {
2620 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2621 			    "BM_%d : WRBQ Mem Alloc Failed\n");
2622 		return -ENOMEM;
2623 	}
2624 
2625 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2626 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2627 		pwrb_context->pwrb_handle_base =
2628 				kcalloc(phba->params.wrbs_per_cxn,
2629 					sizeof(struct wrb_handle *),
2630 					GFP_KERNEL);
2631 		if (!pwrb_context->pwrb_handle_base) {
2632 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2633 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2634 			goto init_wrb_hndl_failed;
2635 		}
2636 		pwrb_context->pwrb_handle_basestd =
2637 				kcalloc(phba->params.wrbs_per_cxn,
2638 					sizeof(struct wrb_handle *),
2639 					GFP_KERNEL);
2640 		if (!pwrb_context->pwrb_handle_basestd) {
2641 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2642 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2643 			goto init_wrb_hndl_failed;
2644 		}
2645 		if (!num_cxn_wrbh) {
2646 			pwrb_handle =
2647 				mem_descr_wrbh->mem_array[idx].virtual_address;
2648 			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2649 					((sizeof(struct wrb_handle)) *
2650 					 phba->params.wrbs_per_cxn));
2651 			idx++;
2652 		}
2653 		pwrb_context->alloc_index = 0;
2654 		pwrb_context->wrb_handles_available = 0;
2655 		pwrb_context->free_index = 0;
2656 
2657 		if (num_cxn_wrbh) {
2658 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2659 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2660 				pwrb_context->pwrb_handle_basestd[j] =
2661 								pwrb_handle;
2662 				pwrb_context->wrb_handles_available++;
2663 				pwrb_handle->wrb_index = j;
2664 				pwrb_handle++;
2665 			}
2666 			num_cxn_wrbh--;
2667 		}
2668 		spin_lock_init(&pwrb_context->wrb_lock);
2669 	}
2670 	idx = 0;
2671 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2672 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2673 		if (!num_cxn_wrb) {
2674 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2675 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2676 				((sizeof(struct iscsi_wrb) *
2677 				  phba->params.wrbs_per_cxn));
2678 			idx++;
2679 		}
2680 
2681 		if (num_cxn_wrb) {
2682 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2683 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2684 				pwrb_handle->pwrb = pwrb;
2685 				pwrb++;
2686 			}
2687 			num_cxn_wrb--;
2688 		}
2689 	}
2690 	return 0;
2691 init_wrb_hndl_failed:
2692 	for (j = index; j > 0; j--) {
2693 		pwrb_context = &phwi_ctrlr->wrb_context[j];
2694 		kfree(pwrb_context->pwrb_handle_base);
2695 		kfree(pwrb_context->pwrb_handle_basestd);
2696 	}
2697 	return -ENOMEM;
2698 }
2699 
2700 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2701 {
2702 	uint8_t ulp_num;
2703 	struct hwi_controller *phwi_ctrlr;
2704 	struct hba_parameters *p = &phba->params;
2705 	struct hd_async_context *pasync_ctx;
2706 	struct hd_async_handle *pasync_header_h, *pasync_data_h;
2707 	unsigned int index, idx, num_per_mem, num_async_data;
2708 	struct be_mem_descriptor *mem_descr;
2709 
2710 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2711 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2712 			/* get async_ctx for each ULP */
2713 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2714 			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2715 				     (ulp_num * MEM_DESCR_OFFSET));
2716 
2717 			phwi_ctrlr = phba->phwi_ctrlr;
2718 			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2719 				(struct hd_async_context *)
2720 				 mem_descr->mem_array[0].virtual_address;
2721 
2722 			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2723 			memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2724 
2725 			pasync_ctx->async_entry =
2726 					(struct hd_async_entry *)
2727 					((long unsigned int)pasync_ctx +
2728 					sizeof(struct hd_async_context));
2729 
2730 			pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
2731 						  ulp_num);
2732 			/* setup header buffers */
2733 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2734 			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2735 				(ulp_num * MEM_DESCR_OFFSET);
2736 			if (mem_descr->mem_array[0].virtual_address) {
2737 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2738 					    "BM_%d : hwi_init_async_pdu_ctx"
2739 					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2740 					    ulp_num,
2741 					    mem_descr->mem_array[0].
2742 					    virtual_address);
2743 			} else
2744 				beiscsi_log(phba, KERN_WARNING,
2745 					    BEISCSI_LOG_INIT,
2746 					    "BM_%d : No Virtual address for ULP : %d\n",
2747 					    ulp_num);
2748 
2749 			pasync_ctx->async_header.pi = 0;
2750 			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2751 			pasync_ctx->async_header.va_base =
2752 				mem_descr->mem_array[0].virtual_address;
2753 
2754 			pasync_ctx->async_header.pa_base.u.a64.address =
2755 				mem_descr->mem_array[0].
2756 				bus_address.u.a64.address;
2757 
2758 			/* setup header buffer sgls */
2759 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2760 			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2761 				     (ulp_num * MEM_DESCR_OFFSET);
2762 			if (mem_descr->mem_array[0].virtual_address) {
2763 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2764 					    "BM_%d : hwi_init_async_pdu_ctx"
2765 					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2766 					    ulp_num,
2767 					    mem_descr->mem_array[0].
2768 					    virtual_address);
2769 			} else
2770 				beiscsi_log(phba, KERN_WARNING,
2771 					    BEISCSI_LOG_INIT,
2772 					    "BM_%d : No Virtual address for ULP : %d\n",
2773 					    ulp_num);
2774 
2775 			pasync_ctx->async_header.ring_base =
2776 				mem_descr->mem_array[0].virtual_address;
2777 
2778 			/* setup header buffer handles */
2779 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2780 			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2781 				     (ulp_num * MEM_DESCR_OFFSET);
2782 			if (mem_descr->mem_array[0].virtual_address) {
2783 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2784 					    "BM_%d : hwi_init_async_pdu_ctx"
2785 					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
2786 					    ulp_num,
2787 					    mem_descr->mem_array[0].
2788 					    virtual_address);
2789 			} else
2790 				beiscsi_log(phba, KERN_WARNING,
2791 					    BEISCSI_LOG_INIT,
2792 					    "BM_%d : No Virtual address for ULP : %d\n",
2793 					    ulp_num);
2794 
2795 			pasync_ctx->async_header.handle_base =
2796 				mem_descr->mem_array[0].virtual_address;
2797 
2798 			/* setup data buffer sgls */
2799 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2800 			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
2801 				     (ulp_num * MEM_DESCR_OFFSET);
2802 			if (mem_descr->mem_array[0].virtual_address) {
2803 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2804 					    "BM_%d : hwi_init_async_pdu_ctx"
2805 					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
2806 					    ulp_num,
2807 					    mem_descr->mem_array[0].
2808 					    virtual_address);
2809 			} else
2810 				beiscsi_log(phba, KERN_WARNING,
2811 					    BEISCSI_LOG_INIT,
2812 					    "BM_%d : No Virtual address for ULP : %d\n",
2813 					    ulp_num);
2814 
2815 			pasync_ctx->async_data.ring_base =
2816 				mem_descr->mem_array[0].virtual_address;
2817 
2818 			/* setup data buffer handles */
2819 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2820 			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2821 				     (ulp_num * MEM_DESCR_OFFSET);
2822 			if (!mem_descr->mem_array[0].virtual_address)
2823 				beiscsi_log(phba, KERN_WARNING,
2824 					    BEISCSI_LOG_INIT,
2825 					    "BM_%d : No Virtual address for ULP : %d\n",
2826 					    ulp_num);
2827 
2828 			pasync_ctx->async_data.handle_base =
2829 				mem_descr->mem_array[0].virtual_address;
2830 
2831 			pasync_header_h =
2832 				(struct hd_async_handle *)
2833 				pasync_ctx->async_header.handle_base;
2834 			pasync_data_h =
2835 				(struct hd_async_handle *)
2836 				pasync_ctx->async_data.handle_base;
2837 
2838 			/* setup data buffers */
2839 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2840 			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2841 				     (ulp_num * MEM_DESCR_OFFSET);
2842 			if (mem_descr->mem_array[0].virtual_address) {
2843 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2844 					    "BM_%d : hwi_init_async_pdu_ctx"
2845 					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
2846 					    ulp_num,
2847 					    mem_descr->mem_array[0].
2848 					    virtual_address);
2849 			} else
2850 				beiscsi_log(phba, KERN_WARNING,
2851 					    BEISCSI_LOG_INIT,
2852 					    "BM_%d : No Virtual address for ULP : %d\n",
2853 					    ulp_num);
2854 
2855 			idx = 0;
2856 			pasync_ctx->async_data.pi = 0;
2857 			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2858 			pasync_ctx->async_data.va_base =
2859 				mem_descr->mem_array[idx].virtual_address;
2860 			pasync_ctx->async_data.pa_base.u.a64.address =
2861 				mem_descr->mem_array[idx].
2862 				bus_address.u.a64.address;
2863 
2864 			num_async_data = ((mem_descr->mem_array[idx].size) /
2865 					phba->params.defpdu_data_sz);
2866 			num_per_mem = 0;
2867 
2868 			for (index = 0;	index < BEISCSI_ASYNC_HDQ_SIZE
2869 					(phba, ulp_num); index++) {
2870 				pasync_header_h->cri = -1;
2871 				pasync_header_h->is_header = 1;
2872 				pasync_header_h->index = index;
2873 				INIT_LIST_HEAD(&pasync_header_h->link);
2874 				pasync_header_h->pbuffer =
2875 					(void *)((unsigned long)
2876 						 (pasync_ctx->
2877 						  async_header.va_base) +
2878 						 (p->defpdu_hdr_sz * index));
2879 
2880 				pasync_header_h->pa.u.a64.address =
2881 					pasync_ctx->async_header.pa_base.u.a64.
2882 					address + (p->defpdu_hdr_sz * index);
2883 
2884 				pasync_ctx->async_entry[index].header =
2885 					pasync_header_h;
2886 				pasync_header_h++;
2887 				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2888 						wq.list);
2889 
2890 				pasync_data_h->cri = -1;
2891 				pasync_data_h->is_header = 0;
2892 				pasync_data_h->index = index;
2893 				INIT_LIST_HEAD(&pasync_data_h->link);
2894 
2895 				if (!num_async_data) {
2896 					num_per_mem = 0;
2897 					idx++;
2898 					pasync_ctx->async_data.va_base =
2899 						mem_descr->mem_array[idx].
2900 						virtual_address;
2901 					pasync_ctx->async_data.pa_base.u.
2902 						a64.address =
2903 						mem_descr->mem_array[idx].
2904 						bus_address.u.a64.address;
2905 					num_async_data =
2906 						((mem_descr->mem_array[idx].
2907 						  size) /
2908 						 phba->params.defpdu_data_sz);
2909 				}
2910 				pasync_data_h->pbuffer =
2911 					(void *)((unsigned long)
2912 					(pasync_ctx->async_data.va_base) +
2913 					(p->defpdu_data_sz * num_per_mem));
2914 
2915 				pasync_data_h->pa.u.a64.address =
2916 					pasync_ctx->async_data.pa_base.u.a64.
2917 					address + (p->defpdu_data_sz *
2918 					num_per_mem);
2919 				num_per_mem++;
2920 				num_async_data--;
2921 
2922 				pasync_ctx->async_entry[index].data =
2923 					pasync_data_h;
2924 				pasync_data_h++;
2925 			}
2926 		}
2927 	}
2928 
2929 	return 0;
2930 }
2931 
2932 static int
2933 be_sgl_create_contiguous(void *virtual_address,
2934 			 u64 physical_address, u32 length,
2935 			 struct be_dma_mem *sgl)
2936 {
2937 	WARN_ON(!virtual_address);
2938 	WARN_ON(!physical_address);
2939 	WARN_ON(!length);
2940 	WARN_ON(!sgl);
2941 
2942 	sgl->va = virtual_address;
2943 	sgl->dma = (unsigned long)physical_address;
2944 	sgl->size = length;
2945 
2946 	return 0;
2947 }
2948 
2949 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2950 {
2951 	memset(sgl, 0, sizeof(*sgl));
2952 }
2953 
2954 static void
2955 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2956 		     struct mem_array *pmem, struct be_dma_mem *sgl)
2957 {
2958 	if (sgl->va)
2959 		be_sgl_destroy_contiguous(sgl);
2960 
2961 	be_sgl_create_contiguous(pmem->virtual_address,
2962 				 pmem->bus_address.u.a64.address,
2963 				 pmem->size, sgl);
2964 }
2965 
2966 static void
2967 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2968 			   struct mem_array *pmem, struct be_dma_mem *sgl)
2969 {
2970 	if (sgl->va)
2971 		be_sgl_destroy_contiguous(sgl);
2972 
2973 	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2974 				 pmem->bus_address.u.a64.address,
2975 				 pmem->size, sgl);
2976 }
2977 
2978 static int be_fill_queue(struct be_queue_info *q,
2979 		u16 len, u16 entry_size, void *vaddress)
2980 {
2981 	struct be_dma_mem *mem = &q->dma_mem;
2982 
2983 	memset(q, 0, sizeof(*q));
2984 	q->len = len;
2985 	q->entry_size = entry_size;
2986 	mem->size = len * entry_size;
2987 	mem->va = vaddress;
2988 	if (!mem->va)
2989 		return -ENOMEM;
2990 	memset(mem->va, 0, mem->size);
2991 	return 0;
2992 }
2993 
2994 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2995 			     struct hwi_context_memory *phwi_context)
2996 {
2997 	int ret = -ENOMEM, eq_for_mcc;
2998 	unsigned int i, num_eq_pages;
2999 	struct be_queue_info *eq;
3000 	struct be_dma_mem *mem;
3001 	void *eq_vaddress;
3002 	dma_addr_t paddr;
3003 
3004 	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
3005 				      sizeof(struct be_eq_entry));
3006 
3007 	if (phba->pcidev->msix_enabled)
3008 		eq_for_mcc = 1;
3009 	else
3010 		eq_for_mcc = 0;
3011 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3012 		eq = &phwi_context->be_eq[i].q;
3013 		mem = &eq->dma_mem;
3014 		phwi_context->be_eq[i].phba = phba;
3015 		eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3016 						   num_eq_pages * PAGE_SIZE,
3017 						   &paddr, GFP_KERNEL);
3018 		if (!eq_vaddress) {
3019 			ret = -ENOMEM;
3020 			goto create_eq_error;
3021 		}
3022 
3023 		mem->va = eq_vaddress;
3024 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
3025 				    sizeof(struct be_eq_entry), eq_vaddress);
3026 		if (ret) {
3027 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3028 				    "BM_%d : be_fill_queue Failed for EQ\n");
3029 			goto create_eq_error;
3030 		}
3031 
3032 		mem->dma = paddr;
3033 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3034 					    BEISCSI_EQ_DELAY_DEF);
3035 		if (ret) {
3036 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3037 				    "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
3038 			goto create_eq_error;
3039 		}
3040 
3041 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3042 			    "BM_%d : eqid = %d\n",
3043 			    phwi_context->be_eq[i].q.id);
3044 	}
3045 	return 0;
3046 
3047 create_eq_error:
3048 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3049 		eq = &phwi_context->be_eq[i].q;
3050 		mem = &eq->dma_mem;
3051 		if (mem->va)
3052 			dma_free_coherent(&phba->pcidev->dev, num_eq_pages
3053 					    * PAGE_SIZE,
3054 					    mem->va, mem->dma);
3055 	}
3056 	return ret;
3057 }
3058 
3059 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3060 			     struct hwi_context_memory *phwi_context)
3061 {
3062 	unsigned int i, num_cq_pages;
3063 	struct be_queue_info *cq, *eq;
3064 	struct be_dma_mem *mem;
3065 	struct be_eq_obj *pbe_eq;
3066 	void *cq_vaddress;
3067 	int ret = -ENOMEM;
3068 	dma_addr_t paddr;
3069 
3070 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
3071 				      sizeof(struct sol_cqe));
3072 
3073 	for (i = 0; i < phba->num_cpus; i++) {
3074 		cq = &phwi_context->be_cq[i];
3075 		eq = &phwi_context->be_eq[i].q;
3076 		pbe_eq = &phwi_context->be_eq[i];
3077 		pbe_eq->cq = cq;
3078 		pbe_eq->phba = phba;
3079 		mem = &cq->dma_mem;
3080 		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3081 						   num_cq_pages * PAGE_SIZE,
3082 						   &paddr, GFP_KERNEL);
3083 		if (!cq_vaddress) {
3084 			ret = -ENOMEM;
3085 			goto create_cq_error;
3086 		}
3087 
3088 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
3089 				    sizeof(struct sol_cqe), cq_vaddress);
3090 		if (ret) {
3091 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3092 				    "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
3093 			goto create_cq_error;
3094 		}
3095 
3096 		mem->dma = paddr;
3097 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3098 					    false, 0);
3099 		if (ret) {
3100 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3101 				    "BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n");
3102 			goto create_cq_error;
3103 		}
3104 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3105 			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3106 			    "iSCSI CQ CREATED\n", cq->id, eq->id);
3107 	}
3108 	return 0;
3109 
3110 create_cq_error:
3111 	for (i = 0; i < phba->num_cpus; i++) {
3112 		cq = &phwi_context->be_cq[i];
3113 		mem = &cq->dma_mem;
3114 		if (mem->va)
3115 			dma_free_coherent(&phba->pcidev->dev, num_cq_pages
3116 					    * PAGE_SIZE,
3117 					    mem->va, mem->dma);
3118 	}
3119 	return ret;
3120 }
3121 
3122 static int
3123 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3124 		       struct hwi_context_memory *phwi_context,
3125 		       struct hwi_controller *phwi_ctrlr,
3126 		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3127 {
3128 	unsigned int idx;
3129 	int ret;
3130 	struct be_queue_info *dq, *cq;
3131 	struct be_dma_mem *mem;
3132 	struct be_mem_descriptor *mem_descr;
3133 	void *dq_vaddress;
3134 
3135 	idx = 0;
3136 	dq = &phwi_context->be_def_hdrq[ulp_num];
3137 	cq = &phwi_context->be_cq[0];
3138 	mem = &dq->dma_mem;
3139 	mem_descr = phba->init_mem;
3140 	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3141 		    (ulp_num * MEM_DESCR_OFFSET);
3142 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3143 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3144 			    sizeof(struct phys_addr),
3145 			    sizeof(struct phys_addr), dq_vaddress);
3146 	if (ret) {
3147 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3148 			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3149 			    ulp_num);
3150 
3151 		return ret;
3152 	}
3153 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3154 				  bus_address.u.a64.address;
3155 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3156 					      def_pdu_ring_sz,
3157 					      phba->params.defpdu_hdr_sz,
3158 					      BEISCSI_DEFQ_HDR, ulp_num);
3159 	if (ret) {
3160 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3161 			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3162 			    ulp_num);
3163 
3164 		return ret;
3165 	}
3166 
3167 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3168 		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3169 		    ulp_num,
3170 		    phwi_context->be_def_hdrq[ulp_num].id);
3171 	return 0;
3172 }
3173 
3174 static int
3175 beiscsi_create_def_data(struct beiscsi_hba *phba,
3176 			struct hwi_context_memory *phwi_context,
3177 			struct hwi_controller *phwi_ctrlr,
3178 			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3179 {
3180 	unsigned int idx;
3181 	int ret;
3182 	struct be_queue_info *dataq, *cq;
3183 	struct be_dma_mem *mem;
3184 	struct be_mem_descriptor *mem_descr;
3185 	void *dq_vaddress;
3186 
3187 	idx = 0;
3188 	dataq = &phwi_context->be_def_dataq[ulp_num];
3189 	cq = &phwi_context->be_cq[0];
3190 	mem = &dataq->dma_mem;
3191 	mem_descr = phba->init_mem;
3192 	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3193 		    (ulp_num * MEM_DESCR_OFFSET);
3194 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3195 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3196 			    sizeof(struct phys_addr),
3197 			    sizeof(struct phys_addr), dq_vaddress);
3198 	if (ret) {
3199 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3200 			    "BM_%d : be_fill_queue Failed for DEF PDU "
3201 			    "DATA on ULP : %d\n",
3202 			    ulp_num);
3203 
3204 		return ret;
3205 	}
3206 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3207 				  bus_address.u.a64.address;
3208 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3209 					      def_pdu_ring_sz,
3210 					      phba->params.defpdu_data_sz,
3211 					      BEISCSI_DEFQ_DATA, ulp_num);
3212 	if (ret) {
3213 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3214 			    "BM_%d be_cmd_create_default_pdu_queue"
3215 			    " Failed for DEF PDU DATA on ULP : %d\n",
3216 			    ulp_num);
3217 		return ret;
3218 	}
3219 
3220 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3221 		    "BM_%d : iscsi def data id on ULP : %d is  %d\n",
3222 		    ulp_num,
3223 		    phwi_context->be_def_dataq[ulp_num].id);
3224 
3225 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3226 		    "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
3227 		    ulp_num);
3228 	return 0;
3229 }
3230 
3231 
3232 static int
3233 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3234 {
3235 	struct be_mem_descriptor *mem_descr;
3236 	struct mem_array *pm_arr;
3237 	struct be_dma_mem sgl;
3238 	int status, ulp_num;
3239 
3240 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3241 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3242 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3243 			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3244 				    (ulp_num * MEM_DESCR_OFFSET);
3245 			pm_arr = mem_descr->mem_array;
3246 
3247 			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3248 			status = be_cmd_iscsi_post_template_hdr(
3249 				 &phba->ctrl, &sgl);
3250 
3251 			if (status != 0) {
3252 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3253 					    "BM_%d : Post Template HDR Failed for "
3254 					    "ULP_%d\n", ulp_num);
3255 				return status;
3256 			}
3257 
3258 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3259 				    "BM_%d : Template HDR Pages Posted for "
3260 				    "ULP_%d\n", ulp_num);
3261 		}
3262 	}
3263 	return 0;
3264 }
3265 
3266 static int
3267 beiscsi_post_pages(struct beiscsi_hba *phba)
3268 {
3269 	struct be_mem_descriptor *mem_descr;
3270 	struct mem_array *pm_arr;
3271 	unsigned int page_offset, i;
3272 	struct be_dma_mem sgl;
3273 	int status, ulp_num = 0;
3274 
3275 	mem_descr = phba->init_mem;
3276 	mem_descr += HWI_MEM_SGE;
3277 	pm_arr = mem_descr->mem_array;
3278 
3279 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3280 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3281 			break;
3282 
3283 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3284 			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3285 	for (i = 0; i < mem_descr->num_elements; i++) {
3286 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3287 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3288 						page_offset,
3289 						(pm_arr->size / PAGE_SIZE));
3290 		page_offset += pm_arr->size / PAGE_SIZE;
3291 		if (status != 0) {
3292 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3293 				    "BM_%d : post sgl failed.\n");
3294 			return status;
3295 		}
3296 		pm_arr++;
3297 	}
3298 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3299 		    "BM_%d : POSTED PAGES\n");
3300 	return 0;
3301 }
3302 
3303 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3304 {
3305 	struct be_dma_mem *mem = &q->dma_mem;
3306 	if (mem->va) {
3307 		dma_free_coherent(&phba->pcidev->dev, mem->size,
3308 			mem->va, mem->dma);
3309 		mem->va = NULL;
3310 	}
3311 }
3312 
3313 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3314 		u16 len, u16 entry_size)
3315 {
3316 	struct be_dma_mem *mem = &q->dma_mem;
3317 
3318 	memset(q, 0, sizeof(*q));
3319 	q->len = len;
3320 	q->entry_size = entry_size;
3321 	mem->size = len * entry_size;
3322 	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
3323 				     GFP_KERNEL);
3324 	if (!mem->va)
3325 		return -ENOMEM;
3326 	return 0;
3327 }
3328 
3329 static int
3330 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3331 			 struct hwi_context_memory *phwi_context,
3332 			 struct hwi_controller *phwi_ctrlr)
3333 {
3334 	unsigned int num_wrb_rings;
3335 	u64 pa_addr_lo;
3336 	unsigned int idx, num, i, ulp_num;
3337 	struct mem_array *pwrb_arr;
3338 	void *wrb_vaddr;
3339 	struct be_dma_mem sgl;
3340 	struct be_mem_descriptor *mem_descr;
3341 	struct hwi_wrb_context *pwrb_context;
3342 	int status;
3343 	uint8_t ulp_count = 0, ulp_base_num = 0;
3344 	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3345 
3346 	idx = 0;
3347 	mem_descr = phba->init_mem;
3348 	mem_descr += HWI_MEM_WRB;
3349 	pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
3350 				 sizeof(*pwrb_arr),
3351 				 GFP_KERNEL);
3352 	if (!pwrb_arr) {
3353 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3354 			    "BM_%d : Memory alloc failed in create wrb ring.\n");
3355 		return -ENOMEM;
3356 	}
3357 	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3358 	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3359 	num_wrb_rings = mem_descr->mem_array[idx].size /
3360 		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3361 
3362 	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3363 		if (num_wrb_rings) {
3364 			pwrb_arr[num].virtual_address = wrb_vaddr;
3365 			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
3366 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3367 					    sizeof(struct iscsi_wrb);
3368 			wrb_vaddr += pwrb_arr[num].size;
3369 			pa_addr_lo += pwrb_arr[num].size;
3370 			num_wrb_rings--;
3371 		} else {
3372 			idx++;
3373 			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3374 			pa_addr_lo = mem_descr->mem_array[idx].
3375 					bus_address.u.a64.address;
3376 			num_wrb_rings = mem_descr->mem_array[idx].size /
3377 					(phba->params.wrbs_per_cxn *
3378 					sizeof(struct iscsi_wrb));
3379 			pwrb_arr[num].virtual_address = wrb_vaddr;
3380 			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3381 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3382 						 sizeof(struct iscsi_wrb);
3383 			wrb_vaddr += pwrb_arr[num].size;
3384 			pa_addr_lo += pwrb_arr[num].size;
3385 			num_wrb_rings--;
3386 		}
3387 	}
3388 
3389 	/* Get the ULP Count */
3390 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3391 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3392 			ulp_count++;
3393 			ulp_base_num = ulp_num;
3394 			cid_count_ulp[ulp_num] =
3395 				BEISCSI_GET_CID_COUNT(phba, ulp_num);
3396 		}
3397 
3398 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3399 		if (ulp_count > 1) {
3400 			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3401 
3402 			if (!cid_count_ulp[ulp_base_num])
3403 				ulp_base_num = (ulp_base_num + 1) %
3404 						BEISCSI_ULP_COUNT;
3405 
3406 			cid_count_ulp[ulp_base_num]--;
3407 		}
3408 
3409 
3410 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3411 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3412 					    &phwi_context->be_wrbq[i],
3413 					    &phwi_ctrlr->wrb_context[i],
3414 					    ulp_base_num);
3415 		if (status != 0) {
3416 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3417 				    "BM_%d : wrbq create failed.");
3418 			kfree(pwrb_arr);
3419 			return status;
3420 		}
3421 		pwrb_context = &phwi_ctrlr->wrb_context[i];
3422 		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3423 	}
3424 	kfree(pwrb_arr);
3425 	return 0;
3426 }
3427 
3428 static void free_wrb_handles(struct beiscsi_hba *phba)
3429 {
3430 	unsigned int index;
3431 	struct hwi_controller *phwi_ctrlr;
3432 	struct hwi_wrb_context *pwrb_context;
3433 
3434 	phwi_ctrlr = phba->phwi_ctrlr;
3435 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3436 		pwrb_context = &phwi_ctrlr->wrb_context[index];
3437 		kfree(pwrb_context->pwrb_handle_base);
3438 		kfree(pwrb_context->pwrb_handle_basestd);
3439 	}
3440 }
3441 
3442 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3443 {
3444 	struct be_ctrl_info *ctrl = &phba->ctrl;
3445 	struct be_dma_mem *ptag_mem;
3446 	struct be_queue_info *q;
3447 	int i, tag;
3448 
3449 	q = &phba->ctrl.mcc_obj.q;
3450 	for (i = 0; i < MAX_MCC_CMD; i++) {
3451 		tag = i + 1;
3452 		if (!test_bit(MCC_TAG_STATE_RUNNING,
3453 			      &ctrl->ptag_state[tag].tag_state))
3454 			continue;
3455 
3456 		if (test_bit(MCC_TAG_STATE_TIMEOUT,
3457 			     &ctrl->ptag_state[tag].tag_state)) {
3458 			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
3459 			if (ptag_mem->size) {
3460 				dma_free_coherent(&ctrl->pdev->dev,
3461 						    ptag_mem->size,
3462 						    ptag_mem->va,
3463 						    ptag_mem->dma);
3464 				ptag_mem->size = 0;
3465 			}
3466 			continue;
3467 		}
3468 		/**
3469 		 * If MCC is still active and waiting then wake up the process.
3470 		 * We are here only because port is going offline. The process
3471 		 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
3472 		 * returned for the operation and allocated memory cleaned up.
3473 		 */
3474 		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
3475 			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
3476 			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
3477 			wake_up_interruptible(&ctrl->mcc_wait[tag]);
3478 			/*
3479 			 * Control tag info gets reinitialized in enable
3480 			 * so wait for the process to clear running state.
3481 			 */
3482 			while (test_bit(MCC_TAG_STATE_RUNNING,
3483 					&ctrl->ptag_state[tag].tag_state))
3484 				schedule_timeout_uninterruptible(HZ);
3485 		}
3486 		/**
3487 		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
3488 		 * MCC_TAG_STATE_IGNORE nothing needs to done.
3489 		 */
3490 	}
3491 	if (q->created) {
3492 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3493 		be_queue_free(phba, q);
3494 	}
3495 
3496 	q = &phba->ctrl.mcc_obj.cq;
3497 	if (q->created) {
3498 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3499 		be_queue_free(phba, q);
3500 	}
3501 }
3502 
3503 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3504 				struct hwi_context_memory *phwi_context)
3505 {
3506 	struct be_queue_info *q, *cq;
3507 	struct be_ctrl_info *ctrl = &phba->ctrl;
3508 
3509 	/* Alloc MCC compl queue */
3510 	cq = &phba->ctrl.mcc_obj.cq;
3511 	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3512 			sizeof(struct be_mcc_compl)))
3513 		goto err;
3514 	/* Ask BE to create MCC compl queue; */
3515 	if (phba->pcidev->msix_enabled) {
3516 		if (beiscsi_cmd_cq_create(ctrl, cq,
3517 					&phwi_context->be_eq[phba->num_cpus].q,
3518 					false, true, 0))
3519 			goto mcc_cq_free;
3520 	} else {
3521 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3522 					  false, true, 0))
3523 			goto mcc_cq_free;
3524 	}
3525 
3526 	/* Alloc MCC queue */
3527 	q = &phba->ctrl.mcc_obj.q;
3528 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3529 		goto mcc_cq_destroy;
3530 
3531 	/* Ask BE to create MCC queue */
3532 	if (beiscsi_cmd_mccq_create(phba, q, cq))
3533 		goto mcc_q_free;
3534 
3535 	return 0;
3536 
3537 mcc_q_free:
3538 	be_queue_free(phba, q);
3539 mcc_cq_destroy:
3540 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3541 mcc_cq_free:
3542 	be_queue_free(phba, cq);
3543 err:
3544 	return -ENOMEM;
3545 }
3546 
3547 static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
3548 {
3549 	int nvec = 1;
3550 
3551 	switch (phba->generation) {
3552 	case BE_GEN2:
3553 	case BE_GEN3:
3554 		nvec = BEISCSI_MAX_NUM_CPUS + 1;
3555 		break;
3556 	case BE_GEN4:
3557 		nvec = phba->fw_config.eqid_count;
3558 		break;
3559 	default:
3560 		nvec = 2;
3561 		break;
3562 	}
3563 
3564 	/* if eqid_count == 1 fall back to INTX */
3565 	if (enable_msix && nvec > 1) {
3566 		struct irq_affinity desc = { .post_vectors = 1 };
3567 
3568 		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
3569 				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
3570 			phba->num_cpus = nvec - 1;
3571 			return;
3572 		}
3573 	}
3574 
3575 	phba->num_cpus = 1;
3576 }
3577 
3578 static void hwi_purge_eq(struct beiscsi_hba *phba)
3579 {
3580 	struct hwi_controller *phwi_ctrlr;
3581 	struct hwi_context_memory *phwi_context;
3582 	struct be_queue_info *eq;
3583 	struct be_eq_entry *eqe = NULL;
3584 	int i, eq_msix;
3585 	unsigned int num_processed;
3586 
3587 	if (beiscsi_hba_in_error(phba))
3588 		return;
3589 
3590 	phwi_ctrlr = phba->phwi_ctrlr;
3591 	phwi_context = phwi_ctrlr->phwi_ctxt;
3592 	if (phba->pcidev->msix_enabled)
3593 		eq_msix = 1;
3594 	else
3595 		eq_msix = 0;
3596 
3597 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3598 		eq = &phwi_context->be_eq[i].q;
3599 		eqe = queue_tail_node(eq);
3600 		num_processed = 0;
3601 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3602 					& EQE_VALID_MASK) {
3603 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3604 			queue_tail_inc(eq);
3605 			eqe = queue_tail_node(eq);
3606 			num_processed++;
3607 		}
3608 
3609 		if (num_processed)
3610 			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
3611 	}
3612 }
3613 
3614 static void hwi_cleanup_port(struct beiscsi_hba *phba)
3615 {
3616 	struct be_queue_info *q;
3617 	struct be_ctrl_info *ctrl = &phba->ctrl;
3618 	struct hwi_controller *phwi_ctrlr;
3619 	struct hwi_context_memory *phwi_context;
3620 	int i, eq_for_mcc, ulp_num;
3621 
3622 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3623 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3624 			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
3625 
3626 	/**
3627 	 * Purge all EQ entries that may have been left out. This is to
3628 	 * workaround a problem we've seen occasionally where driver gets an
3629 	 * interrupt with EQ entry bit set after stopping the controller.
3630 	 */
3631 	hwi_purge_eq(phba);
3632 
3633 	phwi_ctrlr = phba->phwi_ctrlr;
3634 	phwi_context = phwi_ctrlr->phwi_ctxt;
3635 
3636 	be_cmd_iscsi_remove_template_hdr(ctrl);
3637 
3638 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3639 		q = &phwi_context->be_wrbq[i];
3640 		if (q->created)
3641 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3642 	}
3643 	kfree(phwi_context->be_wrbq);
3644 	free_wrb_handles(phba);
3645 
3646 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3647 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3648 
3649 			q = &phwi_context->be_def_hdrq[ulp_num];
3650 			if (q->created)
3651 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3652 
3653 			q = &phwi_context->be_def_dataq[ulp_num];
3654 			if (q->created)
3655 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3656 		}
3657 	}
3658 
3659 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3660 
3661 	for (i = 0; i < (phba->num_cpus); i++) {
3662 		q = &phwi_context->be_cq[i];
3663 		if (q->created) {
3664 			be_queue_free(phba, q);
3665 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3666 		}
3667 	}
3668 
3669 	be_mcc_queues_destroy(phba);
3670 	if (phba->pcidev->msix_enabled)
3671 		eq_for_mcc = 1;
3672 	else
3673 		eq_for_mcc = 0;
3674 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3675 		q = &phwi_context->be_eq[i].q;
3676 		if (q->created) {
3677 			be_queue_free(phba, q);
3678 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3679 		}
3680 	}
3681 	/* this ensures complete FW cleanup */
3682 	beiscsi_cmd_function_reset(phba);
3683 	/* last communication, indicate driver is unloading */
3684 	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
3685 }
3686 
3687 static int hwi_init_port(struct beiscsi_hba *phba)
3688 {
3689 	struct hwi_controller *phwi_ctrlr;
3690 	struct hwi_context_memory *phwi_context;
3691 	unsigned int def_pdu_ring_sz;
3692 	struct be_ctrl_info *ctrl = &phba->ctrl;
3693 	int status, ulp_num;
3694 	u16 nbufs;
3695 
3696 	phwi_ctrlr = phba->phwi_ctrlr;
3697 	phwi_context = phwi_ctrlr->phwi_ctxt;
3698 	/* set port optic state to unknown */
3699 	phba->optic_state = 0xff;
3700 
3701 	status = beiscsi_create_eqs(phba, phwi_context);
3702 	if (status != 0) {
3703 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3704 			    "BM_%d : EQ not created\n");
3705 		goto error;
3706 	}
3707 
3708 	status = be_mcc_queues_create(phba, phwi_context);
3709 	if (status != 0)
3710 		goto error;
3711 
3712 	status = beiscsi_check_supported_fw(ctrl, phba);
3713 	if (status != 0) {
3714 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3715 			    "BM_%d : Unsupported fw version\n");
3716 		goto error;
3717 	}
3718 
3719 	status = beiscsi_create_cqs(phba, phwi_context);
3720 	if (status != 0) {
3721 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3722 			    "BM_%d : CQ not created\n");
3723 		goto error;
3724 	}
3725 
3726 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3727 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3728 			nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries;
3729 			def_pdu_ring_sz = nbufs * sizeof(struct phys_addr);
3730 
3731 			status = beiscsi_create_def_hdr(phba, phwi_context,
3732 							phwi_ctrlr,
3733 							def_pdu_ring_sz,
3734 							ulp_num);
3735 			if (status != 0) {
3736 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3737 					    "BM_%d : Default Header not created for ULP : %d\n",
3738 					    ulp_num);
3739 				goto error;
3740 			}
3741 
3742 			status = beiscsi_create_def_data(phba, phwi_context,
3743 							 phwi_ctrlr,
3744 							 def_pdu_ring_sz,
3745 							 ulp_num);
3746 			if (status != 0) {
3747 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3748 					    "BM_%d : Default Data not created for ULP : %d\n",
3749 					    ulp_num);
3750 				goto error;
3751 			}
3752 			/**
3753 			 * Now that the default PDU rings have been created,
3754 			 * let EP know about it.
3755 			 */
3756 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3757 						 ulp_num, nbufs);
3758 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3759 						 ulp_num, nbufs);
3760 		}
3761 	}
3762 
3763 	status = beiscsi_post_pages(phba);
3764 	if (status != 0) {
3765 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3766 			    "BM_%d : Post SGL Pages Failed\n");
3767 		goto error;
3768 	}
3769 
3770 	status = beiscsi_post_template_hdr(phba);
3771 	if (status != 0) {
3772 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3773 			    "BM_%d : Template HDR Posting for CXN Failed\n");
3774 	}
3775 
3776 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
3777 	if (status != 0) {
3778 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3779 			    "BM_%d : WRB Rings not created\n");
3780 		goto error;
3781 	}
3782 
3783 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3784 		uint16_t async_arr_idx = 0;
3785 
3786 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3787 			uint16_t cri = 0;
3788 			struct hd_async_context *pasync_ctx;
3789 
3790 			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3791 				     phwi_ctrlr, ulp_num);
3792 			for (cri = 0; cri <
3793 			     phba->params.cxns_per_ctrl; cri++) {
3794 				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3795 					       (phwi_ctrlr, cri))
3796 					pasync_ctx->cid_to_async_cri_map[
3797 					phwi_ctrlr->wrb_context[cri].cid] =
3798 					async_arr_idx++;
3799 			}
3800 		}
3801 	}
3802 
3803 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3804 		    "BM_%d : hwi_init_port success\n");
3805 	return 0;
3806 
3807 error:
3808 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3809 		    "BM_%d : hwi_init_port failed");
3810 	hwi_cleanup_port(phba);
3811 	return status;
3812 }
3813 
3814 static int hwi_init_controller(struct beiscsi_hba *phba)
3815 {
3816 	struct hwi_controller *phwi_ctrlr;
3817 
3818 	phwi_ctrlr = phba->phwi_ctrlr;
3819 	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3820 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3821 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3822 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3823 			    "BM_%d :  phwi_ctrlr->phwi_ctxt=%p\n",
3824 			    phwi_ctrlr->phwi_ctxt);
3825 	} else {
3826 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3827 			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3828 			    "than one element.Failing to load\n");
3829 		return -ENOMEM;
3830 	}
3831 
3832 	iscsi_init_global_templates(phba);
3833 	if (beiscsi_init_wrb_handle(phba))
3834 		return -ENOMEM;
3835 
3836 	if (hwi_init_async_pdu_ctx(phba)) {
3837 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3838 			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
3839 		return -ENOMEM;
3840 	}
3841 
3842 	if (hwi_init_port(phba) != 0) {
3843 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3844 			    "BM_%d : hwi_init_controller failed\n");
3845 
3846 		return -ENOMEM;
3847 	}
3848 	return 0;
3849 }
3850 
3851 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3852 {
3853 	struct be_mem_descriptor *mem_descr;
3854 	int i, j;
3855 
3856 	mem_descr = phba->init_mem;
3857 	for (i = 0; i < SE_MEM_MAX; i++) {
3858 		for (j = mem_descr->num_elements; j > 0; j--) {
3859 			dma_free_coherent(&phba->pcidev->dev,
3860 			  mem_descr->mem_array[j - 1].size,
3861 			  mem_descr->mem_array[j - 1].virtual_address,
3862 			  (unsigned long)mem_descr->mem_array[j - 1].
3863 			  bus_address.u.a64.address);
3864 		}
3865 
3866 		kfree(mem_descr->mem_array);
3867 		mem_descr++;
3868 	}
3869 	kfree(phba->init_mem);
3870 	kfree(phba->phwi_ctrlr->wrb_context);
3871 	kfree(phba->phwi_ctrlr);
3872 }
3873 
3874 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3875 {
3876 	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3877 	struct sgl_handle *psgl_handle;
3878 	struct iscsi_sge *pfrag;
3879 	unsigned int arr_index, i, idx;
3880 	unsigned int ulp_icd_start, ulp_num = 0;
3881 
3882 	phba->io_sgl_hndl_avbl = 0;
3883 	phba->eh_sgl_hndl_avbl = 0;
3884 
3885 	mem_descr_sglh = phba->init_mem;
3886 	mem_descr_sglh += HWI_MEM_SGLH;
3887 	if (1 == mem_descr_sglh->num_elements) {
3888 		phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl,
3889 						 sizeof(struct sgl_handle *),
3890 						 GFP_KERNEL);
3891 		if (!phba->io_sgl_hndl_base) {
3892 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3893 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
3894 			return -ENOMEM;
3895 		}
3896 		phba->eh_sgl_hndl_base =
3897 			kcalloc(phba->params.icds_per_ctrl -
3898 					phba->params.ios_per_ctrl,
3899 				sizeof(struct sgl_handle *), GFP_KERNEL);
3900 		if (!phba->eh_sgl_hndl_base) {
3901 			kfree(phba->io_sgl_hndl_base);
3902 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3903 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
3904 			return -ENOMEM;
3905 		}
3906 	} else {
3907 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3908 			    "BM_%d : HWI_MEM_SGLH is more than one element."
3909 			    "Failing to load\n");
3910 		return -ENOMEM;
3911 	}
3912 
3913 	arr_index = 0;
3914 	idx = 0;
3915 	while (idx < mem_descr_sglh->num_elements) {
3916 		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3917 
3918 		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3919 		      sizeof(struct sgl_handle)); i++) {
3920 			if (arr_index < phba->params.ios_per_ctrl) {
3921 				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3922 				phba->io_sgl_hndl_avbl++;
3923 				arr_index++;
3924 			} else {
3925 				phba->eh_sgl_hndl_base[arr_index -
3926 					phba->params.ios_per_ctrl] =
3927 								psgl_handle;
3928 				arr_index++;
3929 				phba->eh_sgl_hndl_avbl++;
3930 			}
3931 			psgl_handle++;
3932 		}
3933 		idx++;
3934 	}
3935 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3936 		    "BM_%d : phba->io_sgl_hndl_avbl=%d "
3937 		    "phba->eh_sgl_hndl_avbl=%d\n",
3938 		    phba->io_sgl_hndl_avbl,
3939 		    phba->eh_sgl_hndl_avbl);
3940 
3941 	mem_descr_sg = phba->init_mem;
3942 	mem_descr_sg += HWI_MEM_SGE;
3943 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3944 		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3945 		    mem_descr_sg->num_elements);
3946 
3947 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3948 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3949 			break;
3950 
3951 	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
3952 
3953 	arr_index = 0;
3954 	idx = 0;
3955 	while (idx < mem_descr_sg->num_elements) {
3956 		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3957 
3958 		for (i = 0;
3959 		     i < (mem_descr_sg->mem_array[idx].size) /
3960 		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3961 		     i++) {
3962 			if (arr_index < phba->params.ios_per_ctrl)
3963 				psgl_handle = phba->io_sgl_hndl_base[arr_index];
3964 			else
3965 				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3966 						phba->params.ios_per_ctrl];
3967 			psgl_handle->pfrag = pfrag;
3968 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3969 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3970 			pfrag += phba->params.num_sge_per_io;
3971 			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
3972 		}
3973 		idx++;
3974 	}
3975 	phba->io_sgl_free_index = 0;
3976 	phba->io_sgl_alloc_index = 0;
3977 	phba->eh_sgl_free_index = 0;
3978 	phba->eh_sgl_alloc_index = 0;
3979 	return 0;
3980 }
3981 
3982 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3983 {
3984 	int ret;
3985 	uint16_t i, ulp_num;
3986 	struct ulp_cid_info *ptr_cid_info = NULL;
3987 
3988 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3989 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
3990 			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
3991 					       GFP_KERNEL);
3992 
3993 			if (!ptr_cid_info) {
3994 				ret = -ENOMEM;
3995 				goto free_memory;
3996 			}
3997 
3998 			/* Allocate memory for CID array */
3999 			ptr_cid_info->cid_array =
4000 				kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
4001 					sizeof(*ptr_cid_info->cid_array),
4002 					GFP_KERNEL);
4003 			if (!ptr_cid_info->cid_array) {
4004 				kfree(ptr_cid_info);
4005 				ptr_cid_info = NULL;
4006 				ret = -ENOMEM;
4007 
4008 				goto free_memory;
4009 			}
4010 			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4011 						   phba, ulp_num);
4012 
4013 			/* Save the cid_info_array ptr */
4014 			phba->cid_array_info[ulp_num] = ptr_cid_info;
4015 		}
4016 	}
4017 	phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
4018 				 sizeof(struct iscsi_endpoint *),
4019 				 GFP_KERNEL);
4020 	if (!phba->ep_array) {
4021 		ret = -ENOMEM;
4022 
4023 		goto free_memory;
4024 	}
4025 
4026 	phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
4027 				   sizeof(struct beiscsi_conn *),
4028 				   GFP_KERNEL);
4029 	if (!phba->conn_table) {
4030 		kfree(phba->ep_array);
4031 		phba->ep_array = NULL;
4032 		ret = -ENOMEM;
4033 
4034 		goto free_memory;
4035 	}
4036 
4037 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4038 		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4039 
4040 		ptr_cid_info = phba->cid_array_info[ulp_num];
4041 		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4042 			phba->phwi_ctrlr->wrb_context[i].cid;
4043 
4044 	}
4045 
4046 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4047 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4048 			ptr_cid_info = phba->cid_array_info[ulp_num];
4049 
4050 			ptr_cid_info->cid_alloc = 0;
4051 			ptr_cid_info->cid_free = 0;
4052 		}
4053 	}
4054 	return 0;
4055 
4056 free_memory:
4057 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4058 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4059 			ptr_cid_info = phba->cid_array_info[ulp_num];
4060 
4061 			if (ptr_cid_info) {
4062 				kfree(ptr_cid_info->cid_array);
4063 				kfree(ptr_cid_info);
4064 				phba->cid_array_info[ulp_num] = NULL;
4065 			}
4066 		}
4067 	}
4068 
4069 	return ret;
4070 }
4071 
4072 static void hwi_enable_intr(struct beiscsi_hba *phba)
4073 {
4074 	struct be_ctrl_info *ctrl = &phba->ctrl;
4075 	struct hwi_controller *phwi_ctrlr;
4076 	struct hwi_context_memory *phwi_context;
4077 	struct be_queue_info *eq;
4078 	u8 __iomem *addr;
4079 	u32 reg, i;
4080 	u32 enabled;
4081 
4082 	phwi_ctrlr = phba->phwi_ctrlr;
4083 	phwi_context = phwi_ctrlr->phwi_ctxt;
4084 
4085 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4086 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4087 	reg = ioread32(addr);
4088 
4089 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4090 	if (!enabled) {
4091 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4092 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4093 			    "BM_%d : reg =x%08x addr=%p\n", reg, addr);
4094 		iowrite32(reg, addr);
4095 	}
4096 
4097 	if (!phba->pcidev->msix_enabled) {
4098 		eq = &phwi_context->be_eq[0].q;
4099 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4100 			    "BM_%d : eq->id=%d\n", eq->id);
4101 
4102 		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4103 	} else {
4104 		for (i = 0; i <= phba->num_cpus; i++) {
4105 			eq = &phwi_context->be_eq[i].q;
4106 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4107 				    "BM_%d : eq->id=%d\n", eq->id);
4108 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4109 		}
4110 	}
4111 }
4112 
4113 static void hwi_disable_intr(struct beiscsi_hba *phba)
4114 {
4115 	struct be_ctrl_info *ctrl = &phba->ctrl;
4116 
4117 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4118 	u32 reg = ioread32(addr);
4119 
4120 	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4121 	if (enabled) {
4122 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4123 		iowrite32(reg, addr);
4124 	} else
4125 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4126 			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
4127 }
4128 
4129 static int beiscsi_init_port(struct beiscsi_hba *phba)
4130 {
4131 	int ret;
4132 
4133 	ret = hwi_init_controller(phba);
4134 	if (ret < 0) {
4135 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4136 			    "BM_%d : init controller failed\n");
4137 		return ret;
4138 	}
4139 	ret = beiscsi_init_sgl_handle(phba);
4140 	if (ret < 0) {
4141 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4142 			    "BM_%d : init sgl handles failed\n");
4143 		goto cleanup_port;
4144 	}
4145 
4146 	ret = hba_setup_cid_tbls(phba);
4147 	if (ret < 0) {
4148 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4149 			    "BM_%d : setup CID table failed\n");
4150 		kfree(phba->io_sgl_hndl_base);
4151 		kfree(phba->eh_sgl_hndl_base);
4152 		goto cleanup_port;
4153 	}
4154 	return ret;
4155 
4156 cleanup_port:
4157 	hwi_cleanup_port(phba);
4158 	return ret;
4159 }
4160 
4161 static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
4162 {
4163 	struct ulp_cid_info *ptr_cid_info = NULL;
4164 	int ulp_num;
4165 
4166 	kfree(phba->io_sgl_hndl_base);
4167 	kfree(phba->eh_sgl_hndl_base);
4168 	kfree(phba->ep_array);
4169 	kfree(phba->conn_table);
4170 
4171 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4172 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4173 			ptr_cid_info = phba->cid_array_info[ulp_num];
4174 
4175 			if (ptr_cid_info) {
4176 				kfree(ptr_cid_info->cid_array);
4177 				kfree(ptr_cid_info);
4178 				phba->cid_array_info[ulp_num] = NULL;
4179 			}
4180 		}
4181 	}
4182 }
4183 
4184 /**
4185  * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4186  * @beiscsi_conn: ptr to the conn to be cleaned up
4187  * @task: ptr to iscsi_task resource to be freed.
4188  *
4189  * Free driver mgmt resources binded to CXN.
4190  **/
4191 void
4192 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4193 				struct iscsi_task *task)
4194 {
4195 	struct beiscsi_io_task *io_task;
4196 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4197 	struct hwi_wrb_context *pwrb_context;
4198 	struct hwi_controller *phwi_ctrlr;
4199 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4200 				beiscsi_conn->beiscsi_conn_cid);
4201 
4202 	phwi_ctrlr = phba->phwi_ctrlr;
4203 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4204 
4205 	io_task = task->dd_data;
4206 
4207 	if (io_task->pwrb_handle) {
4208 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4209 		io_task->pwrb_handle = NULL;
4210 	}
4211 
4212 	if (io_task->psgl_handle) {
4213 		free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4214 		io_task->psgl_handle = NULL;
4215 	}
4216 
4217 	if (io_task->mtask_addr) {
4218 		dma_unmap_single(&phba->pcidev->dev,
4219 				 io_task->mtask_addr,
4220 				 io_task->mtask_data_count,
4221 				 DMA_TO_DEVICE);
4222 		io_task->mtask_addr = 0;
4223 	}
4224 }
4225 
4226 /**
4227  * beiscsi_cleanup_task()- Free driver resources of the task
4228  * @task: ptr to the iscsi task
4229  *
4230  **/
4231 static void beiscsi_cleanup_task(struct iscsi_task *task)
4232 {
4233 	struct beiscsi_io_task *io_task = task->dd_data;
4234 	struct iscsi_conn *conn = task->conn;
4235 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4236 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4237 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4238 	struct hwi_wrb_context *pwrb_context;
4239 	struct hwi_controller *phwi_ctrlr;
4240 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4241 			     beiscsi_conn->beiscsi_conn_cid);
4242 
4243 	phwi_ctrlr = phba->phwi_ctrlr;
4244 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4245 
4246 	if (io_task->cmd_bhs) {
4247 		dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4248 			      io_task->bhs_pa.u.a64.address);
4249 		io_task->cmd_bhs = NULL;
4250 		task->hdr = NULL;
4251 	}
4252 
4253 	if (task->sc) {
4254 		if (io_task->pwrb_handle) {
4255 			free_wrb_handle(phba, pwrb_context,
4256 					io_task->pwrb_handle);
4257 			io_task->pwrb_handle = NULL;
4258 		}
4259 
4260 		if (io_task->psgl_handle) {
4261 			free_io_sgl_handle(phba, io_task->psgl_handle);
4262 			io_task->psgl_handle = NULL;
4263 		}
4264 
4265 		if (io_task->scsi_cmnd) {
4266 			if (io_task->num_sg)
4267 				scsi_dma_unmap(io_task->scsi_cmnd);
4268 			io_task->scsi_cmnd = NULL;
4269 		}
4270 	} else {
4271 		if (!beiscsi_conn->login_in_progress)
4272 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4273 	}
4274 }
4275 
4276 void
4277 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4278 			   struct beiscsi_offload_params *params)
4279 {
4280 	struct wrb_handle *pwrb_handle;
4281 	struct hwi_wrb_context *pwrb_context = NULL;
4282 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4283 	struct iscsi_task *task = beiscsi_conn->task;
4284 	struct iscsi_session *session = task->conn->session;
4285 	u32 doorbell = 0;
4286 
4287 	/*
4288 	 * We can always use 0 here because it is reserved by libiscsi for
4289 	 * login/startup related tasks.
4290 	 */
4291 	beiscsi_conn->login_in_progress = 0;
4292 	spin_lock_bh(&session->back_lock);
4293 	beiscsi_cleanup_task(task);
4294 	spin_unlock_bh(&session->back_lock);
4295 
4296 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
4297 				       &pwrb_context);
4298 
4299 	/* Check for the adapter family */
4300 	if (is_chip_be2_be3r(phba))
4301 		beiscsi_offload_cxn_v0(params, pwrb_handle,
4302 				       phba->init_mem,
4303 				       pwrb_context);
4304 	else
4305 		beiscsi_offload_cxn_v2(params, pwrb_handle,
4306 				       pwrb_context);
4307 
4308 	be_dws_le_to_cpu(pwrb_handle->pwrb,
4309 			 sizeof(struct iscsi_target_context_update_wrb));
4310 
4311 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4312 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4313 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
4314 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4315 	iowrite32(doorbell, phba->db_va +
4316 		  beiscsi_conn->doorbell_offset);
4317 
4318 	/*
4319 	 * There is no completion for CONTEXT_UPDATE. The completion of next
4320 	 * WRB posted guarantees FW's processing and DMA'ing of it.
4321 	 * Use beiscsi_put_wrb_handle to put it back in the pool which makes
4322 	 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
4323 	 */
4324 	beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
4325 			       phba->params.wrbs_per_cxn);
4326 	beiscsi_log(phba, KERN_INFO,
4327 		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4328 		    "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
4329 		    pwrb_handle, pwrb_context->free_index,
4330 		    pwrb_context->wrb_handles_available);
4331 }
4332 
4333 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4334 			      int *index, int *age)
4335 {
4336 	*index = (int)itt;
4337 	if (age)
4338 		*age = conn->session->age;
4339 }
4340 
4341 /**
4342  * beiscsi_alloc_pdu - allocates pdu and related resources
4343  * @task: libiscsi task
4344  * @opcode: opcode of pdu for task
4345  *
4346  * This is called with the session lock held. It will allocate
4347  * the wrb and sgl if needed for the command. And it will prep
4348  * the pdu's itt. beiscsi_parse_pdu will later translate
4349  * the pdu itt to the libiscsi task itt.
4350  */
4351 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4352 {
4353 	struct beiscsi_io_task *io_task = task->dd_data;
4354 	struct iscsi_conn *conn = task->conn;
4355 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4356 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4357 	struct hwi_wrb_context *pwrb_context;
4358 	struct hwi_controller *phwi_ctrlr;
4359 	itt_t itt;
4360 	uint16_t cri_index = 0;
4361 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4362 	dma_addr_t paddr;
4363 
4364 	io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
4365 					  GFP_ATOMIC, &paddr);
4366 	if (!io_task->cmd_bhs)
4367 		return -ENOMEM;
4368 	io_task->bhs_pa.u.a64.address = paddr;
4369 	io_task->libiscsi_itt = (itt_t)task->itt;
4370 	io_task->conn = beiscsi_conn;
4371 
4372 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4373 	task->hdr_max = sizeof(struct be_cmd_bhs);
4374 	io_task->psgl_handle = NULL;
4375 	io_task->pwrb_handle = NULL;
4376 
4377 	if (task->sc) {
4378 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
4379 		if (!io_task->psgl_handle) {
4380 			beiscsi_log(phba, KERN_ERR,
4381 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4382 				    "BM_%d : Alloc of IO_SGL_ICD Failed "
4383 				    "for the CID : %d\n",
4384 				    beiscsi_conn->beiscsi_conn_cid);
4385 			goto free_hndls;
4386 		}
4387 		io_task->pwrb_handle = alloc_wrb_handle(phba,
4388 					beiscsi_conn->beiscsi_conn_cid,
4389 					&io_task->pwrb_context);
4390 		if (!io_task->pwrb_handle) {
4391 			beiscsi_log(phba, KERN_ERR,
4392 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4393 				    "BM_%d : Alloc of WRB_HANDLE Failed "
4394 				    "for the CID : %d\n",
4395 				    beiscsi_conn->beiscsi_conn_cid);
4396 			goto free_io_hndls;
4397 		}
4398 	} else {
4399 		io_task->scsi_cmnd = NULL;
4400 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4401 			beiscsi_conn->task = task;
4402 			if (!beiscsi_conn->login_in_progress) {
4403 				io_task->psgl_handle = (struct sgl_handle *)
4404 						alloc_mgmt_sgl_handle(phba);
4405 				if (!io_task->psgl_handle) {
4406 					beiscsi_log(phba, KERN_ERR,
4407 						    BEISCSI_LOG_IO |
4408 						    BEISCSI_LOG_CONFIG,
4409 						    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4410 						    "for the CID : %d\n",
4411 						    beiscsi_conn->beiscsi_conn_cid);
4412 					goto free_hndls;
4413 				}
4414 
4415 				beiscsi_conn->login_in_progress = 1;
4416 				beiscsi_conn->plogin_sgl_handle =
4417 							io_task->psgl_handle;
4418 				io_task->pwrb_handle =
4419 					alloc_wrb_handle(phba,
4420 					beiscsi_conn->beiscsi_conn_cid,
4421 					&io_task->pwrb_context);
4422 				if (!io_task->pwrb_handle) {
4423 					beiscsi_log(phba, KERN_ERR,
4424 						    BEISCSI_LOG_IO |
4425 						    BEISCSI_LOG_CONFIG,
4426 						    "BM_%d : Alloc of WRB_HANDLE Failed "
4427 						    "for the CID : %d\n",
4428 						    beiscsi_conn->beiscsi_conn_cid);
4429 					goto free_mgmt_hndls;
4430 				}
4431 				beiscsi_conn->plogin_wrb_handle =
4432 							io_task->pwrb_handle;
4433 
4434 			} else {
4435 				io_task->psgl_handle =
4436 						beiscsi_conn->plogin_sgl_handle;
4437 				io_task->pwrb_handle =
4438 						beiscsi_conn->plogin_wrb_handle;
4439 			}
4440 		} else {
4441 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4442 			if (!io_task->psgl_handle) {
4443 				beiscsi_log(phba, KERN_ERR,
4444 					    BEISCSI_LOG_IO |
4445 					    BEISCSI_LOG_CONFIG,
4446 					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4447 					    "for the CID : %d\n",
4448 					    beiscsi_conn->beiscsi_conn_cid);
4449 				goto free_hndls;
4450 			}
4451 			io_task->pwrb_handle =
4452 					alloc_wrb_handle(phba,
4453 					beiscsi_conn->beiscsi_conn_cid,
4454 					&io_task->pwrb_context);
4455 			if (!io_task->pwrb_handle) {
4456 				beiscsi_log(phba, KERN_ERR,
4457 					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4458 					    "BM_%d : Alloc of WRB_HANDLE Failed "
4459 					    "for the CID : %d\n",
4460 					    beiscsi_conn->beiscsi_conn_cid);
4461 				goto free_mgmt_hndls;
4462 			}
4463 
4464 		}
4465 	}
4466 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4467 				 wrb_index << 16) | (unsigned int)
4468 				(io_task->psgl_handle->sgl_index));
4469 	io_task->pwrb_handle->pio_handle = task;
4470 
4471 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
4472 	return 0;
4473 
4474 free_io_hndls:
4475 	free_io_sgl_handle(phba, io_task->psgl_handle);
4476 	goto free_hndls;
4477 free_mgmt_hndls:
4478 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4479 	io_task->psgl_handle = NULL;
4480 free_hndls:
4481 	phwi_ctrlr = phba->phwi_ctrlr;
4482 	cri_index = BE_GET_CRI_FROM_CID(
4483 	beiscsi_conn->beiscsi_conn_cid);
4484 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4485 	if (io_task->pwrb_handle)
4486 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4487 	io_task->pwrb_handle = NULL;
4488 	dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4489 		      io_task->bhs_pa.u.a64.address);
4490 	io_task->cmd_bhs = NULL;
4491 	return -ENOMEM;
4492 }
4493 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4494 		       unsigned int num_sg, unsigned int xferlen,
4495 		       unsigned int writedir)
4496 {
4497 
4498 	struct beiscsi_io_task *io_task = task->dd_data;
4499 	struct iscsi_conn *conn = task->conn;
4500 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4501 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4502 	struct iscsi_wrb *pwrb = NULL;
4503 	unsigned int doorbell = 0;
4504 
4505 	pwrb = io_task->pwrb_handle->pwrb;
4506 
4507 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
4508 
4509 	if (writedir) {
4510 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4511 			      INI_WR_CMD);
4512 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4513 	} else {
4514 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4515 			      INI_RD_CMD);
4516 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4517 	}
4518 
4519 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4520 					  type, pwrb);
4521 
4522 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4523 		      cpu_to_be16(*(unsigned short *)
4524 		      &io_task->cmd_bhs->iscsi_hdr.lun));
4525 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4526 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4527 		      io_task->pwrb_handle->wrb_index);
4528 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4529 		      be32_to_cpu(task->cmdsn));
4530 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4531 		      io_task->psgl_handle->sgl_index);
4532 
4533 	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4534 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4535 		      io_task->pwrb_handle->wrb_index);
4536 	if (io_task->pwrb_context->plast_wrb)
4537 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4538 			      io_task->pwrb_context->plast_wrb,
4539 			      io_task->pwrb_handle->wrb_index);
4540 	io_task->pwrb_context->plast_wrb = pwrb;
4541 
4542 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4543 
4544 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4545 	doorbell |= (io_task->pwrb_handle->wrb_index &
4546 		     DB_DEF_PDU_WRB_INDEX_MASK) <<
4547 		     DB_DEF_PDU_WRB_INDEX_SHIFT;
4548 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4549 	iowrite32(doorbell, phba->db_va +
4550 		  beiscsi_conn->doorbell_offset);
4551 	return 0;
4552 }
4553 
4554 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4555 			  unsigned int num_sg, unsigned int xferlen,
4556 			  unsigned int writedir)
4557 {
4558 
4559 	struct beiscsi_io_task *io_task = task->dd_data;
4560 	struct iscsi_conn *conn = task->conn;
4561 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4562 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4563 	struct iscsi_wrb *pwrb = NULL;
4564 	unsigned int doorbell = 0;
4565 
4566 	pwrb = io_task->pwrb_handle->pwrb;
4567 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
4568 
4569 	if (writedir) {
4570 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4571 			      INI_WR_CMD);
4572 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4573 	} else {
4574 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4575 			      INI_RD_CMD);
4576 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4577 	}
4578 
4579 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4580 					  type, pwrb);
4581 
4582 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4583 		      cpu_to_be16(*(unsigned short *)
4584 				  &io_task->cmd_bhs->iscsi_hdr.lun));
4585 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4586 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4587 		      io_task->pwrb_handle->wrb_index);
4588 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4589 		      be32_to_cpu(task->cmdsn));
4590 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4591 		      io_task->psgl_handle->sgl_index);
4592 
4593 	hwi_write_sgl(pwrb, sg, num_sg, io_task);
4594 
4595 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4596 		      io_task->pwrb_handle->wrb_index);
4597 	if (io_task->pwrb_context->plast_wrb)
4598 		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4599 			      io_task->pwrb_context->plast_wrb,
4600 			      io_task->pwrb_handle->wrb_index);
4601 	io_task->pwrb_context->plast_wrb = pwrb;
4602 
4603 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4604 
4605 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4606 	doorbell |= (io_task->pwrb_handle->wrb_index &
4607 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4608 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4609 
4610 	iowrite32(doorbell, phba->db_va +
4611 		  beiscsi_conn->doorbell_offset);
4612 	return 0;
4613 }
4614 
4615 static int beiscsi_mtask(struct iscsi_task *task)
4616 {
4617 	struct beiscsi_io_task *io_task = task->dd_data;
4618 	struct iscsi_conn *conn = task->conn;
4619 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4620 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4621 	struct iscsi_wrb *pwrb = NULL;
4622 	unsigned int doorbell = 0;
4623 	unsigned int cid;
4624 	unsigned int pwrb_typeoffset = 0;
4625 	int ret = 0;
4626 
4627 	cid = beiscsi_conn->beiscsi_conn_cid;
4628 	pwrb = io_task->pwrb_handle->pwrb;
4629 
4630 	if (is_chip_be2_be3r(phba)) {
4631 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4632 			      be32_to_cpu(task->cmdsn));
4633 		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4634 			      io_task->pwrb_handle->wrb_index);
4635 		AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4636 			      io_task->psgl_handle->sgl_index);
4637 		AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4638 			      task->data_count);
4639 		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4640 			      io_task->pwrb_handle->wrb_index);
4641 		if (io_task->pwrb_context->plast_wrb)
4642 			AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4643 				      io_task->pwrb_context->plast_wrb,
4644 				      io_task->pwrb_handle->wrb_index);
4645 		io_task->pwrb_context->plast_wrb = pwrb;
4646 
4647 		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4648 	} else {
4649 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4650 			      be32_to_cpu(task->cmdsn));
4651 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4652 			      io_task->pwrb_handle->wrb_index);
4653 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4654 			      io_task->psgl_handle->sgl_index);
4655 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4656 			      task->data_count);
4657 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4658 			      io_task->pwrb_handle->wrb_index);
4659 		if (io_task->pwrb_context->plast_wrb)
4660 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4661 				      io_task->pwrb_context->plast_wrb,
4662 				      io_task->pwrb_handle->wrb_index);
4663 		io_task->pwrb_context->plast_wrb = pwrb;
4664 
4665 		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4666 	}
4667 
4668 
4669 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4670 	case ISCSI_OP_LOGIN:
4671 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4672 		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4673 		ret = hwi_write_buffer(pwrb, task);
4674 		break;
4675 	case ISCSI_OP_NOOP_OUT:
4676 		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4677 			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4678 			if (is_chip_be2_be3r(phba))
4679 				AMAP_SET_BITS(struct amap_iscsi_wrb,
4680 					      dmsg, pwrb, 1);
4681 			else
4682 				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4683 					      dmsg, pwrb, 1);
4684 		} else {
4685 			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4686 			if (is_chip_be2_be3r(phba))
4687 				AMAP_SET_BITS(struct amap_iscsi_wrb,
4688 					      dmsg, pwrb, 0);
4689 			else
4690 				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4691 					      dmsg, pwrb, 0);
4692 		}
4693 		ret = hwi_write_buffer(pwrb, task);
4694 		break;
4695 	case ISCSI_OP_TEXT:
4696 		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4697 		ret = hwi_write_buffer(pwrb, task);
4698 		break;
4699 	case ISCSI_OP_SCSI_TMFUNC:
4700 		ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
4701 		ret = hwi_write_buffer(pwrb, task);
4702 		break;
4703 	case ISCSI_OP_LOGOUT:
4704 		ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
4705 		ret = hwi_write_buffer(pwrb, task);
4706 		break;
4707 
4708 	default:
4709 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4710 			    "BM_%d : opcode =%d Not supported\n",
4711 			    task->hdr->opcode & ISCSI_OPCODE_MASK);
4712 
4713 		return -EINVAL;
4714 	}
4715 
4716 	if (ret)
4717 		return ret;
4718 
4719 	/* Set the task type */
4720 	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4721 		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4722 		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4723 
4724 	doorbell |= cid & DB_WRB_POST_CID_MASK;
4725 	doorbell |= (io_task->pwrb_handle->wrb_index &
4726 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4727 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4728 	iowrite32(doorbell, phba->db_va +
4729 		  beiscsi_conn->doorbell_offset);
4730 	return 0;
4731 }
4732 
4733 static int beiscsi_task_xmit(struct iscsi_task *task)
4734 {
4735 	struct beiscsi_io_task *io_task = task->dd_data;
4736 	struct scsi_cmnd *sc = task->sc;
4737 	struct beiscsi_hba *phba;
4738 	struct scatterlist *sg;
4739 	int num_sg;
4740 	unsigned int  writedir = 0, xferlen = 0;
4741 
4742 	phba = io_task->conn->phba;
4743 	/**
4744 	 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
4745 	 * operational if FW still gets heartbeat from EP FW. Is management
4746 	 * path really needed to continue further?
4747 	 */
4748 	if (!beiscsi_hba_is_online(phba))
4749 		return -EIO;
4750 
4751 	if (!io_task->conn->login_in_progress)
4752 		task->hdr->exp_statsn = 0;
4753 
4754 	if (!sc)
4755 		return beiscsi_mtask(task);
4756 
4757 	io_task->scsi_cmnd = sc;
4758 	io_task->num_sg = 0;
4759 	num_sg = scsi_dma_map(sc);
4760 	if (num_sg < 0) {
4761 		beiscsi_log(phba, KERN_ERR,
4762 			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
4763 			    "BM_%d : scsi_dma_map Failed "
4764 			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
4765 			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
4766 			    io_task->libiscsi_itt, scsi_bufflen(sc));
4767 
4768 		return num_sg;
4769 	}
4770 	/**
4771 	 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
4772 	 * For management task, cleanup_task checks mtask_addr before unmapping.
4773 	 */
4774 	io_task->num_sg = num_sg;
4775 	xferlen = scsi_bufflen(sc);
4776 	sg = scsi_sglist(sc);
4777 	if (sc->sc_data_direction == DMA_TO_DEVICE)
4778 		writedir = 1;
4779 	else
4780 		writedir = 0;
4781 
4782 	return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4783 }
4784 
4785 /**
4786  * beiscsi_bsg_request - handle bsg request from ISCSI transport
4787  * @job: job to handle
4788  */
4789 static int beiscsi_bsg_request(struct bsg_job *job)
4790 {
4791 	struct Scsi_Host *shost;
4792 	struct beiscsi_hba *phba;
4793 	struct iscsi_bsg_request *bsg_req = job->request;
4794 	int rc = -EINVAL;
4795 	unsigned int tag;
4796 	struct be_dma_mem nonemb_cmd;
4797 	struct be_cmd_resp_hdr *resp;
4798 	struct iscsi_bsg_reply *bsg_reply = job->reply;
4799 	unsigned short status, extd_status;
4800 
4801 	shost = iscsi_job_to_shost(job);
4802 	phba = iscsi_host_priv(shost);
4803 
4804 	if (!beiscsi_hba_is_online(phba)) {
4805 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
4806 			    "BM_%d : HBA in error 0x%lx\n", phba->state);
4807 		return -ENXIO;
4808 	}
4809 
4810 	switch (bsg_req->msgcode) {
4811 	case ISCSI_BSG_HST_VENDOR:
4812 		nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
4813 					job->request_payload.payload_len,
4814 					&nonemb_cmd.dma, GFP_KERNEL);
4815 		if (nonemb_cmd.va == NULL) {
4816 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4817 				    "BM_%d : Failed to allocate memory for "
4818 				    "beiscsi_bsg_request\n");
4819 			return -ENOMEM;
4820 		}
4821 		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4822 						  &nonemb_cmd);
4823 		if (!tag) {
4824 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4825 				    "BM_%d : MBX Tag Allocation Failed\n");
4826 
4827 			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4828 					    nonemb_cmd.va, nonemb_cmd.dma);
4829 			return -EAGAIN;
4830 		}
4831 
4832 		rc = wait_event_interruptible_timeout(
4833 					phba->ctrl.mcc_wait[tag],
4834 					phba->ctrl.mcc_tag_status[tag],
4835 					msecs_to_jiffies(
4836 					BEISCSI_HOST_MBX_TIMEOUT));
4837 
4838 		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
4839 			clear_bit(MCC_TAG_STATE_RUNNING,
4840 				  &phba->ctrl.ptag_state[tag].tag_state);
4841 			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4842 					    nonemb_cmd.va, nonemb_cmd.dma);
4843 			return -EIO;
4844 		}
4845 		extd_status = (phba->ctrl.mcc_tag_status[tag] &
4846 			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
4847 		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
4848 		free_mcc_wrb(&phba->ctrl, tag);
4849 		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4850 		sg_copy_from_buffer(job->reply_payload.sg_list,
4851 				    job->reply_payload.sg_cnt,
4852 				    nonemb_cmd.va, (resp->response_length
4853 				    + sizeof(*resp)));
4854 		bsg_reply->reply_payload_rcv_len = resp->response_length;
4855 		bsg_reply->result = status;
4856 		bsg_job_done(job, bsg_reply->result,
4857 			     bsg_reply->reply_payload_rcv_len);
4858 		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4859 				    nonemb_cmd.va, nonemb_cmd.dma);
4860 		if (status || extd_status) {
4861 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4862 				    "BM_%d : MBX Cmd Failed"
4863 				    " status = %d extd_status = %d\n",
4864 				    status, extd_status);
4865 
4866 			return -EIO;
4867 		} else {
4868 			rc = 0;
4869 		}
4870 		break;
4871 
4872 	default:
4873 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4874 				"BM_%d : Unsupported bsg command: 0x%x\n",
4875 				bsg_req->msgcode);
4876 		break;
4877 	}
4878 
4879 	return rc;
4880 }
4881 
4882 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4883 {
4884 	/* Set the logging parameter */
4885 	beiscsi_log_enable_init(phba, beiscsi_log_enable);
4886 }
4887 
4888 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
4889 {
4890 	if (phba->boot_struct.boot_kset)
4891 		return;
4892 
4893 	/* skip if boot work is already in progress */
4894 	if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
4895 		return;
4896 
4897 	phba->boot_struct.retry = 3;
4898 	phba->boot_struct.tag = 0;
4899 	phba->boot_struct.s_handle = s_handle;
4900 	phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
4901 	schedule_work(&phba->boot_work);
4902 }
4903 
4904 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS	3
4905 /*
4906  * beiscsi_show_boot_tgt_info()
4907  * Boot flag info for iscsi-utilities
4908  * Bit 0 Block valid flag
4909  * Bit 1 Firmware booting selected
4910  */
4911 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4912 {
4913 	struct beiscsi_hba *phba = data;
4914 	struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
4915 	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
4916 	char *str = buf;
4917 	int rc = -EPERM;
4918 
4919 	switch (type) {
4920 	case ISCSI_BOOT_TGT_NAME:
4921 		rc = sprintf(buf, "%.*s\n",
4922 			    (int)strlen(boot_sess->target_name),
4923 			    (char *)&boot_sess->target_name);
4924 		break;
4925 	case ISCSI_BOOT_TGT_IP_ADDR:
4926 		if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
4927 			rc = sprintf(buf, "%pI4\n",
4928 				(char *)&boot_conn->dest_ipaddr.addr);
4929 		else
4930 			rc = sprintf(str, "%pI6\n",
4931 				(char *)&boot_conn->dest_ipaddr.addr);
4932 		break;
4933 	case ISCSI_BOOT_TGT_PORT:
4934 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
4935 		break;
4936 
4937 	case ISCSI_BOOT_TGT_CHAP_NAME:
4938 		rc = sprintf(str,  "%.*s\n",
4939 			     boot_conn->negotiated_login_options.auth_data.chap.
4940 			     target_chap_name_length,
4941 			     (char *)&boot_conn->negotiated_login_options.
4942 			     auth_data.chap.target_chap_name);
4943 		break;
4944 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4945 		rc = sprintf(str,  "%.*s\n",
4946 			     boot_conn->negotiated_login_options.auth_data.chap.
4947 			     target_secret_length,
4948 			     (char *)&boot_conn->negotiated_login_options.
4949 			     auth_data.chap.target_secret);
4950 		break;
4951 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4952 		rc = sprintf(str,  "%.*s\n",
4953 			     boot_conn->negotiated_login_options.auth_data.chap.
4954 			     intr_chap_name_length,
4955 			     (char *)&boot_conn->negotiated_login_options.
4956 			     auth_data.chap.intr_chap_name);
4957 		break;
4958 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4959 		rc = sprintf(str,  "%.*s\n",
4960 			     boot_conn->negotiated_login_options.auth_data.chap.
4961 			     intr_secret_length,
4962 			     (char *)&boot_conn->negotiated_login_options.
4963 			     auth_data.chap.intr_secret);
4964 		break;
4965 	case ISCSI_BOOT_TGT_FLAGS:
4966 		rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
4967 		break;
4968 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4969 		rc = sprintf(str, "0\n");
4970 		break;
4971 	}
4972 	return rc;
4973 }
4974 
4975 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
4976 {
4977 	struct beiscsi_hba *phba = data;
4978 	char *str = buf;
4979 	int rc = -EPERM;
4980 
4981 	switch (type) {
4982 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4983 		rc = sprintf(str, "%s\n",
4984 			     phba->boot_struct.boot_sess.initiator_iscsiname);
4985 		break;
4986 	}
4987 	return rc;
4988 }
4989 
4990 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
4991 {
4992 	struct beiscsi_hba *phba = data;
4993 	char *str = buf;
4994 	int rc = -EPERM;
4995 
4996 	switch (type) {
4997 	case ISCSI_BOOT_ETH_FLAGS:
4998 		rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
4999 		break;
5000 	case ISCSI_BOOT_ETH_INDEX:
5001 		rc = sprintf(str, "0\n");
5002 		break;
5003 	case ISCSI_BOOT_ETH_MAC:
5004 		rc  = beiscsi_get_macaddr(str, phba);
5005 		break;
5006 	}
5007 	return rc;
5008 }
5009 
5010 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
5011 {
5012 	umode_t rc = 0;
5013 
5014 	switch (type) {
5015 	case ISCSI_BOOT_TGT_NAME:
5016 	case ISCSI_BOOT_TGT_IP_ADDR:
5017 	case ISCSI_BOOT_TGT_PORT:
5018 	case ISCSI_BOOT_TGT_CHAP_NAME:
5019 	case ISCSI_BOOT_TGT_CHAP_SECRET:
5020 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5021 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5022 	case ISCSI_BOOT_TGT_NIC_ASSOC:
5023 	case ISCSI_BOOT_TGT_FLAGS:
5024 		rc = S_IRUGO;
5025 		break;
5026 	}
5027 	return rc;
5028 }
5029 
5030 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
5031 {
5032 	umode_t rc = 0;
5033 
5034 	switch (type) {
5035 	case ISCSI_BOOT_INI_INITIATOR_NAME:
5036 		rc = S_IRUGO;
5037 		break;
5038 	}
5039 	return rc;
5040 }
5041 
5042 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
5043 {
5044 	umode_t rc = 0;
5045 
5046 	switch (type) {
5047 	case ISCSI_BOOT_ETH_FLAGS:
5048 	case ISCSI_BOOT_ETH_MAC:
5049 	case ISCSI_BOOT_ETH_INDEX:
5050 		rc = S_IRUGO;
5051 		break;
5052 	}
5053 	return rc;
5054 }
5055 
5056 static void beiscsi_boot_kobj_release(void *data)
5057 {
5058 	struct beiscsi_hba *phba = data;
5059 
5060 	scsi_host_put(phba->shost);
5061 }
5062 
5063 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
5064 {
5065 	struct boot_struct *bs = &phba->boot_struct;
5066 	struct iscsi_boot_kobj *boot_kobj;
5067 
5068 	if (bs->boot_kset) {
5069 		__beiscsi_log(phba, KERN_ERR,
5070 			      "BM_%d: boot_kset already created\n");
5071 		return 0;
5072 	}
5073 
5074 	bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
5075 	if (!bs->boot_kset) {
5076 		__beiscsi_log(phba, KERN_ERR,
5077 			      "BM_%d: boot_kset alloc failed\n");
5078 		return -ENOMEM;
5079 	}
5080 
5081 	/* get shost ref because the show function will refer phba */
5082 	if (!scsi_host_get(phba->shost))
5083 		goto free_kset;
5084 
5085 	boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
5086 					     beiscsi_show_boot_tgt_info,
5087 					     beiscsi_tgt_get_attr_visibility,
5088 					     beiscsi_boot_kobj_release);
5089 	if (!boot_kobj)
5090 		goto put_shost;
5091 
5092 	if (!scsi_host_get(phba->shost))
5093 		goto free_kset;
5094 
5095 	boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
5096 						beiscsi_show_boot_ini_info,
5097 						beiscsi_ini_get_attr_visibility,
5098 						beiscsi_boot_kobj_release);
5099 	if (!boot_kobj)
5100 		goto put_shost;
5101 
5102 	if (!scsi_host_get(phba->shost))
5103 		goto free_kset;
5104 
5105 	boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
5106 					       beiscsi_show_boot_eth_info,
5107 					       beiscsi_eth_get_attr_visibility,
5108 					       beiscsi_boot_kobj_release);
5109 	if (!boot_kobj)
5110 		goto put_shost;
5111 
5112 	return 0;
5113 
5114 put_shost:
5115 	scsi_host_put(phba->shost);
5116 free_kset:
5117 	iscsi_boot_destroy_kset(bs->boot_kset);
5118 	bs->boot_kset = NULL;
5119 	return -ENOMEM;
5120 }
5121 
5122 static void beiscsi_boot_work(struct work_struct *work)
5123 {
5124 	struct beiscsi_hba *phba =
5125 		container_of(work, struct beiscsi_hba, boot_work);
5126 	struct boot_struct *bs = &phba->boot_struct;
5127 	unsigned int tag = 0;
5128 
5129 	if (!beiscsi_hba_is_online(phba))
5130 		return;
5131 
5132 	beiscsi_log(phba, KERN_INFO,
5133 		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
5134 		    "BM_%d : %s action %d\n",
5135 		    __func__, phba->boot_struct.action);
5136 
5137 	switch (phba->boot_struct.action) {
5138 	case BEISCSI_BOOT_REOPEN_SESS:
5139 		tag = beiscsi_boot_reopen_sess(phba);
5140 		break;
5141 	case BEISCSI_BOOT_GET_SHANDLE:
5142 		tag = __beiscsi_boot_get_shandle(phba, 1);
5143 		break;
5144 	case BEISCSI_BOOT_GET_SINFO:
5145 		tag = beiscsi_boot_get_sinfo(phba);
5146 		break;
5147 	case BEISCSI_BOOT_LOGOUT_SESS:
5148 		tag = beiscsi_boot_logout_sess(phba);
5149 		break;
5150 	case BEISCSI_BOOT_CREATE_KSET:
5151 		beiscsi_boot_create_kset(phba);
5152 		/**
5153 		 * updated boot_kset is made visible to all before
5154 		 * ending the boot work.
5155 		 */
5156 		mb();
5157 		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5158 		return;
5159 	}
5160 	if (!tag) {
5161 		if (bs->retry--)
5162 			schedule_work(&phba->boot_work);
5163 		else
5164 			clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5165 	}
5166 }
5167 
5168 static void beiscsi_eqd_update_work(struct work_struct *work)
5169 {
5170 	struct hwi_context_memory *phwi_context;
5171 	struct be_set_eqd set_eqd[MAX_CPUS];
5172 	struct hwi_controller *phwi_ctrlr;
5173 	struct be_eq_obj *pbe_eq;
5174 	struct beiscsi_hba *phba;
5175 	unsigned int pps, delta;
5176 	struct be_aic_obj *aic;
5177 	int eqd, i, num = 0;
5178 	unsigned long now;
5179 
5180 	phba = container_of(work, struct beiscsi_hba, eqd_update.work);
5181 	if (!beiscsi_hba_is_online(phba))
5182 		return;
5183 
5184 	phwi_ctrlr = phba->phwi_ctrlr;
5185 	phwi_context = phwi_ctrlr->phwi_ctxt;
5186 
5187 	for (i = 0; i <= phba->num_cpus; i++) {
5188 		aic = &phba->aic_obj[i];
5189 		pbe_eq = &phwi_context->be_eq[i];
5190 		now = jiffies;
5191 		if (!aic->jiffies || time_before(now, aic->jiffies) ||
5192 		    pbe_eq->cq_count < aic->eq_prev) {
5193 			aic->jiffies = now;
5194 			aic->eq_prev = pbe_eq->cq_count;
5195 			continue;
5196 		}
5197 		delta = jiffies_to_msecs(now - aic->jiffies);
5198 		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5199 		eqd = (pps / 1500) << 2;
5200 
5201 		if (eqd < 8)
5202 			eqd = 0;
5203 		eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
5204 		eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
5205 
5206 		aic->jiffies = now;
5207 		aic->eq_prev = pbe_eq->cq_count;
5208 
5209 		if (eqd != aic->prev_eqd) {
5210 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
5211 			set_eqd[num].eq_id = pbe_eq->q.id;
5212 			aic->prev_eqd = eqd;
5213 			num++;
5214 		}
5215 	}
5216 	if (num)
5217 		/* completion of this is ignored */
5218 		beiscsi_modify_eq_delay(phba, set_eqd, num);
5219 
5220 	schedule_delayed_work(&phba->eqd_update,
5221 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5222 }
5223 
5224 static void beiscsi_hw_tpe_check(struct timer_list *t)
5225 {
5226 	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
5227 	u32 wait;
5228 
5229 	/* if not TPE, do nothing */
5230 	if (!beiscsi_detect_tpe(phba))
5231 		return;
5232 
5233 	/* wait default 4000ms before recovering */
5234 	wait = 4000;
5235 	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
5236 		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
5237 	queue_delayed_work(phba->wq, &phba->recover_port,
5238 			   msecs_to_jiffies(wait));
5239 }
5240 
5241 static void beiscsi_hw_health_check(struct timer_list *t)
5242 {
5243 	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
5244 
5245 	beiscsi_detect_ue(phba);
5246 	if (beiscsi_detect_ue(phba)) {
5247 		__beiscsi_log(phba, KERN_ERR,
5248 			      "BM_%d : port in error: %lx\n", phba->state);
5249 		/* sessions are no longer valid, so first fail the sessions */
5250 		queue_work(phba->wq, &phba->sess_work);
5251 
5252 		/* detect UER supported */
5253 		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
5254 			return;
5255 		/* modify this timer to check TPE */
5256 		phba->hw_check.function = beiscsi_hw_tpe_check;
5257 	}
5258 
5259 	mod_timer(&phba->hw_check,
5260 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5261 }
5262 
5263 /*
5264  * beiscsi_enable_port()- Enables the disabled port.
5265  * Only port resources freed in disable function are reallocated.
5266  * This is called in HBA error handling path.
5267  *
5268  * @phba: Instance of driver private structure
5269  *
5270  **/
5271 static int beiscsi_enable_port(struct beiscsi_hba *phba)
5272 {
5273 	struct hwi_context_memory *phwi_context;
5274 	struct hwi_controller *phwi_ctrlr;
5275 	struct be_eq_obj *pbe_eq;
5276 	int ret, i;
5277 
5278 	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
5279 		__beiscsi_log(phba, KERN_ERR,
5280 			      "BM_%d : %s : port is online %lx\n",
5281 			      __func__, phba->state);
5282 		return 0;
5283 	}
5284 
5285 	ret = beiscsi_init_sliport(phba);
5286 	if (ret)
5287 		return ret;
5288 
5289 	be2iscsi_enable_msix(phba);
5290 
5291 	beiscsi_get_params(phba);
5292 	beiscsi_set_host_data(phba);
5293 	/* Re-enable UER. If different TPE occurs then it is recoverable. */
5294 	beiscsi_set_uer_feature(phba);
5295 
5296 	phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
5297 	phba->shost->can_queue = phba->params.ios_per_ctrl;
5298 	ret = beiscsi_init_port(phba);
5299 	if (ret < 0) {
5300 		__beiscsi_log(phba, KERN_ERR,
5301 			      "BM_%d : init port failed\n");
5302 		goto disable_msix;
5303 	}
5304 
5305 	for (i = 0; i < MAX_MCC_CMD; i++) {
5306 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5307 		phba->ctrl.mcc_tag[i] = i + 1;
5308 		phba->ctrl.mcc_tag_status[i + 1] = 0;
5309 		phba->ctrl.mcc_tag_available++;
5310 	}
5311 
5312 	phwi_ctrlr = phba->phwi_ctrlr;
5313 	phwi_context = phwi_ctrlr->phwi_ctxt;
5314 	for (i = 0; i < phba->num_cpus; i++) {
5315 		pbe_eq = &phwi_context->be_eq[i];
5316 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5317 	}
5318 
5319 	i = (phba->pcidev->msix_enabled) ? i : 0;
5320 	/* Work item for MCC handling */
5321 	pbe_eq = &phwi_context->be_eq[i];
5322 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5323 
5324 	ret = beiscsi_init_irqs(phba);
5325 	if (ret < 0) {
5326 		__beiscsi_log(phba, KERN_ERR,
5327 			      "BM_%d : setup IRQs failed %d\n", ret);
5328 		goto cleanup_port;
5329 	}
5330 	hwi_enable_intr(phba);
5331 	/* port operational: clear all error bits */
5332 	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5333 	__beiscsi_log(phba, KERN_INFO,
5334 		      "BM_%d : port online: 0x%lx\n", phba->state);
5335 
5336 	/* start hw_check timer and eqd_update work */
5337 	schedule_delayed_work(&phba->eqd_update,
5338 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5339 
5340 	/**
5341 	 * Timer function gets modified for TPE detection.
5342 	 * Always reinit to do health check first.
5343 	 */
5344 	phba->hw_check.function = beiscsi_hw_health_check;
5345 	mod_timer(&phba->hw_check,
5346 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5347 	return 0;
5348 
5349 cleanup_port:
5350 	for (i = 0; i < phba->num_cpus; i++) {
5351 		pbe_eq = &phwi_context->be_eq[i];
5352 		irq_poll_disable(&pbe_eq->iopoll);
5353 	}
5354 	hwi_cleanup_port(phba);
5355 
5356 disable_msix:
5357 	pci_free_irq_vectors(phba->pcidev);
5358 	return ret;
5359 }
5360 
5361 /*
5362  * beiscsi_disable_port()- Disable port and cleanup driver resources.
5363  * This is called in HBA error handling and driver removal.
5364  * @phba: Instance Priv structure
5365  * @unload: indicate driver is unloading
5366  *
5367  * Free the OS and HW resources held by the driver
5368  **/
5369 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5370 {
5371 	struct hwi_context_memory *phwi_context;
5372 	struct hwi_controller *phwi_ctrlr;
5373 	struct be_eq_obj *pbe_eq;
5374 	unsigned int i;
5375 
5376 	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
5377 		return;
5378 
5379 	phwi_ctrlr = phba->phwi_ctrlr;
5380 	phwi_context = phwi_ctrlr->phwi_ctxt;
5381 	hwi_disable_intr(phba);
5382 	beiscsi_free_irqs(phba);
5383 	pci_free_irq_vectors(phba->pcidev);
5384 
5385 	for (i = 0; i < phba->num_cpus; i++) {
5386 		pbe_eq = &phwi_context->be_eq[i];
5387 		irq_poll_disable(&pbe_eq->iopoll);
5388 	}
5389 	cancel_delayed_work_sync(&phba->eqd_update);
5390 	cancel_work_sync(&phba->boot_work);
5391 	/* WQ might be running cancel queued mcc_work if we are not exiting */
5392 	if (!unload && beiscsi_hba_in_error(phba)) {
5393 		pbe_eq = &phwi_context->be_eq[i];
5394 		cancel_work_sync(&pbe_eq->mcc_work);
5395 	}
5396 	hwi_cleanup_port(phba);
5397 	beiscsi_cleanup_port(phba);
5398 }
5399 
5400 static void beiscsi_sess_work(struct work_struct *work)
5401 {
5402 	struct beiscsi_hba *phba;
5403 
5404 	phba = container_of(work, struct beiscsi_hba, sess_work);
5405 	/*
5406 	 * This work gets scheduled only in case of HBA error.
5407 	 * Old sessions are gone so need to be re-established.
5408 	 * iscsi_session_failure needs process context hence this work.
5409 	 */
5410 	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5411 }
5412 
5413 static void beiscsi_recover_port(struct work_struct *work)
5414 {
5415 	struct beiscsi_hba *phba;
5416 
5417 	phba = container_of(work, struct beiscsi_hba, recover_port.work);
5418 	beiscsi_disable_port(phba, 0);
5419 	beiscsi_enable_port(phba);
5420 }
5421 
5422 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5423 		pci_channel_state_t state)
5424 {
5425 	struct beiscsi_hba *phba = NULL;
5426 
5427 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5428 	set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
5429 
5430 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5431 		    "BM_%d : EEH error detected\n");
5432 
5433 	/* first stop UE detection when PCI error detected */
5434 	del_timer_sync(&phba->hw_check);
5435 	cancel_delayed_work_sync(&phba->recover_port);
5436 
5437 	/* sessions are no longer valid, so first fail the sessions */
5438 	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5439 	beiscsi_disable_port(phba, 0);
5440 
5441 	if (state == pci_channel_io_perm_failure) {
5442 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5443 			    "BM_%d : EEH : State PERM Failure");
5444 		return PCI_ERS_RESULT_DISCONNECT;
5445 	}
5446 
5447 	pci_disable_device(pdev);
5448 
5449 	/* The error could cause the FW to trigger a flash debug dump.
5450 	 * Resetting the card while flash dump is in progress
5451 	 * can cause it not to recover; wait for it to finish.
5452 	 * Wait only for first function as it is needed only once per
5453 	 * adapter.
5454 	 **/
5455 	if (pdev->devfn == 0)
5456 		ssleep(30);
5457 
5458 	return PCI_ERS_RESULT_NEED_RESET;
5459 }
5460 
5461 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5462 {
5463 	struct beiscsi_hba *phba = NULL;
5464 	int status = 0;
5465 
5466 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5467 
5468 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5469 		    "BM_%d : EEH Reset\n");
5470 
5471 	status = pci_enable_device(pdev);
5472 	if (status)
5473 		return PCI_ERS_RESULT_DISCONNECT;
5474 
5475 	pci_set_master(pdev);
5476 	pci_set_power_state(pdev, PCI_D0);
5477 	pci_restore_state(pdev);
5478 
5479 	status = beiscsi_check_fw_rdy(phba);
5480 	if (status) {
5481 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5482 			    "BM_%d : EEH Reset Completed\n");
5483 	} else {
5484 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5485 			    "BM_%d : EEH Reset Completion Failure\n");
5486 		return PCI_ERS_RESULT_DISCONNECT;
5487 	}
5488 
5489 	return PCI_ERS_RESULT_RECOVERED;
5490 }
5491 
5492 static void beiscsi_eeh_resume(struct pci_dev *pdev)
5493 {
5494 	struct beiscsi_hba *phba;
5495 	int ret;
5496 
5497 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5498 	pci_save_state(pdev);
5499 
5500 	ret = beiscsi_enable_port(phba);
5501 	if (ret)
5502 		__beiscsi_log(phba, KERN_ERR,
5503 			      "BM_%d : AER EEH resume failed\n");
5504 }
5505 
5506 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5507 			     const struct pci_device_id *id)
5508 {
5509 	struct hwi_context_memory *phwi_context;
5510 	struct hwi_controller *phwi_ctrlr;
5511 	struct beiscsi_hba *phba = NULL;
5512 	struct be_eq_obj *pbe_eq;
5513 	unsigned int s_handle;
5514 	char wq_name[20];
5515 	int ret, i;
5516 
5517 	ret = beiscsi_enable_pci(pcidev);
5518 	if (ret < 0) {
5519 		dev_err(&pcidev->dev,
5520 			"beiscsi_dev_probe - Failed to enable pci device\n");
5521 		return ret;
5522 	}
5523 
5524 	phba = beiscsi_hba_alloc(pcidev);
5525 	if (!phba) {
5526 		dev_err(&pcidev->dev,
5527 			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5528 		ret = -ENOMEM;
5529 		goto disable_pci;
5530 	}
5531 
5532 	/* Enable EEH reporting */
5533 	ret = pci_enable_pcie_error_reporting(pcidev);
5534 	if (ret)
5535 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5536 			    "BM_%d : PCIe Error Reporting "
5537 			    "Enabling Failed\n");
5538 
5539 	pci_save_state(pcidev);
5540 
5541 	/* Initialize Driver configuration Paramters */
5542 	beiscsi_hba_attrs_init(phba);
5543 
5544 	phba->mac_addr_set = false;
5545 
5546 	switch (pcidev->device) {
5547 	case BE_DEVICE_ID1:
5548 	case OC_DEVICE_ID1:
5549 	case OC_DEVICE_ID2:
5550 		phba->generation = BE_GEN2;
5551 		phba->iotask_fn = beiscsi_iotask;
5552 		dev_warn(&pcidev->dev,
5553 			 "Obsolete/Unsupported BE2 Adapter Family\n");
5554 		break;
5555 	case BE_DEVICE_ID2:
5556 	case OC_DEVICE_ID3:
5557 		phba->generation = BE_GEN3;
5558 		phba->iotask_fn = beiscsi_iotask;
5559 		break;
5560 	case OC_SKH_ID1:
5561 		phba->generation = BE_GEN4;
5562 		phba->iotask_fn = beiscsi_iotask_v2;
5563 		break;
5564 	default:
5565 		phba->generation = 0;
5566 	}
5567 
5568 	ret = be_ctrl_init(phba, pcidev);
5569 	if (ret) {
5570 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5571 			    "BM_%d : be_ctrl_init failed\n");
5572 		goto free_hba;
5573 	}
5574 
5575 	ret = beiscsi_init_sliport(phba);
5576 	if (ret)
5577 		goto free_hba;
5578 
5579 	spin_lock_init(&phba->io_sgl_lock);
5580 	spin_lock_init(&phba->mgmt_sgl_lock);
5581 	spin_lock_init(&phba->async_pdu_lock);
5582 	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
5583 	if (ret != 0) {
5584 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5585 			    "BM_%d : Error getting fw config\n");
5586 		goto free_port;
5587 	}
5588 	beiscsi_get_port_name(&phba->ctrl, phba);
5589 	beiscsi_get_params(phba);
5590 	beiscsi_set_host_data(phba);
5591 	beiscsi_set_uer_feature(phba);
5592 
5593 	be2iscsi_enable_msix(phba);
5594 
5595 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5596 		    "BM_%d : num_cpus = %d\n",
5597 		    phba->num_cpus);
5598 
5599 	phba->shost->max_id = phba->params.cxns_per_ctrl;
5600 	phba->shost->can_queue = phba->params.ios_per_ctrl;
5601 	ret = beiscsi_get_memory(phba);
5602 	if (ret < 0) {
5603 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5604 			    "BM_%d : alloc host mem failed\n");
5605 		goto free_port;
5606 	}
5607 
5608 	ret = beiscsi_init_port(phba);
5609 	if (ret < 0) {
5610 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5611 			    "BM_%d : init port failed\n");
5612 		beiscsi_free_mem(phba);
5613 		goto free_port;
5614 	}
5615 
5616 	for (i = 0; i < MAX_MCC_CMD; i++) {
5617 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5618 		phba->ctrl.mcc_tag[i] = i + 1;
5619 		phba->ctrl.mcc_tag_status[i + 1] = 0;
5620 		phba->ctrl.mcc_tag_available++;
5621 		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
5622 		       sizeof(struct be_dma_mem));
5623 	}
5624 
5625 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5626 
5627 	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
5628 		 phba->shost->host_no);
5629 	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
5630 	if (!phba->wq) {
5631 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5632 			    "BM_%d : beiscsi_dev_probe-"
5633 			    "Failed to allocate work queue\n");
5634 		ret = -ENOMEM;
5635 		goto free_twq;
5636 	}
5637 
5638 	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
5639 
5640 	phwi_ctrlr = phba->phwi_ctrlr;
5641 	phwi_context = phwi_ctrlr->phwi_ctxt;
5642 
5643 	for (i = 0; i < phba->num_cpus; i++) {
5644 		pbe_eq = &phwi_context->be_eq[i];
5645 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5646 	}
5647 
5648 	i = (phba->pcidev->msix_enabled) ? i : 0;
5649 	/* Work item for MCC handling */
5650 	pbe_eq = &phwi_context->be_eq[i];
5651 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5652 
5653 	ret = beiscsi_init_irqs(phba);
5654 	if (ret < 0) {
5655 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5656 			    "BM_%d : beiscsi_dev_probe-"
5657 			    "Failed to beiscsi_init_irqs\n");
5658 		goto disable_iopoll;
5659 	}
5660 	hwi_enable_intr(phba);
5661 
5662 	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
5663 	if (ret)
5664 		goto free_irqs;
5665 
5666 	/* set online bit after port is operational */
5667 	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5668 	__beiscsi_log(phba, KERN_INFO,
5669 		      "BM_%d : port online: 0x%lx\n", phba->state);
5670 
5671 	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
5672 	ret = beiscsi_boot_get_shandle(phba, &s_handle);
5673 	if (ret > 0) {
5674 		beiscsi_start_boot_work(phba, s_handle);
5675 		/**
5676 		 * Set this bit after starting the work to let
5677 		 * probe handle it first.
5678 		 * ASYNC event can too schedule this work.
5679 		 */
5680 		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
5681 	}
5682 
5683 	beiscsi_iface_create_default(phba);
5684 	schedule_delayed_work(&phba->eqd_update,
5685 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5686 
5687 	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
5688 	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
5689 	/**
5690 	 * Start UE detection here. UE before this will cause stall in probe
5691 	 * and eventually fail the probe.
5692 	 */
5693 	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
5694 	mod_timer(&phba->hw_check,
5695 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5696 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5697 		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
5698 	return 0;
5699 
5700 free_irqs:
5701 	hwi_disable_intr(phba);
5702 	beiscsi_free_irqs(phba);
5703 disable_iopoll:
5704 	for (i = 0; i < phba->num_cpus; i++) {
5705 		pbe_eq = &phwi_context->be_eq[i];
5706 		irq_poll_disable(&pbe_eq->iopoll);
5707 	}
5708 	destroy_workqueue(phba->wq);
5709 free_twq:
5710 	hwi_cleanup_port(phba);
5711 	beiscsi_cleanup_port(phba);
5712 	beiscsi_free_mem(phba);
5713 free_port:
5714 	dma_free_coherent(&phba->pcidev->dev,
5715 			    phba->ctrl.mbox_mem_alloced.size,
5716 			    phba->ctrl.mbox_mem_alloced.va,
5717 			    phba->ctrl.mbox_mem_alloced.dma);
5718 	beiscsi_unmap_pci_function(phba);
5719 free_hba:
5720 	pci_disable_msix(phba->pcidev);
5721 	pci_dev_put(phba->pcidev);
5722 	iscsi_host_free(phba->shost);
5723 	pci_disable_pcie_error_reporting(pcidev);
5724 	pci_set_drvdata(pcidev, NULL);
5725 disable_pci:
5726 	pci_release_regions(pcidev);
5727 	pci_disable_device(pcidev);
5728 	return ret;
5729 }
5730 
5731 static void beiscsi_remove(struct pci_dev *pcidev)
5732 {
5733 	struct beiscsi_hba *phba = NULL;
5734 
5735 	phba = pci_get_drvdata(pcidev);
5736 	if (!phba) {
5737 		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5738 		return;
5739 	}
5740 
5741 	/* first stop UE detection before unloading */
5742 	del_timer_sync(&phba->hw_check);
5743 	cancel_delayed_work_sync(&phba->recover_port);
5744 	cancel_work_sync(&phba->sess_work);
5745 
5746 	beiscsi_iface_destroy_default(phba);
5747 	iscsi_host_remove(phba->shost);
5748 	beiscsi_disable_port(phba, 1);
5749 
5750 	/* after cancelling boot_work */
5751 	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
5752 
5753 	/* free all resources */
5754 	destroy_workqueue(phba->wq);
5755 	beiscsi_free_mem(phba);
5756 
5757 	/* ctrl uninit */
5758 	beiscsi_unmap_pci_function(phba);
5759 	dma_free_coherent(&phba->pcidev->dev,
5760 			    phba->ctrl.mbox_mem_alloced.size,
5761 			    phba->ctrl.mbox_mem_alloced.va,
5762 			    phba->ctrl.mbox_mem_alloced.dma);
5763 
5764 	pci_dev_put(phba->pcidev);
5765 	iscsi_host_free(phba->shost);
5766 	pci_disable_pcie_error_reporting(pcidev);
5767 	pci_set_drvdata(pcidev, NULL);
5768 	pci_release_regions(pcidev);
5769 	pci_disable_device(pcidev);
5770 }
5771 
5772 
5773 static struct pci_error_handlers beiscsi_eeh_handlers = {
5774 	.error_detected = beiscsi_eeh_err_detected,
5775 	.slot_reset = beiscsi_eeh_reset,
5776 	.resume = beiscsi_eeh_resume,
5777 };
5778 
5779 struct iscsi_transport beiscsi_iscsi_transport = {
5780 	.owner = THIS_MODULE,
5781 	.name = DRV_NAME,
5782 	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
5783 		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
5784 	.create_session = beiscsi_session_create,
5785 	.destroy_session = beiscsi_session_destroy,
5786 	.create_conn = beiscsi_conn_create,
5787 	.bind_conn = beiscsi_conn_bind,
5788 	.unbind_conn = iscsi_conn_unbind,
5789 	.destroy_conn = iscsi_conn_teardown,
5790 	.attr_is_visible = beiscsi_attr_is_visible,
5791 	.set_iface_param = beiscsi_iface_set_param,
5792 	.get_iface_param = beiscsi_iface_get_param,
5793 	.set_param = beiscsi_set_param,
5794 	.get_conn_param = iscsi_conn_get_param,
5795 	.get_session_param = iscsi_session_get_param,
5796 	.get_host_param = beiscsi_get_host_param,
5797 	.start_conn = beiscsi_conn_start,
5798 	.stop_conn = iscsi_conn_stop,
5799 	.send_pdu = iscsi_conn_send_pdu,
5800 	.xmit_task = beiscsi_task_xmit,
5801 	.cleanup_task = beiscsi_cleanup_task,
5802 	.alloc_pdu = beiscsi_alloc_pdu,
5803 	.parse_pdu_itt = beiscsi_parse_pdu,
5804 	.get_stats = beiscsi_conn_get_stats,
5805 	.get_ep_param = beiscsi_ep_get_param,
5806 	.ep_connect = beiscsi_ep_connect,
5807 	.ep_poll = beiscsi_ep_poll,
5808 	.ep_disconnect = beiscsi_ep_disconnect,
5809 	.session_recovery_timedout = iscsi_session_recovery_timedout,
5810 	.bsg_request = beiscsi_bsg_request,
5811 };
5812 
5813 static struct pci_driver beiscsi_pci_driver = {
5814 	.name = DRV_NAME,
5815 	.probe = beiscsi_dev_probe,
5816 	.remove = beiscsi_remove,
5817 	.id_table = beiscsi_pci_id_table,
5818 	.err_handler = &beiscsi_eeh_handlers
5819 };
5820 
5821 static int __init beiscsi_module_init(void)
5822 {
5823 	int ret;
5824 
5825 	beiscsi_scsi_transport =
5826 			iscsi_register_transport(&beiscsi_iscsi_transport);
5827 	if (!beiscsi_scsi_transport) {
5828 		printk(KERN_ERR
5829 		       "beiscsi_module_init - Unable to  register beiscsi transport.\n");
5830 		return -ENOMEM;
5831 	}
5832 	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5833 	       &beiscsi_iscsi_transport);
5834 
5835 	ret = pci_register_driver(&beiscsi_pci_driver);
5836 	if (ret) {
5837 		printk(KERN_ERR
5838 		       "beiscsi_module_init - Unable to  register beiscsi pci driver.\n");
5839 		goto unregister_iscsi_transport;
5840 	}
5841 	return 0;
5842 
5843 unregister_iscsi_transport:
5844 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
5845 	return ret;
5846 }
5847 
5848 static void __exit beiscsi_module_exit(void)
5849 {
5850 	pci_unregister_driver(&beiscsi_pci_driver);
5851 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
5852 }
5853 
5854 module_init(beiscsi_module_init);
5855 module_exit(beiscsi_module_exit);
5856